metadata (dict) | text (string, 60 – 3.49M chars)
---|---|
{
"source": "joaopalmeiro/rosalind",
"score": 3
} |
#### File: rosalind/code/computing-gc-content.py
```python
sample = """>Rosalind_6404
CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC
TCCCACTAATAATTCTGAGG
>Rosalind_5959
CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT
ATATCCATTTGTCAGCAGACACGC
>Rosalind_0808
CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC
TGGGAACCTGCGGGCAGTAGGTGGAAT"""
with open('../datasets/rosalind_gc.txt', 'r') as file:
data = file.read()
def highest_gc_content(sample: str) -> str:
dna_strings = sample.split(">")[1:]
fasta_id = ""
gc_content = 0.0
for dna in dna_strings:
current_fasta_id = dna.split("\n", 1)[0]
current_dna_string = dna.split("\n", 1)[1].replace("\n", "")
current_gc_content = ((current_dna_string.count(
"G") + current_dna_string.count("C")) / len(current_dna_string)) * 100
if(current_gc_content > gc_content):
fasta_id = current_fasta_id
gc_content = current_gc_content
return(f"{fasta_id}\n{gc_content}")
print(highest_gc_content(data))
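# Sanity check against the embedded Rosalind GC sample (kept above as `sample`);
# the expected answer from the problem statement is Rosalind_0808 with ~60.919540% GC:
# print(highest_gc_content(sample))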
```
#### File: rosalind/code/counting-point-mutations.py
```python
sample = """GAGCCTACTAACGGGAT
CATCGTAATGACGGCCT"""
with open('../datasets/rosalind_hamm.txt', 'r') as file:
data = file.read()
def hamming_distance(sample: str) -> int:
dna_s = sample.split()[0]
dna_t = sample.split()[1]
d_H = 0
for s1, s2 in zip(dna_s, dna_t):
if s1 != s2:
d_H += 1
return(d_H)
print(hamming_distance(data))
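# Sanity check with the embedded sample pair (expected Hamming distance: 7):
# print(hamming_distance(sample))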
```
#### File: rosalind/code/rabbits-and-recurrence-relations.py
```python
sample = "5 3"
with open('../datasets/rosalind_fib.txt', 'r') as file:
sample = file.read()
n = int(sample.split()[0])
k = int(sample.split()[1])
def fibonacci(n: int, k: int) -> int:
a = 0
b = 1
if n == 0:
return a
elif n == 1:
return b
else:
for i in range(2, n+1):
c = a + b
a = b * k
b = c
return b
print(fibonacci(n, k))
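# Sanity check with the Rosalind FIB sample "5 3" (expected result: 19 rabbit pairs):
# print(fibonacci(5, 3))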
```
#### File: rosalind/code/translating-rna-into-protein.py
```python
sample = "AUGGCCAUGGCGCCCAGAACUGAGAUCAAUAGUACCCGUAUUAACGGGUGA"
with open('../datasets/rna_codon_table.txt', 'r') as file:
table = file.read().split()
rna_codon_table = dict(zip(table[::2], table[1::2]))
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(rna_codon_table)
with open('../datasets/rosalind_prot.txt', 'r') as file:
data = file.read()
def translate_rna(rna_codon_table: dict, sample: str) -> str:
length = int(len(sample) / 3)
protein = ""
for i in range(length):
amino = rna_codon_table[sample[i*3:i*3+3]]
if(amino.lower() != "stop"):
protein = protein + amino
return(protein)
print(translate_rna(rna_codon_table, data))
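# Sanity check with the embedded sample RNA string (expected protein: MAMAPRTEINSTRING):
# print(translate_rna(rna_codon_table, sample))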
``` |
{
"source": "joaopalmeiro/themepark",
"score": 3
} |
#### File: themepark/themepark/utils.py
```python
import json
import click
def pretty_print(data):
click.echo(json.dumps(data, ensure_ascii=False, sort_keys=False, indent=2))
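# Usage sketch: ensure_ascii=False keeps accented characters readable in the echoed JSON.
# pretty_print({"cidade": "São Paulo", "ok": True})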
``` |
{
"source": "joaopalmeiro/toppics",
"score": 4
} |
#### File: toppics/toppics/utils.py
```python
from typing import Any, Sequence
from click import style
def list2str(seq: Sequence[Any]) -> str:
# Source: https://stackoverflow.com/a/53981846
# seq = [str(s) for s in seq]
seq = [style(str(s), underline=True) for s in seq]
if len(seq) < 3:
return " and ".join(seq)
return ", ".join(seq[:-1]) + ", and " + seq[-1]
``` |
{
"source": "joaopalmeiro/tugafy",
"score": 3
} |
#### File: tugafy/tugafy/dict_reader.py
```python
import csv
import io
import urllib.request as request
import functools
from typing import Dict
@functools.lru_cache(maxsize=2)
def get_dictionary() -> Dict[str, str]:
url = "https://raw.githubusercontent.com/joaopalmeiro/en-pt-dict-DS-ML/master/data/dict.csv"
response = request.urlopen(url)
csv_file = list(
csv.reader(io.StringIO(response.read().decode("utf-8")), delimiter=",")
)
dictionary_dict = {term[0]: term[1] for term in csv_file[1:]}
return dictionary_dict
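# Usage sketch (the CSV header row is skipped; actual keys depend on the remote file):
# dictionary = get_dictionary()
# dictionary.get("dataset")  # hypothetical key; returns its PT translation or None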
``` |
{
"source": "joaopalmeiro/vtils",
"score": 3
} |
#### File: vtils/vtils/pae.py
```python
import numpy as np
class ApproximateEntropy:
def __init__(self, m: int = 2, r: float = 20.0) -> None:
self.m = m
self.r = r
@staticmethod
def _phi(N: int, m: int, r: float, data: np.ndarray) -> float:
W = N - m + 1
windows = np.empty((W, m))
for i in range(W):
windows[i, :] = data[i : i + m]
# Similarities for each window.
S = np.zeros(W)
for i in range(W):
# `axis=1`: maxima along the second axis.
# Each window of size m is compared with
# every other window of the same size.
distance = np.max(np.abs(windows - np.roll(windows, i, axis=0)), axis=1)
S += np.less_equal(distance, r)
S /= W
# `.item()`: return Python float.
return (W ** (-1) * np.sum(np.log(S))).item()
def compute(self, data: np.ndarray) -> float:
if data.ndim != 1:
raise ValueError("The input data must be 1D.")
N = data.shape[0]
phi_m = self._phi(N, self.m, self.r, data)
phi_m_plus_one = self._phi(N, self.m + 1, self.r, data)
return abs(phi_m - phi_m_plus_one)
class Scaler:
def __init__(self, w: int, h: int) -> None:
self.h = h
self.w = w
def compute(self, data: np.ndarray) -> np.ndarray:
if data.ndim != 1:
raise ValueError("The input data must be 1D.")
# Number of elements in the array.
N = data.size
# 1D linear interpolation.
# - x: X-coordinates (interpolated values);
# - xp: Input-based X-coordinates (data points);
# - fp: Y-coordinates (data points/input).
interpolated_data = np.interp(
x=np.linspace(start=0, stop=N - 1, num=self.w), xp=np.arange(N), fp=data
)
# 0 -> min value.
# h (or height) -> max value.
# More info: https://en.wikipedia.org/wiki/Feature_scaling.
scaled_data = interpolated_data - np.min(interpolated_data)
scaled_data = scaled_data * (self.h / np.max(scaled_data))
return scaled_data
class PAE:
def __init__(self, w: int, h: int, m: int = 2, r: float = 20.0) -> None:
self.scale = Scaler(w, h)
self.approximate_entropy = ApproximateEntropy(m, r)
def compute(self, data: np.ndarray) -> float:
scaled_data = self.scale.compute(data)
return self.approximate_entropy.compute(scaled_data)
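# Minimal usage sketch (parameter values are illustrative, not from the original repo):
# pae = PAE(w=100, h=25, m=2, r=20.0)
# score = pae.compute(np.sin(np.linspace(0, 8 * np.pi, 500)))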
``` |
{
"source": "joao-paulo-alves/carla_vts",
"score": 3
} |
#### File: PythonAPI/my_workplace/get_loc.py
```python
import carla
import carla_agents
from carla import *
import numpy
import pygame
import random
import keyboard
import time
# ---------------------------------------Global-----------------------------
# SETTING CLIENT
client = carla.Client('localhost', 2000)
client.set_timeout(300.0)
# SET A SPECIFIED SCENARIO
world = client.load_world('Town10HD')
# GET ALL THE RECOMMENDED SPAWN LOCATIONS FOR VEHICLES
spawn_points = world.get_map().get_spawn_points()
i_len = len(spawn_points)
def main():
loc_init = carla.Transform(Location(x=0, y=0, z=10))
spectator = world.get_spectator()
spectator.set_transform(carla.Transform(loc_init.location))
i = 0
while True:
if keyboard.read_key() == "d": # if key 'q' is pressed
spectator.set_transform(carla.Transform(loc_init.location + Location(y=1)))
loc_init = carla.Transform(loc_init.location + Location(y=1))
elif keyboard.read_key() == "a":
spectator.set_transform(carla.Transform(loc_init.location + Location(y=-1)))
loc_init = carla.Transform(loc_init.location + Location(y=-1))
elif keyboard.read_key() == "w":
spectator.set_transform(carla.Transform(loc_init.location + Location(x=1)))
loc_init = carla.Transform(loc_init.location + Location(x=1))
elif keyboard.read_key() == "s":
spectator.set_transform(carla.Transform(loc_init.location + Location(x=-1)))
loc_init = carla.Transform(loc_init.location + Location(x=-1))
elif keyboard.read_key() == "x":
print(loc_init)
elif keyboard.read_key() == "q":
i = i - 1
if i < 0:
i = 0
spectator.set_transform(spawn_points[i])
print(i)
elif keyboard.read_key() == "e":
i = i + 1
if i > i_len - 1:
i = i_len - 1
spectator.set_transform(spawn_points[i])
print(i)
elif keyboard.read_key() == "z":
break
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
print('\ndone.')
```
#### File: my_workplace/repository/teste.py
```python
import carla
import carla_agents
from carla import *
import numpy
import pygame
import random
from carla.agents.navigation.basic_agent import BasicAgent
from carla.agents.navigation.behavior_agent import BehaviorAgent
# ---------------------------------------Global-----------------------------
# SETTING CLIENT
client = carla.Client('localhost', 2000)
client.set_timeout(30.0)
# SET A SPECIFIED SCENARIO
world = client.load_world('Town10HD')
# GET THE CURRENT SCENARIO
# world = client.get_world()
# GET ALL THE BLUEPRINTS
blueprint_library = world.get_blueprint_library()
lib_car = world.get_blueprint_library().filter('vehicle.*')
lib_walker = world.get_blueprint_library().filter('walker.pedestrian.*')
# GET ALL THE RECOMMENDED SPAWN LOCATIONS FOR VEHICLES
spawn_points = world.get_map().get_spawn_points()
# CREATE TRAFFIC MANAGER INSTANCE
traffic_manager = client.get_trafficmanager()
tm_port = traffic_manager.get_port()
# SYNCH MODE FOR INCREASED TRAFFIC_MANAGER STABILITY
init_settings = world.get_settings()
settings = world.get_settings()
settings.synchronous_mode = True
traffic_manager.set_synchronous_mode(True)
settings.fixed_delta_seconds = 0.5
# ______________________________________________________
class Sensor:
attr = blueprint_library.find('sensor.camera.rgb')
attr.set_attribute('image_size_x', '1820')
attr.set_attribute('image_size_y', '940')
attr.set_attribute('fov', '110')
attr.set_attribute('fstop', '1.6')
location = carla.Location(0.4, 0, 1.2)
rotation = carla.Rotation(8.75, 0, 0)
transform = carla.Transform(location, rotation)
info = 0
# ________________________________________________________
# ______________________________________________________
class MainActor:
attr = blueprint_library.find('vehicle.tesla.model3')
info = 0
# ________________________________________________________
# ______________________________________________________
# ______________________________________________________
class Cars:
total = random.randint(25, 26)
info = [0] * total
attr = 0
# ________________________________________________________
class Walkers:
info = 0
# ________________________________________________________
def generate_scenario():
z = 0
for x in Cars.info:
y = random.randint(0, 154)
Cars.attr = random.choice(lib_car)
while Cars.attr.id == 'vehicle.bh.crossbike' or Cars.attr.id == 'vehicle.micro.microlino' or Cars.attr.id == 'vehicle.diamondback.century' or Cars.attr.id == 'vehicle.vespa.zx125' or Cars.attr.id == 'vehicle.gazelle.omafiets':
print("Removed!|!!!")
Cars.attr = random.choice(lib_car)
spawned = world.try_spawn_actor(Cars.attr, carla.Transform(spawn_points[y].location,
carla.Rotation(yaw=spawn_points[y].rotation.yaw,
roll=spawn_points[y].rotation.roll,
pitch=spawn_points[
y].rotation.pitch)))
while spawned is None:
y = random.randint(0, 154)
spawned = world.try_spawn_actor(Cars.attr, carla.Transform(spawn_points[y].location,
carla.Rotation(yaw=spawn_points[y].rotation.yaw,
roll=spawn_points[
y].rotation.roll,
pitch=spawn_points[
y].rotation.pitch)))
Cars.info[z] = spawned
z = z + 1
for x in Cars.info:
x.set_autopilot(True, tm_port)
# ________________________________________________________
def main():
# SETTING UP MAIN ACTOR
MainActor.info = world.spawn_actor(MainActor.attr,
carla.Transform(spawn_points[1].location,
carla.Rotation(yaw=spawn_points[1].rotation.yaw,
roll=spawn_points[1].rotation.roll,
pitch=spawn_points[1].rotation.pitch)))
Sensor.info = world.spawn_actor(Sensor.attr, Sensor.transform, attach_to=MainActor.info,
attachment_type=carla.AttachmentType.Rigid)
agent = BehaviorAgent(MainActor.info, 'normal')
set_dest = 1
agent.set_destination(spawn_points[54].location)
#Sensor.info.listen(lambda image: image.save_to_disk('output/%06d.png' % image.frame))
spectator = world.get_spectator()
world.apply_settings(init_settings)
#generate_scenario()
while 1:
world.tick()
if agent.done():
if set_dest == 0:
agent.set_destination((spawn_points[54]).location)
set_dest = 1
elif set_dest == 1:
agent.set_destination((spawn_points[80]).location)
set_dest = 2
elif set_dest == 2:
agent.set_destination((spawn_points[86]).location)
set_dest = 3
elif set_dest == 3:
agent.set_destination((spawn_points[1]).location)
set_dest = 0
MainActor.info.apply_control(agent.run_step())
transform = MainActor.info.get_transform()
spectator.set_transform(carla.Transform(transform.location + carla.Location(z=25),
carla.Rotation(pitch=-90)))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
finally:
settings.synchronous_mode = False
traffic_manager.set_synchronous_mode(False)
print('\ndone.')
``` |
{
"source": "joaopaulocastro/resnetkeras",
"score": 3
} |
#### File: joaopaulocastro/resnetkeras/DataGenerator.py
```python
import keras
import Constants as const
import numpy as np
import h5py
class DataGeneratorFromPreprocessed(keras.utils.Sequence):
'Generates data for Keras'
def __init__(self, QtdeExamples, QtdeBatches, QtdeChannels = const.X_Channels):
'Initialization'
# folder where to fetch data
self.folderPath = const.PreprocessedDataFolderPath(QtdeExamples)
self.qtdeBatches = QtdeBatches
self.qtdeChannels = QtdeChannels
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return self.qtdeBatches
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.qtdeBatches - 1)
np.random.shuffle(self.indexes)
self.indexes = np.append(self.indexes, self.qtdeBatches - 1)
def __getitem__(self, index):
'Generate one batch of data'
X, y = self.__data_generation(index)
return X, y
def __data_generation(self, index):
'Generates data containing batch_size samples'
i = self.indexes[index]
f = h5py.File(self.folderPath + 'XY' + str(i+1) + '.hdf5', "r")
x0 = f['X'][:]
Y = f['Y'][:]
f.close()
if self.qtdeChannels == const.X_Channels:
X = x0
else:
shape = (x0.shape[0], x0.shape[1], x0.shape[2], self.qtdeChannels)
X = np.zeros(shape, np.float16)
X[:,:,:,:] = x0
return X, Y
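# Hypothetical usage with a Keras model (argument values are illustrative):
# train_gen = DataGeneratorFromPreprocessed(QtdeExamples=1000, QtdeBatches=20)
# model.fit_generator(train_gen, epochs=10)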
```
#### File: joaopaulocastro/resnetkeras/ImageAsArray.py
```python
import numpy as np
import PIL
from PIL import Image, ImageFilter
import Constants as const
import math
def GetImagePreprocessedArray(file,
filter = None,
tone = 0,
stepH = 0,
stepW = 0,
degree = 0):
"""
Arguments:
file: image file path
filter: filter to be applied to image
tone: integer to be added to each color component (R, G and B)
stepH: image displacement in height pixels
stepW: image displacement in width pixels
degree: rotation angle
"""
# open image
img = Image.open(file)
# get image size
w, h = img.size
# apply filter
if filter == None:
filterImg = img
else:
filterImg = img.filter(filter)
arr = np.array(filterImg).astype(np.int32)
# apply tone (carefull about overflow)
arr2 = arr + tone
arr2[arr2 < 0] = 0
arr2[arr2 > 255] = 255
# displace on height and width
if stepH < 0:
sliceOrigH = slice(0, h+(stepH))
sliceDestH = slice(-stepH, h)
else:
sliceOrigH = slice(stepH, h)
sliceDestH = slice(0, h-stepH)
if stepW < 0:
sliceOrigW = slice(0,w+stepW)
sliceDestW = slice(-stepW,w)
else:
sliceOrigW = slice(stepW,w)
sliceDestW = slice(0,w-stepW)
arr2[sliceDestH,sliceDestW,:] = arr2[sliceOrigH,sliceOrigW,:]
# rotate
if degree != 0:
imgRot = Image.fromarray(arr2.astype(np.uint8)).convert('RGBA').rotate(degree)
imgRotBack = Image.new("RGBA", imgRot.size, (255, 255, 255, 255))
arr2 = np.array(Image.composite(imgRot, imgRotBack, imgRot)).astype(np.int32)
# resize to match NN input size
if (arr2.shape[0] != const.X_Height) or (arr2.shape[1] != const.X_Width):
arr2 = np.array(Image.fromarray(arr2.astype(np.uint8)).resize(size=(const.X_Width, const.X_Height), resample=PIL.Image.ANTIALIAS))
# apply mask to avoid noise outside of circle
# important: as we're using images from fisheye cameras, the Region Of Interest is a circle
masked_arr = arr2.copy()
masked_arr[~createCircularMask(const.X_Width, const.X_Height)] = 255
# return (np.expand_dims(masked_arr, axis=2)/np.float16(255.)).astype(np.float16)
return (masked_arr/np.float16(255.)).astype(np.float16)
def createCircularMask(h, w, center=None, radius=None):
"""
this is an auxiliary function to create a circular mask over the image
remember that our images were obtained from fisheye lenses, so the ROI is a circle...
"""
if center is None: # use the middle of the image
center = [int(w/2), int(h/2)]
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w-center[0], h-center[1])
Y, X = np.ogrid[:h, :w]
dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2)
mask = dist_from_center <= radius
return mask
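# Usage sketch: the mask is True inside the inscribed circle and False outside.
# mask = createCircularMask(224, 224)
# img[~mask] = 255  # hypothetical image array; whites out everything outside the ROI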
```
#### File: joaopaulocastro/resnetkeras/LoadSets.py
```python
import Constants as const
import FindLabel
import ImageAsArray as myImg
import numpy as np
from os import walk
def LoadSet(folder, classFile = const.LabelsFilePath):
# get all files
f = []
for (dirpath, dirnames, filenames) in walk(folder):
f.extend(filenames)
break
fl = FindLabel.FindLabel(classFile)
lbl = np.zeros((len(f)), dtype=np.uint8)
for i in range(0, len(f)):
l = fl.find_label(f[i], raiseError=False)
if l < 0:
l = const.Y_Classes + 1
lbl[i] = l - 1
# prepare training set
Qtde = len(f)
X = np.empty(shape=(Qtde, const.X_Height, const.X_Width, const.X_Channels), dtype=np.float16)
Y = np.zeros(shape=(Qtde, const.Y_Classes + 1), dtype=np.uint8)
# print('X.nbytes: ' + str(X.nbytes))
for i in range(0, len(f)):
if (i % 50) == 0:
print(i)
X[i] = myImg.GetImagePreprocessedArray(folder + '/' + f[i])
Y[i, lbl[i]] = 1
print("Loaded set: ")
print('Qtde: ' + str(Qtde))
return Qtde, f, X, Y
```
#### File: joaopaulocastro/resnetkeras/ResNetModel.py
```python
import Constants as const
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model
from keras.initializers import glorot_uniform
### =============== my implementation for ResNet architectures : BEGIN ============================
def identity_block(X, f, filters, stage, block, small = False):
"""
Identity block implementation
arguments:
X = input tensor resulting from previous block, having shape(m, H_prev, W_prev, C_prev), where:
m
H_prev = H resulting from previous block
W_prev = W resulting from previous block
C_prev = Channels resulting from previous block
f = integer, shape of the middle convolution window for the main path (kernel size)
filters = integer list, number of filters in the convolution layers for the main path
stage = integer used to give each layer a different name
block = string used to give each layer a different name
small = used to differentiate kernel size and padding when building Small or Large resnet
Returns:
X -- output of the identity block, tensor of shape (H, W, C)
"""
# give each layer a different name
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value.
X_shortcut = X
# kernel size and padding for 1st components
ks = (1, 1)
pad = 'valid'
if (small):
ks = (f, f)
pad = 'same'
# First component of main path
X = Conv2D(filters = F1, kernel_size = ks, strides = (1,1), padding = pad, name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
if (not small):
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = layers.Add()([X_shortcut, X])
X = Activation('relu')(X)
return X
def conv_block(X, f, filters, stage, block, s = 2, small = False):
"""
Implementation of the convolutional block
Arguments:
X = input tensor resulting from previous block, having shape(m, H_prev, W_prev, C_prev), where:
m
H_prev = H resulting from previous block
W_prev = W resulting from previous block
C_prev = Channels resulting from previous block
f = integer, shape of the middle convolution window for the main path (kernel size)
filters = integer list, number of filters in the convolution layers for the main path
stage = integer used to give each layer a different name
block = string used to give each layer a different name
s = integer, defines stride
small = used to differentiate kernel size and padding when building Small or Large resnet
Returns:
X -- output of the convolutional block, tensor of shape (H, W, C)
"""
# give each layer a different name
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
# kernel size and padding for 1st components
ks = (1, 1)
pad = 'valid'
shortcutFilters = F3
if (small):
ks = (f, f)
pad = 'same'
shortcutFilters = F2
##### MAIN PATH #####
# First component of main path
X = Conv2D(F1, ks, strides = (s,s), padding = pad, name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
if (not small):
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
##### SHORTCUT PATH ####
X_shortcut = Conv2D(filters = shortcutFilters, kernel_size = (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
##### END SHORTCUT PATH ####
# Final step: RELU activation
X = layers.Add()([X_shortcut, X])
X = Activation('relu')(X)
return X
def ResNetSmall(input_shape = (const.X_Height, const.X_Width, const.X_Channels), classes = const.Y_Classes, Layers = 18):
"""
Implementation of small size ResNet (18 or 34 layers)
"""
if (Layers != 18) and (Layers != 34):
raise ValueError('Invalid layer count: ' + str(Layers) + " (must be 18 or 34).")
Layers34 = (Layers == 34)
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = conv_block(X, f = 3, filters = [64, 64, None], stage = 2, block='a', s = 1, small = True)
X = identity_block(X, 3, [64, 64, None], stage=2, block='b', small = True)
if (Layers34):
X = identity_block(X, 3, [64, 64, None], stage=2, block='c', small = True)
# Stage 3
X = conv_block(X, f = 3, filters = [128, 128, None], stage = 3, block='a', s = 2, small = True)
X = identity_block(X, 3, [128, 128, None], stage=3, block='b', small = True)
if (Layers34):
X = identity_block(X, 3, [128, 128, None], stage=3, block='c', small = True)
X = identity_block(X, 3, [128, 128, None], stage=3, block='d', small = True)
# Stage 4
X = conv_block(X, f = 3, filters = [256, 256, None], stage = 4, block='a', s = 2, small = True)
X = identity_block(X, 3, [256, 256, None], stage=4, block='b', small = True)
if (Layers34):
X = identity_block(X, 3, [256, 256, None], stage=4, block='c', small = True)
X = identity_block(X, 3, [256, 256, None], stage=4, block='d', small = True)
X = identity_block(X, 3, [256, 256, None], stage=4, block='e', small = True)
X = identity_block(X, 3, [256, 256, None], stage=4, block='f', small = True)
# Stage 5
X = conv_block(X, f = 3, filters = [512, 512, None], stage = 5, block='a', s = 2, small = True)
X = identity_block(X, 3, [512, 512, None], stage=5, block='b', small = True)
if (Layers34):
X = identity_block(X, 3, [512, 512, None], stage=5, block='c', small = True)
# Create model
model = Model(inputs = X_input, outputs = X, name='ResNet' + str(Layers))
return model
def ResNetLarge(input_shape = (const.X_Height, const.X_Width, const.X_Channels), classes = const.Y_Classes, Layers = 50, weights = 'imagenet'):
"""
Implementation of large size ResNet (50, 101 or 152 layers)
"""
# validate parameters
if (Layers != 50) and (Layers != 101) and (Layers != 152):
raise ValueError('Invalid layer number: ' + str(Layers) + " (must be 50, 101 or 152).")
if (weights not in [None, 'imagenet']):
raise ValueError('Invalid weights definition: ' + str(weights) + " (must be None or 'imagenet').")
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
X = ZeroPadding2D((1, 1))(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = conv_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
# Stage 3
stage3Loops = 3
if (Layers == 152):
stage3Loops = 7
X = conv_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2)
for i in range(0, stage3Loops):
X = identity_block(X, 3, [128, 128, 512], stage=3, block='b' + str(i))
# Stage 4
stage4Loops = 5
if (Layers == 101):
stage4Loops = 22
elif (Layers == 152):
stage4Loops = 35
X = conv_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)
for i in range(0, stage4Loops):
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b' + str(i))
# Stage 5
X = conv_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
# Create model
model_name = 'resnet' + str(Layers)
model = Model(inputs = X_input, outputs = X, name=model_name)
# time to load weights (if they were required)
if (weights == 'imagenet'):
BASE_WEIGHTS_PATH = (
'https://github.com/keras-team/keras-applications/'
'releases/download/resnet/')
file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
weights_path = tf.keras.utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models')
model.load_weights(weights_path, by_name=False)
return model
### =============== END my implementation for ResNet architectures ============================
def ResNet(input_shape = (const.X_Height, const.X_Width, const.X_Channels), classes = const.Y_Classes, Layers = 50, source = 'keras', weights = 'imagenet'):
"""
create ResNet model
Arguments:
input_shape = Height, Width and channels for each input image
classes = how many classes the model will be trained on
Layers: how many layers; should be one of [18, 34, 50, 101, 152]
source: 'keras' (use built-in model) or 'manual' (use my custom model above)
weights: 'imagenet' (load weights from keras lib) or None (no weights loading)
'imagenet' only available if layers in [50,101,152]
"""
# validate parameters
if (Layers not in [18, 34, 50, 101, 152]):
raise ValueError('Invalid layer number: ' + str(Layers) + ' (must be one of [18, 34, 50, 101, 152]).')
if (source not in ['keras', 'manual']):
raise ValueError('Invalid model source: ' + str(source) + " (must be 'keras' or 'manual').")
if (weights not in [None, 'imagenet']):
raise ValueError('Invalid weights definition: ' + str(weights) + " (must be None or 'imagenet').")
if (Layers in [18, 34]):
if (source == 'keras'):
raise ValueError("No keras model available for small ResNets. 'source' parameter must be 'manual' when layers are 18 or 34.")
if (weights != None):
raise ValueError("No weights available for small ResNets. 'weights' Parameter must be None when layers are 18 or 34.")
# build model
if (source == 'keras'):
# load base model from keras
if (Layers == 50):
from keras.applications.resnet import ResNet50
baseModel = ResNet50(include_top = False, weights = weights, input_shape = input_shape)
elif (Layers == 101):
from keras.applications.resnet import ResNet101
baseModel = ResNet101(include_top = False, weights = weights, input_shape = input_shape)
elif (Layers == 152):
from keras.applications.resnet import ResNet152
baseModel = ResNet152(include_top = False, weights = weights, input_shape = input_shape)
elif (source == 'manual'):
# load model from my implementation
if (Layers in [18,34]):
baseModel = ResNetSmall(input_shape=input_shape, classes=classes, Layers=Layers)
else:
baseModel = ResNetLarge(input_shape=input_shape, classes=classes, Layers=Layers, weights=weights)
# add final layers to built-in keras model
from keras.models import Model
from keras.layers import Dense, Flatten, AveragePooling2D
X = baseModel.output
X = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)(X)
X = Flatten()(X)
Preds = Dense(const.Y_Classes, activation='softmax', name='fc' + str(const.Y_Classes))(X)
model = Model(inputs=baseModel.input, outputs=Preds)
# return the model
return model
def Compile(model):
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
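# Hypothetical end-to-end usage (input shape and class count come from Constants.py):
# model = ResNet(Layers=50, source='keras', weights='imagenet')
# Compile(model)
# model.summary()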
def get_model_memory_usage(model):
import numpy as np
from keras import backend as K
shapes_mem_count = 0
for l in model.layers:
single_layer_mem = 1
for s in l.output_shape:
if s is None:
continue
single_layer_mem *= s
shapes_mem_count += single_layer_mem
trainable_count = np.sum([K.count_params(p) for p in set(model.trainable_weights)])
non_trainable_count = np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])
total_memory = 4.0*(shapes_mem_count + trainable_count + non_trainable_count)
# gbytes = np.round(total_memory / (1024.0 ** 3), 3)
# return gbytes
return total_memory
``` |
{
"source": "JoaoPauloJorgeDeOliveira/Python-WhatsAppAPI",
"score": 2
} |
#### File: JoaoPauloJorgeDeOliveira/Python-WhatsAppAPI/whatsapp_api.py
```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver import ActionChains
from time import sleep
import sys
# Parameters
WP_LINK = 'https://web.whatsapp.com'
# XPATHS
CONTACTS = '//*[@id="main"]/header/div[2]/div[2]/span'
SEND = '//*[@id="main"]/footer/div[1]/div[3]'
MESSAGE_BOX = '//*[@id="main"]/footer/div[1]/div[2]/div/div[2]'
NEW_CHAT = '//*[@id="side"]/header/div[2]/div/span/div[2]/div'
SEARCH_CONTACT = '//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/div[1]/div/label/input'
FIRST_CONTACT = '//*[@id="app"]/div/div/div[2]/div[1]/span/div/span/div/div[2]/div/div/div/div[2]/div'
MoreContactsXPath = '//*[@id="app"]/div/div/div[2]/div[3]/span/div/span/div/div/div[1]/div[5]/div[5]/div[2]'
GroupInfoXPath = '//*[@id="app"]/div/div/div[2]/div[3]/span/div/span/div/div/div[1]'
# Classes
nameClass = '_19RFN._1ovWX._F7Vk'
messageClass = '_12pGw'
messageMenuClass = '_2-qoA'
messageMenuButtonsClass = '_3zy-4.Sl-9e._3_4Kp'
eraseButtonsClass = '_2eK7W._23_1v'
class WhatsApp:
def __init__(self):
self.driver = self._setup_driver()
self.driver.get(WP_LINK)
print("Please scan the QR Code and enter in the group that you want to \
have control")
@staticmethod
def _setup_driver():
print('Loading...')
chrome_options = Options()
chrome_options.add_argument("disable-infobars")
driver = webdriver.Chrome(chrome_options=chrome_options)
return driver
def _get_element(self, xpath, attempts=5, _count=0):
'''Safe get_element method with multiple attempts'''
try:
element = self.driver.find_element_by_xpath(xpath)
# print('Found element!')
return element
except Exception as e:
if _count < attempts:
sleep(1)
# print(f'Attempt {_count}')
return self._get_element(xpath, attempts=attempts, _count=_count + 1)
else:
print("Element not found")
def _click(self, xpath):
el = self._get_element(xpath)
el.click()
def _send_keys(self, xpath, message):
el = self._get_element(xpath)
el.send_keys(message)
def write_message(self, message):
'''Write message in the text box but not send it'''
self._click(MESSAGE_BOX)
self._send_keys(MESSAGE_BOX, message)
def send_message(self, message):
'''Write and send message'''
self.write_message(message)
self._click(SEND)
def get_group_numbers(self):
'''Get phone numbers from a whatsapp group'''
try:
el = self.driver.find_element_by_xpath(CONTACTS)
return el.text.split(', ')
except Exception as e:
print("Group header not found")
def get_group_members_long(self):
"""Get complete members' names (or numbers, if person is not in contact list) from a WhatsApp group"""
try:
# Click on contacts:
el = self.driver.find_element_by_xpath(CONTACTS)
el.click()
sleep(3)
# Trying to click in more contacts (it may not exist)
try:
el = self.driver.find_element_by_xpath(MoreContactsXPath)
el.click()
except Exception as e:
msg = 'Error in {}.{}. Message: {}'.format(
self.__class__.__name__, # Ref. for getting class name on 2019-06-26: https://stackoverflow.com/questions/510972/getting-the-class-name-of-an-instance
sys._getframe().f_code.co_name, # Ref. for getting method name on 2019-06-26: https://stackoverflow.com/questions/251464/how-to-get-a-function-name-as-a-string-in-python
e)
print(msg)
el1 = self.driver.find_element_by_xpath(GroupInfoXPath) # Getting element for Group Info box panel.
el2 = el1.find_elements_by_class_name(nameClass) # Locating all elements of such class inside el1.
Members = [e.text for e in el2] # Getting only the texts, not the whole objects.
return Members
except Exception as e:
msg = 'Error in {}.{}. Message: {}'.format(
self.__class__.__name__,
# Ref. for getting class name on 2019-06-26: https://stackoverflow.com/questions/510972/getting-the-class-name-of-an-instance
sys._getframe().f_code.co_name,
# Ref. for getting method name on 2019-06-26: https://stackoverflow.com/questions/251464/how-to-get-a-function-name-as-a-string-in-python
e)
print(msg)
def search_contact(self, keyword):
'''Search for a contact and open the first match'''
self._click(NEW_CHAT)
self._send_keys(SEARCH_CONTACT, keyword)
sleep(1)
try:
self._click(FIRST_CONTACT)
except Exception as e:
print("Contact not found")
def get_all_messages(self):
all_messages_element = self.driver.find_elements_by_class_name('_12pGw')
all_messages_text = [e.text for e in all_messages_element]
return all_messages_text
def get_last_message(self):
all_messages = self.get_all_messages()
return all_messages[-1]
def get_all_messages_elements(self):
"""Gets all messages currently shown in screen."""
all_messages_element = self.driver.find_elements_by_class_name(messageClass)
return all_messages_element
def delete_message_from_recent(self, text):
"""From recent (visible) messages, deletes the one with text equals to 'text'."""
try:
all_messages_element = self.get_all_messages_elements() # Getting all recent messages.
for e in reversed(all_messages_element): # Looking at each element in reversed order.
if e.text == text:
# Moving mouse over message, so menu appear. Ref: http://allselenium.info/mouse-over-actions-using-python-selenium-webdriver/
action = ActionChains(self.driver)
action.move_to_element(e).perform()
sleep(1)
# Clicking on menu
msgMenu = self.driver.find_elements_by_class_name(messageMenuClass)
msgMenu[0].click()
sleep(1)
# Clicking on delete button:
msgMenuButtons = self.driver.find_elements_by_class_name(messageMenuButtonsClass) # Getting buttons
msgMenuButtons[-1].click() # Clicking on last button.
sleep(1)
# Clicking on 'Erase for me' button:
eraseButtons = self.driver.find_elements_by_class_name(eraseButtonsClass) # Getting buttons
eraseButtons[0].click() # Clicking on first button.
break # After deleting last msg that corresponds to 'text', breaks for loop.
else:
print('Did not find recent message with text: ' + text)
except Exception as e:
msg = 'Error in {}.{}. Message: {}'.format(
self.__class__.__name__, # Ref. for getting class name on 2019-06-26: https://stackoverflow.com/questions/510972/getting-the-class-name-of-an-instance
sys._getframe().f_code.co_name, # Ref. for getting method name on 2019-06-26: https://stackoverflow.com/questions/251464/how-to-get-a-function-name-as-a-string-in-python
e)
print(msg)
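# Hypothetical usage (requires Chrome/chromedriver and scanning the QR code first):
# wp = WhatsApp()
# wp.search_contact("John")
# wp.send_message("Hello from Selenium!")
# wp.delete_message_from_recent("Hello from Selenium!")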
``` |
{
"source": "joaopaulojpsp/python_matematica",
"score": 4
} |
#### File: python_matematica/Conjuntos/inteiros.py
```python
def numeros_inteiros():
print("Numeros inteiros(Z): ")
z = -14;
print("...")
while(z < 15):
print(z)
z = z + 1
print("...")
def numeros_inteiros_positivos():
print("Numeros Inteiros Positivos(Z+): ")
z = 0;
while(z < 15):
print(z)
z = z + 1
print("...")
def numeros_inteiros_negativos():
print("Numeros Inteiros Negativos(Z-): ")
z = -14;
print("...")
while(z < 1):
print(z)
z = z + 1
print("...")
def numeros_inteiros_positivos_sem_zero():
print("Numeros Inteiros Positivos sem o zero(Z*+): ")
z = 1;
while(z < 15):
print(z)
z = z + 1
print("...")
def numeros_inteiros_negativos_sem_zero():
print("Numeros Inteiros Negativos Sem o Zero(Z*-): ")
z = -14;
print("...")
while(z < 0):
print(z)
z = z + 1
def options():
print("Digite um numero: ")
print("1- Todos os Numeros Inteiros (Z)")
print("2- Todos os Numeros Inteiros Positivos (Z+)")
print("3- Todos os Numeros Inteiros Negativos (Z-)")
print("4- Todos os Numeros Inteiros Positivos Sem o Zero (Z*+)")
print("5- Todos os Numeros Inteiros Negativos Sem o Zero (Z*-)")
print("6- Para Sair")
option = int(input())
return option
option = 7
while(option != 6):
if(option == 1):
numeros_inteiros()
elif(option == 2):
numeros_inteiros_positivos()
elif(option == 3):
numeros_inteiros_negativos()
elif(option == 4):
numeros_inteiros_positivos_sem_zero()
elif(option == 5):
numeros_inteiros_negativos_sem_zero()
option = options()
``` |
{
"source": "joaopauloramos/pythonbirds_I",
"score": 4
} |
#### File: oo/testes/teste_integracao.py
```python
from unittest import TestCase
from calculadora.oo.framework import Calculadora
class CalculadoraParaTeste(Calculadora):
def obter_inputs(self):
return '+',1,2
class CalculadoraTestes(TestCase):
def test_adicionar_operacao(self):
obj_qq=''
calculadora =Calculadora()
calculadora.adicionar_operacao('string', obj_qq)
self.assertEqual(calculadora._operacoes['string'],obj_qq)
def test_efetuar_operacao(self):
calculadora = CalculadoraParaTeste()
self.assertEqual(3, calculadora.efetuar_operacao())
```
#### File: calculadora/procedural/biblioteca.py
```python
def calcular_iterativamente_forma_infixa():
p1 = input('Digite o primeiro número: ')
p1 = float(p1)
sinal = input('Digite o sinal da operação (+ ou -): ')
p2 = input('Digite o segundo número: ')
p2 = float(p2)
return _calcular(sinal, p1, p2)
def calcular_iterativamente_forma_prefixa():
sinal = input('Digite o sinal da operação (*, + ou -): ')
p1 = input('Digite o primeiro número: ')
p1 = float(p1)
p2 = input('Digite o segundo número: ')
p2 = float(p2)
return _calcular(sinal, p1, p2)
def _calcular(sinal, p1, p2):
if sinal == '+':
return p1 + p2
elif sinal == '-':
return p1 - p2
elif sinal == '*':
return p1 * p2
raise Exception('Operação não suportada')
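# Quick sanity checks:
# _calcular('+', 1, 2) == 3
# _calcular('*', 2.5, 4) == 10.0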
```
#### File: pythonbirds_I/introducao/funcoes.py
```python
def f(a=2, b=3):
return a + b
print(f(1, 2))
print(f(4))
print(f())
print(f(b=4))
print(f(b=4, a=8))
def g():
return 4, 5
primeiro, segundo = g()
print(primeiro, segundo)
``` |
{
"source": "joaopaulosr95/p2p-key-value",
"score": 2
} |
#### File: joaopaulosr95/p2p-key-value/client.py
```python
import argparse
import logging
import socket
import struct
import sys
from utils import utils, clientutils
# Logging setup
logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s][%(levelname)s] %(message)s",
datefmt="%m-%d-%Y %I:%M:%S %p")
"""
| ===================================================================
| main program
| ===================================================================
"""
if __name__ == "__main__":
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('server', type=str, metavar="host:port", help="ip:port of running server")
opt = parser.parse_args()
# connection parameters
srv_host, srv_port = opt.server.split(":")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def flush():
sys.stdout.write("Enter a service name to search: ")
sys.stdout.flush()
flush()
while True:
try:
user_input = raw_input()
if user_input.lower() == 'exit':
logger.info(" Bye =)")
break
elif len(user_input) == 0:
logger.warning("Your query must be at least 1 character long")
else:
# Prepare query
send_header = struct.pack(utils.MESSAGE_FORMAT["CLIREQ"], utils.MESSAGE_TYPES["CLIREQ"])
clientutils.p2p_ask_kv(sock, send_header, user_input.lower(), srv_host, srv_port)
flush()
except KeyboardInterrupt:
print("\nBye =)")
break
sock.close()
``` |
{
"source": "joaopaulosr95/python-chat",
"score": 2
} |
#### File: python-chat/pythonchat/chatutils.py
```python
import struct
import logging
# Logging setup
logging.basicConfig(level=logging.DEBUG, format="[%(asctime)s] %(message)s")
"""
| ===================================================================
| Constants definition
| ===================================================================
"""
ERROR_FLAG = -1
MAX_MSG_LEN = 65535
SRV_ID = 65535
MESSAGE_TYPES = {"OK": 1, "ERRO": 2, "OI": 3, "FLW": 4, "MSG": 5, "CREQ": 6, "CLIST": 7}
SENDER_RANGE_MIN = 1
SENDER_RANGE_MAX = 4095
VIEWER_RANGE_MIN = 4096
VIEWER_RANGE_MAX = 8191
MAX_CLIENTS = VIEWER_RANGE_MAX - VIEWER_RANGE_MIN + SENDER_RANGE_MAX - SENDER_RANGE_MIN
HEADER_FORMAT = "!HHHH"
HEADER_SIZE = struct.calcsize(HEADER_FORMAT)
"""
| ===================================================================
| deliver_message: sends a message to a defined host
| ===================================================================
"""
def deliver_message(to_sock, message_type, from_id, to_id, seq_number, message_len=None, message=None):
logger = logging.getLogger(__name__)
header = struct.pack(HEADER_FORMAT, message_type, from_id, to_id, seq_number)
# 5 tries to deliver the message
for i in range(5):
try:
to_sock.send(header)
if message_type in (MESSAGE_TYPES["CLIST"], MESSAGE_TYPES["MSG"]):
to_sock.send(struct.pack("!H", message_len) + message)
if message_type not in (MESSAGE_TYPES["OI"], MESSAGE_TYPES["OK"], MESSAGE_TYPES["ERRO"]):
answer = struct.unpack(HEADER_FORMAT, to_sock.recv(HEADER_SIZE))[0]
if answer == MESSAGE_TYPES["OK"]:
break
elif answer == MESSAGE_TYPES["ERRO"]:
logger.warning("Fail to deliver message")
break
else:
break
except: # TODO better error handling
pass
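# Usage sketch (socket and IDs are illustrative; the header is "!HHHH", four unsigned shorts):
# deliver_message(client_sock, MESSAGE_TYPES["MSG"], from_id=1, to_id=4096,
#                 seq_number=0, message_len=5, message=b"hello")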
``` |
{
"source": "joaopauloss/flask-training",
"score": 3
} |
#### File: flask-training/users/users.py
```python
import jwt
import datetime
from flask import current_app
# from constants import user_levels, token_active_time
import json
token_exception_messages = {
'expired': 'Inactivity timeout has expired.',
'forbidden': 'Forbidden access.',
'invalid': 'Invalid authentication.'
}
token_active_time = {
'days': 0,
'hours': 0,
'seconds': 600,
't_days': 0,
't_hours': 0,
't_seconds': 30
}
user_levels = {
"low": "level:low",
"medium": "level:medium",
"high": "level:high",
"all": ["level:low", "level:medium", "level:high"]
}
class Users:
def __init__(self):
self.users_credentials = dict()
self.load_credentials()
def authenticate_user(self, user_name, password):
"""
Authenticates the user login.
:param user_name: User name
:param password: <PASSWORD>
:return: The token, if authorized.
"""
user = self.users_credentials.get(user_name)
if user:
if user.get("pwd") == password:
return self.encode_auth_token(user_name)
return None
def get_user_level(self, user_name):
"""
Gets the user level as URI syntax.
:param user_name: User name
:return: User's level.
"""
user = self.users_credentials.get(user_name)
if user:
return user_levels.get(user.get("level"))
return None
def load_credentials(self):
"""
Loads the local text file of users' credentials
"""
with open('credentials.txt') as file:
data = file.readlines()
self.users_credentials = json.loads(''.join(list(map(lambda x: x.replace('\n', ''), data))).strip())
def encode_auth_token(self, user_name):
"""
Generates the authentication token for a user at login stage.
:param user_level: User's permission level
:param user_name: User's unique name
:return: Token with the user ID, permission level and expiration date.
"""
try:
is_testing = current_app.config.get("DEBUG") or current_app.config.get("TESTING")
payload = {
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(
days=(token_active_time["t_days"] if is_testing else token_active_time["days"]),
hours=(token_active_time["t_hours"] if is_testing else token_active_time["hours"]),
seconds=(token_active_time["t_seconds"] if is_testing else token_active_time["seconds"])
),
# 'aud': user_level,
'user_name': user_name
}
return jwt.encode(payload, current_app.config.get('SECRET_KEY'), algorithm='HS256')
except Exception as e:
return e
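# Hypothetical usage inside a Flask app/request context (credentials.txt must exist):
# users = Users()
# token = users.authenticate_user("alice", "s3cret")  # JWT on success, None otherwise
# level = users.get_user_level("alice")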
``` |
{
"source": "joaopaulq/slt",
"score": 2
} |
#### File: slt/signjoey/translation.py
```python
import torch
import pickle
import gzip
from torchtext import data
from slt.signjoey.model import build_model
from slt.signjoey.batch import Batch
from slt.signjoey.data import make_data_iter
from slt.signjoey.vocabulary import PAD_TOKEN
from slt.signjoey.dataset import SignTranslationDataset
from slt.signjoey.phoenix_utils.phoenix_cleanup import (
clean_phoenix_2014,
clean_phoenix_2014_trans,
)
from slt.signjoey.vocabulary import (
build_vocab,
UNK_TOKEN,
EOS_TOKEN,
BOS_TOKEN,
PAD_TOKEN,
)
from slt.signjoey.helpers import (
bpe_postprocess,
load_config,
load_checkpoint,
)
torch.backends.cudnn.deterministic = True
def translate(cfg_file: str, ckpt: str, input_path: str) -> str:
cfg = load_config(cfg_file)
batch_type = cfg["training"].get("batch_type", "sentence")
use_cuda = cfg["training"].get("use_cuda", False)
level = cfg["data"]["level"]
dataset_version = cfg["data"].get("version", "phoenix_2014_trans")
translation_max_output_length = cfg["training"].get(
"translation_max_output_length", None
)
txt_lowercase = cfg["data"]["txt_lowercase"]
best_recog_beam_size = cfg["translating"]["best_recog_beam_size"]
best_trans_beam_size = cfg["translating"]["best_trans_beam_size"]
best_trans_beam_alpha = cfg["translating"]["best_trans_beam_alpha"]
sgn_dim = cfg["data"]["feature_size"]
do_recognition = cfg["training"].get("recognition_loss_weight", 1.0) > 0.0
do_translation = cfg["training"].get("translation_loss_weight", 1.0) > 0.0
gls_vocab = build_vocab(
field="gls",
min_freq=None,
max_size=None,
dataset=None,
vocab_file='slt/data/gls.vocab',
)
txt_vocab = build_vocab(
field="txt",
min_freq=None,
max_size=None,
dataset=None,
vocab_file='slt/data/txt.vocab',
)
def tokenize_text(text):
return list(text) if level == "char" else text.split()
def tokenize_features(features):
ft_list = torch.split(features, 1, dim=0)
return [ft.squeeze() for ft in ft_list]
def stack_features(features, something):
return torch.stack([torch.stack(ft, dim=0) for ft in features], dim=0)
sequence_field = data.RawField()
signer_field = data.RawField()
sgn_field = data.Field(
use_vocab=False,
init_token=None,
dtype=torch.float32,
preprocessing=tokenize_features,
tokenize=lambda features: features,
batch_first=True,
include_lengths=True,
postprocessing=stack_features,
pad_token=torch.zeros((sgn_dim,)),
)
gls_field = data.Field(
pad_token=PAD_TOKEN,
tokenize=tokenize_text,
batch_first=True,
lower=False,
include_lengths=True,
)
txt_field = data.Field(
init_token=BOS_TOKEN,
eos_token=EOS_TOKEN,
pad_token=PAD_TOKEN,
tokenize=tokenize_text,
unk_token=UNK_TOKEN,
batch_first=True,
lower=txt_lowercase,
include_lengths=True,
)
gls_field.vocab = gls_vocab
txt_field.vocab = txt_vocab
trans_path = "slt/data/trans.pkl"
build_embeddings(trans_path, input_path)
trans_data = SignTranslationDataset(
path=trans_path,
fields=(sequence_field, signer_field, sgn_field, gls_field, txt_field),
)
trans_iter = make_data_iter(
dataset=trans_data,
batch_size=1,
batch_type=batch_type,
shuffle=False,
train=False,
)
model = build_model(
cfg=cfg["model"],
gls_vocab=gls_vocab,
txt_vocab=txt_vocab,
sgn_dim=sgn_dim,
do_recognition=False,
do_translation=True,
)
ckpt = cfg["training"]["load_model"]
model_checkpoint = load_checkpoint(ckpt, use_cuda=use_cuda)
model.load_state_dict(model_checkpoint["model_state"])
if use_cuda: model.cuda()
model.eval()
with torch.no_grad():
all_gls_outputs = []
all_txt_outputs = []
batch = Batch(
is_train=False,
torch_batch=next(iter(trans_iter)),
txt_pad_index=txt_vocab.stoi[PAD_TOKEN],
sgn_dim=sgn_dim,
use_cuda=use_cuda,
frame_subsampling_ratio=None,
)
sort_reverse_index = batch.sort_by_sgn_lengths()
(
batch_gls_predictions,
batch_txt_predictions,
_,
) = model.run_batch(
batch=batch,
recognition_beam_size=best_recog_beam_size if do_recognition else None,
translation_beam_size=best_trans_beam_size if do_translation else None,
translation_beam_alpha=best_trans_beam_alpha if do_translation else None,
translation_max_output_length=translation_max_output_length if do_translation else None,
)
# sort outputs back to original order
if do_recognition:
all_gls_outputs.extend(
[batch_gls_predictions[sri] for sri in sort_reverse_index]
)
if do_translation:
all_txt_outputs.extend(
batch_txt_predictions[sort_reverse_index]
)
if do_recognition:
assert len(all_gls_outputs) == len(trans_data)
# decode back to symbols
decoded_gls = model.gls_vocab.arrays_to_sentences(arrays=all_gls_outputs)
# Gloss clean-up function
if dataset_version == "phoenix_2014_trans":
gls_cln_fn = clean_phoenix_2014_trans
elif dataset_version == "phoenix_2014":
gls_cln_fn = clean_phoenix_2014
else:
raise ValueError("Unknown Dataset Version: " + dataset_version)
# Construct gloss sequences for metrics
gls_hyp = [gls_cln_fn(" ".join(t)) for t in decoded_gls]
if do_translation:
assert len(all_txt_outputs) == len(trans_data)
# decode back to symbols
decoded_txt = model.txt_vocab.arrays_to_sentences(arrays=all_txt_outputs)
# evaluate with metric on full dataset
join_char = " " if level in ["word", "bpe"] else ""
# Construct text sequences for metrics
txt_hyp = [join_char.join(t) for t in decoded_txt]
# post-process
if level == "bpe":
txt_hyp = [bpe_postprocess(v) for v in txt_hyp]
results = {
"gls_hyp": gls_hyp if do_recognition else None,
"txt_hyp": txt_hyp if do_translation else None,
}
return results['txt_hyp'][0]
def build_embeddings(trans_path: str, input_path: str) -> None:
with open(trans_path, "rb") as f:
trans = pickle.load(f)
with gzip.open("slt/data/phoenix14t.pami0.test", "rb") as f:
annotations = pickle.load(f)
video_name = 'test/' + input_path.split(sep='/')[2]
video_name = video_name[:-4]
for annot in annotations:
if video_name == annot['name']:
trans[0]['sign'] = annot['sign']
with open(trans_path, "wb") as f:
pickle.dump(trans, f)
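# Hypothetical call (config, checkpoint and video paths are assumptions, not from the repo):
# hypothesis = translate("configs/sign.yaml", ckpt=None, input_path="videos/test/01April_2010.mp4")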
``` |
{
"source": "joaopbernhardt/nubank_django",
"score": 2
} |
#### File: nubank_django/nubank_django/domain.py
```python
import json
import logging
from decimal import Decimal
from typing import List, Optional
from django.core.cache import cache
from nubank_django.models import (
CREDIT_STATEMENT_TYPES,
DEBIT_STATEMENT_TYPES,
AccountStatement,
CardStatement,
)
from nubank_django.nu import get_authed_nu_client
from nubank_django.utils import amount_to_decimal
logger = logging.getLogger(__name__)
NUBANK_CACHE_TTL = 60 * 60 * 2 # 2 hours
def persist_card_statements(parsed_card_statements: List[CardStatement]):
existing_card_statements_ids = CardStatement.objects.values_list("nubank_id", flat=True)
card_statements_to_create = []
for statement in parsed_card_statements:
if statement.nubank_id in existing_card_statements_ids:
continue
card_statements_to_create.append(statement)
CardStatement.objects.bulk_create(card_statements_to_create)
def parse_card_statements(raw_card_statements: List[dict]) -> List[CardStatement]:
logger.info(
"Starting parsing of card statements.",
extra={"card_statements_count": len(raw_card_statements)},
)
parsed_card_statements = []
for raw_card_statement in raw_card_statements:
try:
parsed_card_statement = CardStatement(
nubank_id=raw_card_statement["id"],
account=raw_card_statement.get("account"),
amount=amount_to_decimal(raw_card_statement["amount"] / 100),
amount_without_iof=amount_to_decimal(raw_card_statement.get("amount_without_iof", 0) / 100) or None,
category=raw_card_statement["category"],
description=raw_card_statement["description"],
details=raw_card_statement["details"],
source=raw_card_statement.get("source"),
time=raw_card_statement["time"],
title=raw_card_statement["title"],
tokenized=raw_card_statement.get("tokenized"),
)
parsed_card_statement.clean_fields()
parsed_card_statement.clean()
parsed_card_statements.append(parsed_card_statement)
except Exception:
logger.exception("Could not parse statement.", extra={"statement": raw_card_statement})
logger.info(
"Parsed card statements.",
extra={"parsed_card_statements_count": len(parsed_card_statements)},
)
return parsed_card_statements
def get_raw_card_statements(cache_policy: str = "push-pull") -> List[dict]:
logger.info("Starting to get raw card statement.", extra={cache_policy: cache_policy})
CARD_STATEMENTS_CACHE_KEY = "card_statements.json"
cached_statements = cache.get(CARD_STATEMENTS_CACHE_KEY)
if "pull" in cache_policy and cached_statements:
logger.info(f"Cache hit for '{CARD_STATEMENTS_CACHE_KEY}'.")
return json.loads(cached_statements)
nu = get_authed_nu_client()
raw_statements = nu.get_card_statements()
if "push" in cache_policy:
logger.info(f"Setting cache for '{CARD_STATEMENTS_CACHE_KEY}'.")
cache.set(CARD_STATEMENTS_CACHE_KEY, json.dumps(raw_statements), NUBANK_CACHE_TTL)
return raw_statements
def full_load_card_statements():
raw = get_raw_card_statements()
parsed = parse_card_statements(raw)
persist_card_statements(parsed)
def get_raw_account_statements(cache_policy: str = "push-pull") -> List[dict]:
logger.info("Starting to get raw nuconta statement.", extra={cache_policy: cache_policy})
nu = get_authed_nu_client()
raw_statements = nu.get_account_feed_with_pix_mapping()
logger.info("Returning nuconta statements", extra={"statements_count": len(raw_statements)})
return raw_statements
def persist_parsed_account_statements(
parsed_statements: List[AccountStatement],
) -> None:
logger.info(
"Started persisting statements.",
extra={"parsed_statements_count": len(parsed_statements)},
)
existing_statement_ids = AccountStatement.objects.values_list("nubank_id", flat=True)
statements_to_create = []
statements_already_existing = []
for statement in parsed_statements:
if statement.nubank_id in existing_statement_ids:
statements_already_existing.append(statement)
continue
statements_to_create.append(statement)
AccountStatement.objects.bulk_create(statements_to_create)
logger.info(
"Persisted statements to database.",
extra={
"statements_count": len(statements_to_create),
"already_existed_count": len(statements_already_existing),
},
)
def _account_name_from_statement(statement: dict) -> Optional[str]:
if statement.get("destinationAccount"):
account_name = statement["destinationAccount"]["name"]
elif statement["__typename"] == "PixTransferOutEvent":
account_name = statement["detail"].split("\n")[0]
elif statement["__typename"] == "TransferInEvent":
origin_account = statement.get("originAccount") or {}
account_name = origin_account.get("name", "DESCONHECIDO")
else:
account_name = None
return account_name
def parse_account_statements(raw_statements: List[dict]) -> List[AccountStatement]:
logger.info(
"Starting parsing of statements",
extra={"statements_count": len(raw_statements)},
)
parsed_statements = []
for raw_statement in raw_statements:
try:
parsed_statement = AccountStatement(
nubank_id=raw_statement["id"],
amount=amount_to_decimal(raw_statement["amount"]),
detail=raw_statement["detail"],
post_date=raw_statement["postDate"],
title=raw_statement["title"],
gql_typename=raw_statement["__typename"],
)
if parsed_statement.is_transfer_out:
parsed_statement.destination_account = _account_name_from_statement(raw_statement)
elif parsed_statement.is_transfer_in:
parsed_statement.origin_account = _account_name_from_statement(raw_statement)
# uniqueness is checked before persistence attempts, not here.
parsed_statement.clean_fields()
parsed_statement.clean()
parsed_statements.append(parsed_statement)
except Exception:
logger.exception("Could not parse statement.", extra={"statement": raw_statement})
logger.info("Parsed statements.", extra={"parsed_statements_count": len(parsed_statements)})
return parsed_statements
def full_load_nuconta_statements():
raw = get_raw_account_statements()
parsed = parse_account_statements(raw)
persist_parsed_account_statements(parsed)
```
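The `cache_policy` string above combines two independent behaviours: "pull" lets the function read a cached copy, "push" lets it refresh the cache after a fetch. Below is a minimal, self-contained sketch of that pattern; the dict-based cache and the lambda fetcher are stand-ins (assumptions), not the real Django cache or pynubank client.
```python
import json

_cache = {}  # stand-in for the Django cache backend

def get_with_policy(key, fetch_fn, cache_policy="push-pull"):
    cached = _cache.get(key)
    if "pull" in cache_policy and cached:
        return json.loads(cached)       # cache hit: skip the remote call
    data = fetch_fn()                   # cache miss or push-only policy: fetch fresh data
    if "push" in cache_policy:
        _cache[key] = json.dumps(data)  # refresh the cached copy
    return data

print(get_with_policy("card_statements.json", lambda: [{"id": 1}]))  # fetched
print(get_with_policy("card_statements.json", lambda: [{"id": 2}]))  # served from cache: [{'id': 1}]
```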
#### File: nubank_django/nubank_django/models.py
```python
from typing import Optional
from django.db import models
from django.forms import ValidationError
class CardStatement(models.Model):
"""
Example of card statement object from pynubank:
{'_links': {
'self': {'href': 'https://prod-s0-facade.nubank.com.br/api/transactions/4ecd9a59-3747-4af4-9192-a969b23cf513'}},
'account': '0cedea55-3fa8-4c4d-b72a-1bab24f0fb0a',
'amount': 3290,
'amount_without_iof': 3290,
'category': 'transaction',
'description': 'Netflix.Com',
'details': {'status': 'settled', 'subcategory': 'card_not_present'},
'href': 'nuapp://transaction/4ecd9a59-3747-4af4-9192-a969b23cf513',
'id': '4ecd9a59-3747-4af4-9192-a969b23cf513',
'source': 'upfront_national',
'time': '2021-03-21T10:56:13Z',
'title': 'serviços',
'tokenized': True}
"""
nubank_id = models.UUIDField()
account = models.UUIDField(null=True, blank=True)
amount = models.DecimalField(max_digits=12, decimal_places=2)
amount_without_iof = models.DecimalField(max_digits=12, decimal_places=2, null=True, blank=True)
category = models.CharField(max_length=128)
description = models.TextField()
details = models.JSONField()
source = models.CharField(max_length=64, null=True, blank=True)
time = models.DateTimeField(db_index=True)
title = models.CharField(max_length=128)
tokenized = models.BooleanField(null=True, blank=True)
def __str__(self) -> str:
return f"({self.time.date().strftime('%d/%b/%Y')}) {self.description}: R$ {self.amount}"
DEBIT_STATEMENT_TYPES = (
"TransferOutEvent",
"BillPaymentEvent",
"BarcodePaymentEvent",
"PixTransferOutEvent",
)
CREDIT_STATEMENT_TYPES = ("TransferInEvent", "TransferOutReversalEvent")
class AccountStatement(models.Model):
ACCOUNT_STATEMENT_TYPE = (
# DEBIT
("TransferOutEvent", "TransferOutEvent"),
("BillPaymentEvent", "BillPaymentEvent"),
("BarcodePaymentEvent", "BarcodePaymentEvent"),
("PixTransferOutEvent", "PixTransferOutEvent"),
# CREDIT
("TransferOutReversalEvent", "TransferOutReversalEvent"),
("TransferInEvent", "TransferInEvent"),
# RESERVE
("RemoveFromReserveEvent", "RemoveFromReserveEvent"),
("AddToReserveEvent", "AddToReserveEvent"),
# UNKNOWN
("DebitPurchaseEvent", "DebitPurchaseEvent"),
("DebitWithdrawalFeeEvent", "DebitWithdrawalFeeEvent"),
("DebitWithdrawalEvent", "DebitWithdrawalEvent"),
)
nubank_id = models.UUIDField(unique=True)
destination_account = models.CharField(max_length=256, null=True, blank=True)
origin_account = models.CharField(max_length=256, null=True, blank=True)
amount = models.DecimalField(max_digits=12, decimal_places=2)
detail = models.TextField()
post_date = models.DateField()
title = models.CharField(max_length=128)
gql_typename = models.CharField("Statement Type", choices=ACCOUNT_STATEMENT_TYPE, max_length=64)
def __str__(self):
return f"({self.post_date}) {self.detail}: R$ {self.amount}"
@property
def is_transfer_out(self):
return "TransferOutEvent" in self.gql_typename
@property
def is_transfer_in(self):
return self.gql_typename == "TransferInEvent"
@property
def account_name(self) -> Optional[str]:
if self.is_transfer_out:
return self.destination_account
elif self.is_transfer_in:
return self.origin_account
def clean(self):
if self.destination_account and self.origin_account:
raise ValidationError("Destination and Origin accounts are mutually exclusive.")
if self.is_transfer_in and not getattr(self, "origin_account", None):
raise ValidationError("TransferInEvent must have an associated origin_account.")
if self.is_transfer_out and not getattr(self, "destination_account", None):
raise ValidationError("*TransferOutEvent must have an associated destination_account.")
``` |
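A plain-Python sketch of the validation rules enforced by `AccountStatement.clean()`, using a dataclass stand-in so it runs without a configured Django project; the class name `StatementSketch` and the use of `ValueError` are assumptions for illustration only.
```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class StatementSketch:
    gql_typename: str
    destination_account: Optional[str] = None
    origin_account: Optional[str] = None

    def clean(self):
        # Mirrors the model: the two account fields are mutually exclusive, and
        # each transfer direction requires its own counterpart account.
        if self.destination_account and self.origin_account:
            raise ValueError("Destination and Origin accounts are mutually exclusive.")
        if self.gql_typename == "TransferInEvent" and not self.origin_account:
            raise ValueError("TransferInEvent must have an associated origin_account.")
        if "TransferOutEvent" in self.gql_typename and not self.destination_account:
            raise ValueError("*TransferOutEvent must have an associated destination_account.")

StatementSketch("TransferInEvent", origin_account="Acme Bank").clean()  # passes
try:
    StatementSketch("PixTransferOutEvent").clean()                      # missing destination_account
except ValueError as exc:
    print(exc)
```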
{
"source": "joaopbicalho/CodingInPython",
"score": 3
} |
#### File: CodingInPython/gamify/gamify.py
```python
def get_cur_hedons():
global cur_hedons
return cur_hedons
def get_cur_health():
global cur_health
return cur_health
def offer_star(activity):
global cur_star
global star_counter
global time_since_curstar
global time_since_star
global star_break
global time_since_star1
global time_since_star2
global star_span
time_since_curstar = 0
star_counter += 1
time_since_star = time_since_star1 + time_since_star2
if time_since_star >= 120:
star_counter += -1
time_since_star1 = time_since_star2
time_since_star2 = 0
star_span = 1
if star_counter > 2:
cur_star = "none"
star_break = "activated"
elif activity == "running":
cur_star = "running"
elif activity == "textbooks":
cur_star = "textbooks"
elif time_since_curstar < 120:
if star_counter > 2:
cur_star = "none"
star_break = "activated"
elif activity == "running":
cur_star = "running"
star_span = star_counter
elif activity == "textbooks":
cur_star = "textbooks"
star_span = star_counter
def perform_activity(activity, duration):
global cur_health
global cur_hedons
global running_duration
global running_counter
global resting_duration
global user_state
global textbooks_duration
global textbooks_counter
global running_hed_counter
global star_time
global cur_star
global star_counter
global time_since_star
global time_since_curstar
global star_break
global time_since_star1
global time_since_star2
global star_span
global textbook_hed_counter
if activity == "running":
running_duration += duration
resting_duration = 0
textbook_hed_counter = 0
textbooks_duration = 0
textbooks_counter = 0
if user_state == "tired" and cur_star != "running":
cur_hedons += duration * (-2)
cur_star = "none"
time_since_curstar = "not zero"
if running_duration <= 180:
cur_health += duration*3
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration >180:
running_counter += 1
if running_counter == 1:
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += (duration)
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star != "running":
running_hed_counter += 1
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if running_duration <= 10:
cur_hedons += running_duration * 2
user_state = "tired"
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 10 and running_hed_counter == 1:
cur_hedons += (running_duration - 10) * (-2) + 20
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_hedons += duration * (-1)
user_state = "tired"
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_counter != 1:
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif cur_star == "running" and user_state == "not tired" and star_break != "activated" and time_since_curstar == 0:
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += 5 * duration
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 50
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540 - (running_duration - duration) * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif cur_star == "running" and user_state == "tired" and star_break != "activated" and time_since_curstar == 0:
user_state = "tired"
cur_star = "none"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += duration
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 10
if running_duration <= 180:
cur_health += duration * 3
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif running_duration > 180 and running_counter == 1:
running_counter += 1
cur_health += (running_duration - 180) + 540
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
cur_health += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif activity == "textbooks":
resting_duration = 0
cur_health = cur_health + 2 * duration
running_duration = 0
running_counter = 0
textbooks_counter += 1
textbooks_duration += duration
if user_state == "tired" and cur_star != "textbooks":
cur_star = "none"
cur_hedons += duration * (-2)
time_since_curstar = "not zero"
user_state = "tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star == "textbooks" and star_break != "activated" and time_since_curstar == 0:
cur_star = "none"
time_since_curstar = "not zero"
user_state = "tired"
if duration <= 10:
cur_hedons += 4 * duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration <= 20:
cur_hedons += (duration - 10) + 40
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20:
cur_hedons += ((duration - 20) * (-1)) + 50
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "tired" and cur_star == "textbooks" and star_break != "activated" and time_since_curstar == 0:
cur_star = "none"
user_state = "tired"
time_since_curstar = "not zero"
if duration <= 10:
cur_hedons += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 10:
cur_hedons += (duration - 10) * (-2) + 10
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif user_state == "not tired" and cur_star != "textbooks":
cur_star = "none"
user_state = "tired"
time_since_curstar = "not zero"
if duration <= 20:
cur_hedons += duration
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20 and textbook_hed_counter == 0:
textbook_hed_counter += 1
cur_hedons += (duration - 20) * (-1) + 20
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif duration > 20 and textbook_hed_counter != 0:
textbook_hed_counter += 1
cur_hedons += (duration) * (-1)
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
elif activity == "resting":
running_duration = 0
textbook_hed_counter = 0
running_counter = 0
textbooks_duration = 0
textbooks_counter = 0
resting_duration += duration
time_since_curstar = "not zero"
cur_star = "none"
if resting_duration >= 120:
user_state = "not tired"
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
else:
if star_span == 1:
time_since_star1 += duration
elif star_span == 2:
time_since_star2 += duration
def star_can_be_taken(activity):
global cur_star
global star_break
global time_since_curstar
if star_break != "activated" and cur_star == activity and time_since_curstar == 0:
return True
else:
return False
def most_fun_activity_minute():
global user_state
    global cur_star
global running_duration
global textbooks_duration
if (user_state == "not tired" and running_duration < 10) or cur_star == "running":
return "running"
elif cur_star == "textbooks" or (textbooks_duration < 20 and user_state == "not tired"):
return "textbooks"
else:
return "resting"
def initialize():
global cur_hedons
cur_hedons = 0
global cur_health
cur_health = 0
global running_duration
running_duration = 0
global running_counter
running_counter = 0
global textbooks_duration
textbooks_duration = 0
global resting_duration
resting_duration = 0
global user_state
user_state = "not tired"
global cur_star
cur_star = "none"
global textbooks_counter
textbooks_counter = 0
global running_hed_counter
running_hed_counter = 0
global star_time
star_time = 0
global star_counter
star_counter = 0
global time_since_star
time_since_star = 0
global time_since_curstar
time_since_curstar = "not zero"
global star_break
star_break = "not activated"
global time_since_star1
time_since_star1 = 0
global time_since_star2
time_since_star2 = 0
global star_span
star_span = 0
global textbook_hed_counter
textbook_hed_counter = 0
# Variety of tests to verify most possible conditions of the game rules were met
if __name__=="__main__":
initialize()
perform_activity("running", 30)
print(get_cur_hedons()) # -20 = 10 * 2 + 20 * (-2)
print(get_cur_health()) # 90 = 30 * 3
print(most_fun_activity_minute()) #resting
perform_activity("resting", 30)
offer_star("running")
print(most_fun_activity_minute()) # running
perform_activity("textbooks", 30)
print(get_cur_health()) # 150 = 90 + 30*2
print(get_cur_hedons()) # -80 = -20 + 30 * (-2)
offer_star("running")
perform_activity("running", 20)
print(get_cur_health()) # 210 = 150 + 20 * 3
print(get_cur_hedons()) # -90 = -80 + 10 * (3-2) + 10 * (-2)
perform_activity("running", 170)
print(get_cur_health()) # 700 = 210 + 160 * 3 + 10 * 1
print(get_cur_hedons()) # -430 = -90 + 170 * (-2)
offer_star("running")
initialize()
offer_star("running")
perform_activity("running", 30)
print(get_cur_health()) # 90 = 30*3
print(get_cur_hedons()) # 10 = 5 * 10 + (20 * -2)
perform_activity("running", 30)
print(get_cur_health()) # 180 = 90 + 90
    print(get_cur_hedons()) # -50 = 10 + (-2 * 30)
print(star_can_be_taken("running")) # False
offer_star("textbooks")
perform_activity("resting", 90)
offer_star("textbooks")
print(star_can_be_taken("textbooks")) # True
perform_activity("resting", 120)
offer_star("running")
print(star_can_be_taken("running")) #False
perform_activity("running" , 180)
print(get_cur_health()) # 720 = 180 + (180 * 3)
print(get_cur_hedons()) # -340 = -50 + (50 * 10) + (-2 * 170)
initialize()
perform_activity("resting", 120)
print(get_cur_health())
print(get_cur_hedons())
offer_star("running")
perform_activity("resting", 60)
print(get_cur_health())
print(get_cur_hedons())
offer_star("running")
perform_activity("running", 30)
print(get_cur_health())
print(get_cur_hedons())
offer_star("textbooks")
perform_activity("running", 10)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("textbooks", 130)
print(get_cur_health())
print(get_cur_hedons())
offer_star("textbooks")
perform_activity("resting", 110)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 50)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("textbooks", 110)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("resting", 110)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("textbooks", 20)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("resting", 70)
print(get_cur_health())
print(get_cur_hedons())
print("----------------------------------")
initialize()
perform_activity("running", 50)
print(get_cur_health())
print(get_cur_hedons())
offer_star("running")
perform_activity("running", 40)
print(get_cur_health())
print(get_cur_hedons())
offer_star("running")
offer_star("textbooks")
perform_activity("textbooks", 60)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 60)
print(get_cur_health())
print(get_cur_hedons())
print(most_fun_activity_minute())
offer_star("textbooks")
perform_activity("resting", 80)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("textbooks", 40)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("textbooks", 120)
print(get_cur_health())
print(get_cur_hedons())
offer_star("textbooks")
perform_activity("running", 20)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 70)
print(get_cur_health())
print(get_cur_hedons())
print("----------------------------------")
initialize()
perform_activity("textbooks", 10)
print(get_cur_health())
print(get_cur_hedons())
print(most_fun_activity_minute())
offer_star("running")
offer_star("running")
offer_star("running")
perform_activity("resting", 50)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("resting", 30)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("resting", 30)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("resting", 90)
print(get_cur_health())
print(get_cur_hedons())
print(most_fun_activity_minute())
offer_star("textbooks")
perform_activity("textbooks", 30)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 50)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 70)
print(get_cur_health())
print(get_cur_hedons())
perform_activity("running", 130)
print(get_cur_health())
print(get_cur_hedons())
initialize()
offer_star("textbooks")
perform_activity("textbooks", 120)
offer_star("running")
offer_star("textbooks")
print(star_can_be_taken("textbooks"))
offer_star("textbooks")
print(star_can_be_taken("textbooks"))
```
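A minimal usage sketch of the module's public functions, assuming the file above is saved as `gamify.py` on the import path; the expected values are taken from the module's own test comments.
```python
import gamify

gamify.initialize()
gamify.perform_activity("running", 30)
print(gamify.get_cur_hedons())            # -20 = 10 * 2 + 20 * (-2)
print(gamify.get_cur_health())            # 90 = 30 * 3
print(gamify.most_fun_activity_minute())  # resting
```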
#### File: CodingInPython/gomoku/gomoku.py
```python
def is_empty(board):
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] != " ":
return False
return True
def is_bounded(board, y_end, x_end, length, d_y, d_x):
open_ends = 0
s = (y_end - (length * d_y))
z = (x_end - (d_x * length))
ss = s <= 7
zz = z <= 7
sss = s >= 0
zzz = z >= 0
s1 = (y_end + d_y)
z1 = (x_end + d_x)
ss1 = s1 <= 7
zz1 = z1 <= 7
sss1 = s1 >= 0
zzz1 = z1 >= 0
if ss1 and zz1 and sss1 and zzz1:
if board[y_end + d_y][x_end + d_x] == " ":
open_ends += 1
if ss and zz and sss and zzz:
if board[y_end - (length * d_y)][x_end - (d_x * length)] == " ":
open_ends += 1
if open_ends == 2:
return "OPEN"
elif open_ends == 1:
return "SEMIOPEN"
elif open_ends == 0:
return "CLOSED"
def is_win(board):
if detect_rows(board, "w", 5)[1] > 0:
return "White won"
elif detect_rows(board, "w", 5)[0] > 0:
return "White won"
elif closed_detect_rows(board, "w", 5) > 0:
return "White won"
elif detect_rows(board, "b", 5)[1] > 0:
return "Black won"
elif detect_rows(board, "b", 5)[0] > 0:
return "Black won"
elif closed_detect_rows(board, "b", 5) > 0:
return "Black won"
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] == " ":
return "Continue playing"
return "Draw"
''' The Remaining functions were written by <NAME> '''
def detect_row(board, col, y_start, x_start, length, d_y, d_x):
open_seq_count, semi_open_seq_count = 0, 0
count = 0
i = 0
seq_coord = []
while i < 8:
s = y_start + (i * d_y)
z = x_start + (i * d_x)
s1 = s >= 0
z1 = z >= 0
s2 = s<= 7
z2 = z <= 7
if s1 and s2 and z1 and z2:
if board[y_start + (i * d_y)][x_start + (i * d_x)] == col:
while s1 and s2 and z1 and z2 and board[y_start + (i* d_y)][x_start + (i * d_x)] == col:
count += 1
i += 1
s = y_start + ((i) * d_y)
z = x_start + ((i) * d_x)
s1 = s >= 0
z1 = z >= 0
s2 = s<= 7
z2 = z <= 7
if count == length:
seq_coord.append([y_start + ((i-1) * d_y) , x_start + ((i-1) * d_x)])
count = 0
i += 1
else:
count = 0
i += 1
else:
i += 1
else:
i += 1
for i in range(len(seq_coord)):
if is_bounded(board, seq_coord[i][0], seq_coord[i][1], length, d_y, d_x) == "OPEN":
open_seq_count += 1
elif is_bounded(board, seq_coord[i][0], seq_coord[i][1], length, d_y, d_x) == "SEMIOPEN":
semi_open_seq_count += 1
return open_seq_count, semi_open_seq_count
def closed_detect_row(board, col, y_start, x_start, length, d_y, d_x):
closed_seq_count = 0
count = 0
i = 0
seq_coord = []
while i < 8:
s = y_start + (i * d_y)
z = x_start + (i * d_x)
s1 = s >= 0
z1 = z >= 0
s2 = s<= 7
z2 = z <= 7
if s1 and s2 and z1 and z2:
if board[y_start + (i * d_y)][x_start + (i * d_x)] == col:
while s1 and s2 and z1 and z2 and board[y_start + (i* d_y)][x_start + (i * d_x)] == col:
count += 1
i += 1
s = y_start + ((i) * d_y)
z = x_start + ((i) * d_x)
s1 = s >= 0
z1 = z >= 0
s2 = s<= 7
z2 = z <= 7
if count == length:
seq_coord.append([y_start + ((i-1) * d_y) , x_start + ((i-1) * d_x)])
count = 0
i += 1
else:
count = 0
i += 1
else:
i += 1
else:
i += 1
for i in range(len(seq_coord)):
if is_bounded(board, seq_coord[i][0], seq_coord[i][1], length, d_y, d_x) == "CLOSED":
closed_seq_count += 1
return closed_seq_count
def closed_detect_rows(board, col, length):
closed_seq_count = 0
for i in range(len(board)):
a = (closed_detect_row(board, col, 0, i, length, 1,0))
closed_seq_count += a
for i in range(len(board)):
c = (closed_detect_row(board, col, i, 0, length, 0,1))
closed_seq_count += c
for i in range(len(board)):
e = (closed_detect_row(board, col, 0, i, length, 1,1))
closed_seq_count += e
for i in range(1,len(board)):
e1 =(closed_detect_row(board, col, i, 0, length, 1,1))
closed_seq_count += e1
for i in range(len(board)):
g = (closed_detect_row(board, col, 0, i, length, 1,-1))
closed_seq_count += g
for i in range(1, len(board)):
g1 = (closed_detect_row(board, col, i, 7, length, 1,-1))
closed_seq_count += g1
return closed_seq_count
def detect_rows(board, col, length):
open_seq_count, semi_open_seq_count = 0, 0
for i in range(len(board)):
a = (detect_row(board, col, 0, i, length, 1,0))[0]
b = (detect_row(board, col, 0, i, length, 1,0))[1]
open_seq_count += a
semi_open_seq_count += b
for i in range(len(board)):
c = (detect_row(board, col, i, 0, length, 0,1))[0]
d = (detect_row(board, col, i, 0, length, 0,1))[1]
open_seq_count += c
semi_open_seq_count += d
for i in range(len(board)):
e = (detect_row(board, col, 0, i, length, 1,1))[0]
f = (detect_row(board, col, 0, i, length, 1,1))[1]
open_seq_count += e
semi_open_seq_count += f
for i in range(1,len(board)):
e1 =(detect_row(board, col, i, 0, length, 1,1))[0]
f1 =(detect_row(board, col, i, 0, length, 1,1))[1]
open_seq_count += e1
semi_open_seq_count += f1
for i in range(len(board)):
g = (detect_row(board, col, 0, i, length, 1,-1))[0]
h = (detect_row(board, col, 0, i, length, 1,-1))[1]
open_seq_count += g
semi_open_seq_count += h
for i in range(1, len(board)):
g1 = (detect_row(board, col, i, 7, length, 1,-1))[0]
h1 = (detect_row(board, col, i, 7, length, 1,-1))[1]
open_seq_count += g1
semi_open_seq_count += h1
return open_seq_count, semi_open_seq_count
def search_max(board):
fake_board = board
possible_scores = []
pos_scores_coord = []
move_y, move_x = 0, 0
for i in range(len(board)):
for j in range(len(board)):
if board[i][j] == " ":
fake_board[i][j] = "b"
possible_scores.append([score(fake_board),i,j])
fake_board[i][j] = " "
for i in range(len(possible_scores)):
pos_scores_coord.append(possible_scores[i][0])
pop_o = pos_scores_coord.index(max(pos_scores_coord))
possible_scores[pos_scores_coord.index(max(pos_scores_coord))]
move_y = possible_scores[pop_o][1]
move_x = possible_scores[pop_o][2]
return move_y, move_x
def score(board):
MAX_SCORE = 100000
open_b = {}
semi_open_b = {}
open_w = {}
semi_open_w = {}
for i in range(2, 6):
open_b[i], semi_open_b[i] = detect_rows(board, "b", i)
open_w[i], semi_open_w[i] = detect_rows(board, "w", i)
if open_b[5] >= 1 or semi_open_b[5] >= 1:
return MAX_SCORE
elif open_w[5] >= 1 or semi_open_w[5] >= 1:
return -MAX_SCORE
return (-10000 * (open_w[4] + semi_open_w[4])+
500 * open_b[4] +
50 * semi_open_b[4] +
-100 * open_w[3] +
-30 * semi_open_w[3] +
50 * open_b[3] +
10 * semi_open_b[3] +
open_b[2] + semi_open_b[2] - open_w[2] - semi_open_w[2])
def print_board(board):
s = "*"
for i in range(len(board[0])-1):
s += str(i%10) + "|"
s += str((len(board[0])-1)%10)
s += "*\n"
for i in range(len(board)):
s += str(i%10)
for j in range(len(board[0])-1):
s += str(board[i][j]) + "|"
s += str(board[i][len(board[0])-1])
s += "*\n"
s += (len(board[0])*2 + 1)*"*"
print(s)
def make_empty_board(sz):
board = []
for i in range(sz):
board.append([" "]*sz)
return board
def analysis(board):
for c, full_name in [["b", "Black"], ["w", "White"]]:
print("%s stones" % (full_name))
for i in range(2, 6):
open, semi_open = detect_rows(board, c, i);
print("Open rows of length %d: %d" % (i, open))
print("Semi-open rows of length %d: %d" % (i, semi_open))
def play_gomoku(board_size):
board = make_empty_board(board_size)
board_height = len(board)
board_width = len(board[0])
while True:
print_board(board)
if is_empty(board):
move_y = board_height // 2
move_x = board_width // 2
else:
move_y, move_x = search_max(board)
print("Computer move: (%d, %d)" % (move_y, move_x))
board[move_y][move_x] = "b"
print_board(board)
analysis(board)
game_res = is_win(board)
if game_res in ["White won", "Black won", "Draw"]:
return game_res
print("Your move:")
move_y = int(input("y coord: "))
move_x = int(input("x coord: "))
board[move_y][move_x] = "w"
print_board(board)
analysis(board)
game_res = is_win(board)
if game_res in ["White won", "Black won", "Draw"]:
return game_res
def put_seq_on_board(board, y, x, d_y, d_x, length, col):
for i in range(length):
board[y][x] = col
y += d_y
x += d_x
def test_is_empty():
board = make_empty_board(8)
if is_empty(board):
print("TEST CASE for is_empty PASSED")
else:
print("TEST CASE for is_empty FAILED")
def test_is_bounded():
board = make_empty_board(8)
x = 5; y = 1; d_x = 0; d_y = 1; length = 3
put_seq_on_board(board, y, x, d_y, d_x, length, "w")
print_board(board)
y_end = 3
x_end = 5
if is_bounded(board, y_end, x_end, length, d_y, d_x) == 'OPEN':
print("TEST CASE for is_bounded PASSED")
else:
print("TEST CASE for is_bounded FAILED")
def test_detect_row():
board = make_empty_board(8)
x = 5; y = 1; d_x = 0; d_y = 1; length = 3
put_seq_on_board(board, y, x, d_y, d_x, length, "w")
print_board(board)
if detect_row(board, "w", 0,x,length,d_y,d_x) == (1,0):
print("TEST CASE for detect_row PASSED")
else:
print("TEST CASE for detect_row FAILED")
def test_detect_rows():
board = make_empty_board(8)
x = 5; y = 1; d_x = 0; d_y = 1; length = 3; col = 'w'
put_seq_on_board(board, y, x, d_y, d_x, length, "w")
print_board(board)
if detect_rows(board, col,length) == (1,0):
print("TEST CASE for detect_rows PASSED")
else:
print("TEST CASE for detect_rows FAILED")
def test_search_max():
board = make_empty_board(8)
x = 5; y = 0; d_x = 0; d_y = 1; length = 4; col = 'w'
put_seq_on_board(board, y, x, d_y, d_x, length, col)
x = 6; y = 0; d_x = 0; d_y = 1; length = 4; col = 'b'
put_seq_on_board(board, y, x, d_y, d_x, length, col)
print_board(board)
if search_max(board) == (4,6):
print("TEST CASE for search_max PASSED")
else:
print("TEST CASE for search_max FAILED")
def easy_testset_for_main_functions():
test_is_empty()
test_is_bounded()
test_detect_row()
test_detect_rows()
test_search_max()
def some_tests():
board = make_empty_board(8)
board[0][5] = "w"
board[0][6] = "b"
y = 5; x = 2; d_x = 0; d_y = 1; length = 3
put_seq_on_board(board, y, x, d_y, d_x, length, "w")
print_board(board)
analysis(board)
# Expected output:
# *0|1|2|3|4|5|6|7*
# 0 | | | | |w|b| *
# 1 | | | | | | | *
# 2 | | | | | | | *
# 3 | | | | | | | *
# 4 | | | | | | | *
# 5 | |w| | | | | *
# 6 | |w| | | | | *
# 7 | |w| | | | | *
# *****************
# Black stones:
# Open rows of length 2: 0
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 0
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
# White stones:
# Open rows of length 2: 0
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 1
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
y = 3; x = 5; d_x = -1; d_y = 1; length = 2
put_seq_on_board(board, y, x, d_y, d_x, length, "b")
print_board(board)
analysis(board)
# Expected output:
# *0|1|2|3|4|5|6|7*
# 0 | | | | |w|b| *
# 1 | | | | | | | *
# 2 | | | | | | | *
# 3 | | | | |b| | *
# 4 | | | |b| | | *
# 5 | |w| | | | | *
# 6 | |w| | | | | *
# 7 | |w| | | | | *
# *****************
#
# Black stones:
# Open rows of length 2: 1
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 0
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
# White stones:
# Open rows of length 2: 0
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 1
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
#
y = 5; x = 3; d_x = -1; d_y = 1; length = 1
put_seq_on_board(board, y, x, d_y, d_x, length, "b");
print_board(board);
analysis(board);
# Expected output:
# *0|1|2|3|4|5|6|7*
# 0 | | | | |w|b| *
# 1 | | | | | | | *
# 2 | | | | | | | *
# 3 | | | | |b| | *
# 4 | | | |b| | | *
# 5 | |w|b| | | | *
# 6 | |w| | | | | *
# 7 | |w| | | | | *
# *****************
#
#
# Black stones:
# Open rows of length 2: 0
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 1
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
# White stones:
# Open rows of length 2: 0
# Semi-open rows of length 2: 0
# Open rows of length 3: 0
# Semi-open rows of length 3: 1
# Open rows of length 4: 0
# Semi-open rows of length 4: 0
# Open rows of length 5: 0
# Semi-open rows of length 5: 0
if __name__ == '__main__':
play_gomoku(8)
test_is_bounded()
test_detect_row()
test_detect_rows()
board = make_empty_board(8)
x = 0; y = 0; d_x = 1; d_y = 1; length = 3; col = 'w'
put_seq_on_board(board, y, x, d_y, d_x, length, "b")
print_board(board)
test_search_max()
print(detect_rows(board, "b", 3))
```
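A short usage sketch, assuming the file above is saved as `gomoku.py` on the import path; the expected `(1, 0)` result comes from the module's own `test_detect_rows`.
```python
from gomoku import make_empty_board, put_seq_on_board, detect_rows, print_board

board = make_empty_board(8)
put_seq_on_board(board, y=1, x=5, d_y=1, d_x=0, length=3, col="w")
print_board(board)
print(detect_rows(board, "w", 3))  # (1, 0): one open row of length 3, no semi-open rows
```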
#### File: CodingInPython/matrix_as_lists/lists_and_matrices.py
```python
def list1_start_with_list2(list1, list2):
if len(list1) < len(list2):
return False
for i in range(len(list2)):
if list1[i] != list2[i]:
return False
return True
L = [1,2,3,4]
M = [1,2,3]
#returns True iff the pattern list2 appears in list1
def match_pattern(list1, list2):
    # Slide a window of len(list2) across list1 and compare slices directly.
    if len(list2) > len(list1):
        return False
    for i in range(len(list1) - len(list2) + 1):
        if list1[i:i + len(list2)] == list2:
            return True
    return False
L = [2, 2, 2, 3, 50, 100]
M = [2, 3, 50]
#returns True iff list0 contains at least two adjacent elements with the same value.
def repeats(list0):
for i in range (len(list0) - 1):
if list0[i] == list0[i+1]:
return True
return False
M = [[5, 6, 7],
[0, -3, 5],
[0, 2, 4]]
v = [1,1,1]
N = [[1, 0, 0],
[0, 1, 0],
[0, 0, 1]]
#prints the dimensions of Matrix M stored as a list of lists
def print_matrix_dim(M):
row = len(M)
    column = len(M[0])
print(row, "x", column)
#Multiplies a matrix M by a vector v
def mult_M_v(M, v):
Mv = []
if len(M[0]) != len(v):
return False
for i in range (len(M)):
x = 0
for j in range(len(M[0])):
x += M[i][j] * v[j]
Mv.append(x)
return Mv
#Performs matrix multiplication
def mult_M_N(M, N):
    # Standard triple loop: MN[i][j] is the sum over k of M[i][k] * N[k][j].
    if len(M[0]) != len(N):
        return False
    MN = []
    for i in range(len(M)):
        row = []
        for j in range(len(N[0])):
            x = 0
            for k in range(len(N)):
                x += M[i][k] * N[k][j]
            row.append(x)
        MN.append(row)
    return MN
```
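Quick checks for the helpers above, assuming the file is saved as `lists_and_matrices.py` on the import path; the matrices `A` and `I` below mirror the module's own examples.
```python
from lists_and_matrices import match_pattern, mult_M_v, mult_M_N

A = [[5, 6, 7],
     [0, -3, 5],
     [0, 2, 4]]
I = [[1, 0, 0],
     [0, 1, 0],
     [0, 0, 1]]

print(match_pattern([2, 2, 2, 3, 50, 100], [3, 50, 100]))  # True
print(mult_M_v(A, [1, 1, 1]))                              # [18, 2, 6]
print(mult_M_N(A, I) == A)                                 # True: multiplying by the identity returns A
```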
#### File: CodingInPython/synonym/synonyms.py
```python
import math
def norm(vec):
'''Return the norm of a vector stored as a dictionary,
as described in the handout for Project 3.
'''
sum_of_squares = 0.0
for x in vec:
sum_of_squares += vec[x] * vec[x]
return math.sqrt(sum_of_squares)
def cosine_similarity(vec1, vec2):
vecs_dot = 0
vecs_dot = sum(vec1[key]*vec2.get(key, 0) for key in vec1)
if vecs_dot == 0:
return -1
mag_vec1 = 0
for key in vec1:
mag_vec1 += (vec1[key] * vec1[key])
mag_vec2 = 0
for key in vec2:
mag_vec2 += (vec2[key] * vec2[key])
mags = math.sqrt((mag_vec1 * mag_vec2))
sim = vecs_dot/mags
return sim
def build_semantic_descriptors(sentences):
d = {}
for sentence in range(len(sentences)):
for word in range(len(sentences[sentence])):
for i in range(len(sentences[sentence])):
if sentences[sentence][word] == sentences[sentence][i] and i != word:
sentences[sentence][i] = -1
for word in range(len(sentences[sentence])):
if sentences[sentence][word] not in d.keys() and sentences[sentence][word] != -1:
d[sentences[sentence][word]] = {}
for i in range(len(sentences[sentence])):
if i != word and sentences[sentence][i] != -1 and sentences[sentence][word] != -1:
if sentences[sentence][i] not in d[sentences[sentence][word]].keys():
d[(sentences[sentence][word])][sentences[sentence][i]] = 1
else:
d[(sentences[sentence][word])][sentences[sentence][i]] += 1
return d
def build_semantic_descriptors_from_files(filenames):
files = []
text = ""
for i in range(len(filenames)):
text += open(filenames[i], "r", encoding="latin1").read()
text = text.replace(".", "?").replace("!", "?")
text = text.replace(",", ";").replace("-", ";").replace("--", ";").replace(":", ";")
text = text.replace(";", " ")
text = text.lower()
text = text.split("?")
for i in range(len(text)):
text[i] = text[i].split()
return build_semantic_descriptors(text)
def most_similar_word(word, choices, semantic_descriptors, similarity_fn):
x = []
if word in semantic_descriptors:
word_v = semantic_descriptors[word]
else:
word_v = {1:-1}
for i in range(len(choices)):
if choices[i] in semantic_descriptors:
choices_v = semantic_descriptors[choices[i]]
x.append(similarity_fn(word_v, choices_v))
else:
x.append(-1)
best_choice_val = max(x)
best_choice = choices[x.index(best_choice_val)]
return best_choice
def run_similarity_test(filename, semantic_descriptors, similarity_fn):
right = 0
text = open(filename, "r", encoding="latin-1").read()
text = text.split("\n")
for i in range(len(text)):
text[i] = text[i].split(" ")
word = text[i][0]
correct_choice = text[i][1]
choices = text[i][2:]
AI = most_similar_word(word, choices, semantic_descriptors, similarity_fn)
if AI == correct_choice:
right += 1
return ((right/(len(text)))*100)
if __name__ == '__main__':
#x = run_similarity_test("test.txt", descriptors, cosine_similarity)
filenames = ["wp.txt", "sw.txt"]
y = build_semantic_descriptors_from_files(filenames)
z = run_similarity_test("test.txt", y, cosine_similarity)
#L = [["i","i","am", "a", "sick", "man"],["i", "am", "a", "spiteful", "man"],["i", "am", "an", "unattractive", "man"],["i", "believe", "my" ,"liver", "liver" ,"diseased", "is", "diseased"],["however", "i", "know", "nothing", "at", "all", "about", "my","disease","disease","and", "do", "not", "know", "for", "certain", "what", "ails", "me"]]
#x = build_semantic_descriptors(L)
``` |
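A small usage sketch for the similarity helpers, assuming the file is saved as `synonyms.py`; the toy sentences below are assumptions for illustration, not the project's corpus.
```python
from synonyms import build_semantic_descriptors, most_similar_word, cosine_similarity

sentences = [["i", "am", "a", "sick", "man"],
             ["i", "am", "a", "spiteful", "man"],
             ["he", "is", "a", "happy", "man"]]
desc = build_semantic_descriptors(sentences)
print(cosine_similarity(desc["sick"], desc["spiteful"]))  # 1.0: identical co-occurrence profiles
print(most_similar_word("sick", ["spiteful", "happy"], desc, cosine_similarity))  # spiteful
```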
{
"source": "joaopcanario/cfopen-api",
"score": 3
} |
#### File: cfopenapi/blueprints/cfopen.py
```python
from flask import Blueprint, jsonify, request
from ..database import connect
from ..championship.board import Athlete
cfopen_bp = Blueprint("cfopen_bp", __name__)
@cfopen_bp.route('/leaderboards', methods=['GET'])
def leaderboards():
'''
Brazil custom leaderboards
    Get a state or region leaderboard filtered by division, male or female.
__Available leaderboards__
- Bahia
__Available Divisions__
- Masculino
- Feminino
- Boys (14-15)
- Girls (14-15)
- Boys (16-17)
- Girls (16-17)
- Men (18-34)
- Women (18-34)
- Men (35-39)
- Women (35-39)
- Men (40-44)
- Women (40-44)
- Men (45-49)
- Women (45-49)
- Men (50-54)
- Women (50-54)
- Men (55-59)
- Women (55-59)
- Men (60+)
- Women (60+)
---
tags:
- Open
summary: Get custom leaderboard
parameters:
- name: name
in: query
description: The board name.
type: string
required: true
- name: division
in: query
description: Athletes division.
type: string
required: true
responses:
200:
description: Ranking of athletes by division
schema:
type: object
properties:
ranking:
type: array
items:
schema:
properties:
affiliateName:
type: string
competitorName:
type: string
overallScore:
type: string
profilePic:
type: string
scores:
type: array
items:
schema:
properties:
scoreDisplay:
type: string
rank:
type: string
score:
type: string
'''
name = request.args.get('name')
division = request.args.get('division')
mongo = connect("MONGO_READONLY")
if not name or not division:
return jsonify(f'Missing required parameters: name={name}, '
f'division={division}'), 200
result = mongo.entitydb.find_one({"name": name})
filter_search = {"uuid": f"{result['_id']}_{division}"}
result = mongo.rankingdb.find_one(filter_search)
response = Athlete.from_list_to_leaderboard(result['athletes'])
return jsonify(response), 200
@cfopen_bp.route('/cfba', methods=['GET'])
def cfba():
'''
CFBA Barra custom leaderboards
    Get the CFBA Barra leaderboard filtered by gender: male (M) or female (F).
__Available leaderboards__
- CFBA Barra
---
tags:
- Open
summary: Get custom leaderboard
parameters:
- name: gender
in: query
description: Athletes gender.
type: string
required: true
responses:
200:
description: Ranking of athletes by gender
schema:
type: object
properties:
ranking:
type: array
items:
schema:
properties:
affiliateName:
type: string
competitorName:
type: string
overallScore:
type: string
profilePic:
type: string
scores:
type: array
items:
schema:
properties:
scoreDisplay:
type: string
rank:
type: string
score:
type: string
'''
gender = request.args.get('gender')
mongo = connect("MONGO_READONLY")
if not gender:
return jsonify(f'Missing required parameters: gender={gender}'), 200
elif gender not in ['M', 'F']:
return jsonify('Gender must be M or F'), 200
division = "Masculino" if gender == "M" else "Feminino"
filter_search = {"uuid": f"dj8bd2j7et4fjxa01f_{division}"}
result = mongo.rankingcfbadb.find_one(filter_search)
response = Athlete.from_list_to_leaderboard(result['athletes'])
return jsonify(response), 200
```
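A hedged client-side sketch of calling the `/leaderboards` endpoint with `requests`; the base URL is a placeholder assumption, and the response is assumed to follow the `ranking` schema documented in the docstring above.
```python
import requests

BASE_URL = "http://localhost:5000"  # placeholder: point this at the deployed API

resp = requests.get(f"{BASE_URL}/leaderboards",
                    params={"name": "Bahia", "division": "Masculino"})
for athlete in resp.json().get("ranking", [])[:3]:
    print(athlete["competitorName"], athlete["overallScore"])
```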
#### File: cfopenapi/blueprints/core.py
```python
from flask import Blueprint, redirect, url_for, jsonify
core_bp = Blueprint("core_bp", __name__)
@core_bp.route('/', methods=['GET'])
def root():
try:
return redirect(url_for('flasgger.apidocs')), 302
except Exception:
return jsonify("API Documentation isn't loaded!"), 200
```
#### File: cfopenapi/championship/external.py
```python
def load_from_api(id, page, division):
from decouple import config
import requests
import json
params = {'affiliate': id, 'page': page, 'division': division}
response = requests.get(config('OPEN_URL'), params=params)
content = json.loads(response.text)
pagination = content.get('pagination', None)
total_pages = pagination.get('totalPages') if pagination else 0
return content.get('leaderboardRows', []), total_pages
```
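A sketch of how `load_from_api` might be driven across every page of results; it assumes the file above is saved as `external.py`, that `OPEN_URL` is configured, and that the affiliate id and division are caller-supplied values.
```python
from external import load_from_api  # assumes the module above is importable as external

def load_all_pages(affiliate_id, division):
    """Collect every leaderboardRows entry for one affiliate/division."""
    rows, page = [], 1
    page_rows, total_pages = load_from_api(affiliate_id, page, division)
    rows.extend(page_rows)
    while page < total_pages:
        page += 1
        page_rows, _ = load_from_api(affiliate_id, page, division)
        rows.extend(page_rows)
    return rows
```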
#### File: tests/tasks/test_tasks.py
```python
from cfopenapi import tasks
def test_refresh_empty_boards(app):
app.config['OPEN_BOARDS'] = []
result = tasks.refresh_boards()
assert result == 'Success rankings uuids: []'
# def test_add_cfba_athlete():
# data = json.load(open('data/fake_cfba_athlete_data.json'))
# expected_athlete = json.load(open('data/expected_cfba_athlete_19_2.json'))
# athletes = Athlete.from_list(data, 5)
# assert expected_athlete == athletes
``` |
{
"source": "joaopcanario/eventex",
"score": 2
} |
#### File: eventex/subscriptions/views.py
```python
from django.conf import settings
from django.contrib import messages
from django.core import mail
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.template.loader import render_to_string
from eventex.subscriptions.forms import SubscriptionForm
from eventex.subscriptions.models import Subscription
def subscribe(request):
if request.method == 'POST':
return create(request)
else:
return new(request)
def create(request):
form = SubscriptionForm(request.POST)
if not form.is_valid():
return render(request, 'subscriptions/subscription_form.html',
{'form': form})
subscription = Subscription.objects.create(**form.cleaned_data)
_send_mail(subject='Confirmação de inscrição',
from_=settings.DEFAULT_FROM_EMAIL,
to=subscription.email,
template_name='subscriptions/subscription_mail.txt',
context={'subscription': subscription})
return HttpResponseRedirect('/inscricao/{}/'.format(subscription.pk))
def new(request):
return render(request, 'subscriptions/subscription_form.html',
{'form': SubscriptionForm()})
def detail(request, pk):
try:
subscription = Subscription.objects.get(pk=pk)
except Subscription.DoesNotExist:
raise Http404
return render(request, 'subscriptions/subscription_detail.html',
{'subscription': subscription})
def _send_mail(subject, from_, to, template_name, context):
body = render_to_string(template_name, context)
mail.send_mail(subject, body, from_, [from_, to])
``` |
{
"source": "joaopcanario/google-python-exercises",
"score": 3
} |
#### File: google-python-exercises/logpuzzle/logpuzzle.py
```python
from pathlib import Path
import re
import sys
import urllib.request
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
"""Returns a list of the puzzle urls from the given log file,
extracting the hostname from the filename itself.
Screens out duplicate urls and returns the urls sorted into
increasing order."""
hostname = 'http://' + Path(filename).name.split('_')[-1]
url = r'GET\s(.+/puzzle/.+\.jpg)'
with open(filename) as logfile:
lines = logfile.readlines()
urls = [hostname + re.search(url, l).group(1)
for l in lines if re.search(url, l)]
return sorted(list(set(urls)), key=lambda s: s.split('-')[-1])
def download_images(img_urls, dest_dir):
"""Given the urls already in the correct order, downloads
each image into the given directory.
Gives the images local filenames img0, img1, and so on.
Creates an index.html in the directory
with an img tag to show each local image file.
Creates the directory if necessary.
"""
dst = Path(dest_dir)
dst.mkdir(parents=True, exist_ok=True)
imgs_dst = str(dst.resolve()) + '/img'
html_file = str(dst.resolve()) + '/index.html'
image_tags = []
for idx, url in enumerate(img_urls):
print(f'Downloading image {url}')
urllib.request.urlretrieve(url, f'{imgs_dst}{idx}.jpg')
image_tags.append(f'<img src="{imgs_dst}{idx}.jpg">')
print(f'Saved in {imgs_dst}{idx}.jpg')
html = f"<html>\n<body>\n{''.join(image_tags)}\n</body>\n</html>"
with open(html_file, 'w') as index:
index.write(html)
print('Created index.html to visualize images')
def main():
args = sys.argv[1:]
if not args:
print('usage: [--todir dir] logfile ')
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
if todir:
download_images(img_urls, todir)
else:
print('\n'.join(img_urls))
if __name__ == '__main__':
main()
``` |
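A tiny self-contained check of the URL extraction and the sort key used in `read_urls`; the log lines and the `example.com` hostname are made up, shaped like the exercise's `/puzzle/` image requests.
```python
import re

lines = [
    '1.2.3.4 - - [06/Aug/2007] "GET /images/puzzle/p-0001-baab.jpg HTTP/1.0" 200 -',
    '1.2.3.4 - - [06/Aug/2007] "GET /images/puzzle/p-0002-baaa.jpg HTTP/1.0" 200 -',
    '1.2.3.4 - - [06/Aug/2007] "GET /favicon.ico HTTP/1.0" 200 -',
]
pattern = r'GET\s(.+/puzzle/.+\.jpg)'
urls = ['http://example.com' + re.search(pattern, line).group(1)
        for line in lines if re.search(pattern, line)]
print(sorted(set(urls), key=lambda s: s.split('-')[-1]))
# ['http://example.com/images/puzzle/p-0002-baaa.jpg',
#  'http://example.com/images/puzzle/p-0001-baab.jpg']
```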
{
"source": "joaopdmartins/PointOfYouth",
"score": 3
} |
#### File: joaopdmartins/PointOfYouth/main.py
```python
import routes
from flask import Flask, request, url_for
app = Flask(__name__)
@app.route('/')
def home():
return app.send_static_file('index.html')
@app.route('/trending', methods=['GET'])
def trending():
# cookies = request.cookies()
# userid = get identifier from cookies
userid = 0
return routes.trendsForUser(userid)
@app.route('/category/<category_name>/results', methods=['POST'])
def saveCategoryResult(category_name):
# cookies = request.cookies()
# userid = get identifier from cookies
userid = 0
return routes.saveCategoryResult(request, userid, category_name)
@app.route('/category/<category_name>/results', methods=['GET'])
def getCategoryResult(category_name):
# cookies = request.cookies()
# userid = get identifier from cookies
userid = 0
return routes.getCategoryResult(userid, category_name)
@app.route('/user/categories', methods=['GET'])
def getUserCategories():
# cookies = request.cookies()
# userid = get identifier from cookies
userid = 0
return routes.getUserCategories(userid)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080)
``` |
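The handlers above leave the cookie lookup as a TODO. A hedged sketch of that step is below; the cookie name `userid` is an assumption, not part of the project.
```python
from flask import Flask, request

app = Flask(__name__)

def current_user_id(default=0):
    # Read the user identifier from the request cookies, falling back to a default.
    return int(request.cookies.get("userid", default))

if __name__ == "__main__":
    with app.test_request_context("/", headers={"Cookie": "userid=42"}):
        print(current_user_id())  # 42
```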
{
"source": "joaopdmartins/Verifiable-AI",
"score": 3
} |
#### File: source/design-time/pipeline_undeterminism.py
```python
import pandas as pd
from re import findall
from pickle import dump
from timeit import default_timer as timer
from subprocess import call
def grace(patient_data, risk_factors, risk_threshold=145):
risk=0
for i in range(0, len(risk_factors)):
factor = risk_factors[i]
if factor == 'Age':
age = patient_data[i]
if age >= 40 and age <= 49:
risk += 15
elif age >= 50 and age <= 59:
risk += 29
elif age >= 60 and age <= 69:
risk += 44
elif age >= 70 and age <= 79:
risk += 59
elif age >= 80 and age <= 89:
risk += 73
elif age >= 90:
risk += 80
elif factor == 'HR':
hr = patient_data[i]
if hr >= 70 and hr <= 89:
risk += 6
elif hr >= 90 and hr <= 109:
risk += 12
elif hr >= 110 and hr <= 149:
risk += 21
elif hr >= 150 and hr <= 199:
risk += 32
elif hr >= 200:
risk += 41
elif factor == 'SBP':
sbp = patient_data[i]
if sbp < 80:
risk += 57
elif sbp >= 80 and sbp <= 99:
risk += 53
elif sbp >= 100 and sbp <= 119:
risk += 43
elif sbp >= 120 and sbp <= 139:
risk += 34
elif sbp >= 140 and sbp <= 159:
risk += 24
elif sbp >= 160 and sbp <= 199:
risk += 10
elif factor == 'Creat':
creatinine = patient_data[i]
if creatinine >= 0 and creatinine <= 0.39:
risk += 2
elif creatinine >= 0.4 and creatinine <= 0.79:
risk += 5
elif creatinine >= 0.8 and creatinine <= 1.19:
risk += 8
elif creatinine >= 1.2 and creatinine <= 1.59:
risk += 11
elif creatinine >= 1.6 and creatinine <= 1.99:
risk += 14
elif creatinine >= 2 and creatinine <= 3.99:
risk += 23
elif creatinine >= 4:
risk += 31
elif factor == 'KILLIP':
killip = patient_data[i]
if killip == 2:
risk += 33
elif killip == 3:
risk += 67
elif killip == 4:
risk += 100
elif factor == 'CAA':
if patient_data[i] == 1:
risk += 98
elif factor == 'TN':
if patient_data[i] == 1:
risk += 54
elif factor == 'DEP ST':
if patient_data[i] == 1:
risk += 67
if risk >= risk_threshold:
patient_in_risk=True
else:
patient_in_risk=False
return patient_in_risk
def pursuit(patient_data, risk_factors, risk_threshold=13):
risk=0
for i in range(0, len(risk_factors)):
factor = risk_factors[i]
if factor == 'Age':
age = patient_data[i]
if age >= 50 and age <= 59:
risk += 8
elif age >= 60 and age <= 69:
risk += 9
elif age >= 70 and age <= 79:
risk += 11
elif age >= 80:
risk += 12
elif factor == 'SEX':
if patient_data[i] == 1:
risk += 1
elif factor == 'CCS>II':
if patient_data[i] == 1:
risk += 2
elif factor == 'hf':
if patient_data[i] == 1:
risk += 2
elif factor == 'DEP ST':
if patient_data[i] == 1:
risk += 1
if risk >= risk_threshold:
patient_in_risk=True
else:
patient_in_risk=False
return patient_in_risk
def timi( patient_data, risk_factors, risk_threshold=4):
risk=0
for i in range(0, len(risk_factors)):
factor = risk_factors[i]
if factor == 'Age':
if patient_data[i] >= 65:
risk += 1
elif factor == 'RF':
if patient_data[i] == 1:
risk += 1
elif factor == 'AAS':
if patient_data[i] == 1:
risk += 1
elif factor == 'Kn. CAD':
if patient_data[i] == 1:
risk += 1
elif factor == 'Angina':
if patient_data[i] == 1:
risk += 1
elif factor == 'DEP ST':
if patient_data[i] == 1:
risk += 1
elif factor == 'TN':
if patient_data[i] == 1:
risk += 1
if risk >= risk_threshold:
patient_in_risk=True
else:
patient_in_risk=False
return patient_in_risk
def check_undetermininsm(patient, X_labels, model, output):
#patient: list with data
#X_labels: X_labels=['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','hf','TN','Creat','CAA','AAS','Angina','Kn. CAD']
#model: model name (function)
#output: risk evaluation
if model==grace:
#add the patient input data to the model
call(["spin", "-DAGE=" + str(int(patient[X_labels.index("Age")])), "-DHR="+str(int(patient[X_labels.index("HR")])), "-DSBP="+str(int(patient[X_labels.index("SBP")])), "-DCREAT="+str(int(patient[X_labels.index("Creat")]*10)), "-DKILLIP="+str(int(patient[X_labels.index("KILLIP")])), "-DCAA="+str(int(patient[X_labels.index("CAA")])), "-DDEPST="+str(int(patient[X_labels.index("DEP ST")])), "-DTN="+str(int(patient[X_labels.index("TN")])), "-DRISK="+str(output).lower(),"-a","grace_undeterminism.pml"])
elif model== pursuit:
call(["spin", "-DAGE=" + str(int(patient[X_labels.index("Age")])), "-DSEX="+str(int(patient[X_labels.index("SEX")])), "-DCCSII="+str(int(patient[X_labels.index("CCS>II")])), "-DHF="+str(int(patient[X_labels.index("hf")])), "-DDEPST="+str(int(patient[X_labels.index("DEP ST")])), "-DRISK="+str(output).lower(),"-a","pursuit_undeterminism.pml"])
elif model== timi:
call(["spin", "-DAGE=" + str(int(patient[X_labels.index("Age")])), "-DRF="+str(int(patient[X_labels.index("RF")])), "-DAAS="+str(int(patient[X_labels.index("AAS")])), "-DKNCAD="+str(int(patient[X_labels.index("Kn. CAD")])), "-DDEPST="+str(int(patient[X_labels.index("DEP ST")])),"-DTN="+str(int(patient[X_labels.index("TN")])),"-DANGINA="+str(int(patient[X_labels.index("Angina")])), "-DRISK="+str(output).lower(),"-a","timi_undeterminism.pml"])
#compile
call(["gcc","pan.c", "-o", "pan"])
#call the executer
call(["./pan"], stdout=open("data_out.txt",'w'))
#read the output
file=open("data_out.txt",'r')
filetext=file.read()
file.close()
#search for keyword "errors:"
errors = int(findall("errors:.*\\n",filetext)[0].split()[1])
if errors:
return False
return True
if __name__=='__main__':
#define the conditions
event_threshold=40 #days
#load data
#file only has relevant data -> feature columns and Event days
raw_data = pd.read_csv('data_raw_santa_cruz.csv', sep=';')
#handle missing data -> substitute NaN for the median of the feature
data = raw_data.fillna(raw_data.median().to_dict())
#change event days to event class -> applying a mask to convert into a Binary classification problem
data['Event'] = (data['Event'] < event_threshold).astype(int)
#X and y
X_data= data.drop(columns='Event').values.tolist()
y_data= data['Event'].tolist() #also works .to_numpy()
print(sum(y_data))
X_labels=['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','hf','TN','Creat','CAA','AAS','Angina','Kn. CAD']
model=timi
i=1
print("===START===")
states_all=[]
memory_all=[]
time_all=[]
t=[]
y_c=[]
y_c_p=[]
y_nc=[]
y_nc_p=[]
confident=0
confident_right=0
not_confident=0
not_confident_right=0
#run for each patient
for patient, y in zip(X_data,y_data):
print('\r|Patient: %d/%d|' % (i, len(y_data)),end='')
i+=1
#apply rs
risk = model(patient,X_labels)
t.append(int(risk))
#call
assess=check_undetermininsm(patient, X_labels, model, risk)
if assess:
#update confident metrics
confident+=1
y_c.append(y)
y_c_p.append(int(risk))
if y == risk:
confident_right+=1
else:
#update negative metrics
not_confident+=1
y_nc.append(y)
y_nc_p.append(int(risk))
if y == risk:
not_confident_right+=1
print('\n',X_labels,'\n',patient,y, risk)
break
#read the output for stats
file=open("data_out.txt",'r')
filetext=file.read()
file.close()
states= int(findall("[0-9]+ transitions",filetext)[0].split()[0])
memory= float(findall("[0-9]+\.?[0-9]+\ttotal actual memory usage",filetext)[0].split()[0])
time_elapsed=float(findall("pan: elapsed time.*\\n",filetext)[0].split()[3])
states_all.append(states)
memory_all.append(memory)
time_all.append(time_elapsed)
print("\n===RESULTS===\Deterministic")
print(confident_right,"right out of ", end='')
print(confident)
#print_results(y_c,y_c_p)
print("Not Deterministic")
print(not_confident_right,"right out of ", end='')
print(not_confident)
#print_results(y_nc,y_nc_p)
print()
print("AVG states:",sum(states_all)/len(states_all))
print("MAX states:",max(states_all))
print("AVG memory:",sum(memory_all)/len(memory_all))
print("MAX memory:",max(memory_all))
print("AVG time:",round(sum(time_all)/len(time_all),6), end=' ')
print("|| MAX time:",round(max(time_all),6))
print("===END RESULTS===\n")
```
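A small usage sketch of the GRACE scorer defined above, assuming the file is saved as `pipeline_undeterminism.py`; the patient values are made up for illustration, not taken from the study data.
```python
from pipeline_undeterminism import grace

X_labels = ['SEX', 'Age', 'Enrl', 'RF', 'CCS>II', 'DEP ST', 'SBP', 'HR',
            'KILLIP', 'hf', 'TN', 'Creat', 'CAA', 'AAS', 'Angina', 'Kn. CAD']
patient = [1, 72, 0, 1, 0, 1, 95, 112, 2, 0, 1, 1.3, 0, 1, 0, 0]  # made-up values
print(grace(patient, X_labels))  # True: 59 + 21 + 53 + 11 + 33 + 54 + 67 = 298 >= 145
```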
#### File: source/ensemble/pipeline_hybrid.py
```python
import pandas as pd
from pickle import dump,load
import random
import numpy as np
from results import print_results
from grace import grace
from pursuit import pursuit
from timi import timi
from confidence import within_confidence_region_spin, within_confidence_region_python, explain_negative_spin
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import f1_score,recall_score,matthews_corrcoef
from sklearn.tree import DecisionTreeClassifier
#from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
#import eli5
from imblearn.over_sampling import SMOTE, RandomOverSampler
class Timi:
def fit(self,x,y):
return
def predict(self,X):
        return [timi(x,['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','hf','TN','Creat','CAA','AAS','Angina','Kn. CAD']) for x in X]
class Pursuit:
def fit(self,x,y):
return
def predict(self,X):
        return [pursuit(x,['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','hf','TN','Creat','CAA','AAS','Angina','Kn. CAD']) for x in X]
if __name__=='__main__':
#sets_on= [6,6,8,8,9,9,3,3]
sets_on= [4,4,5,5,3,3,1]
#DEFINE EXPERIMENT PARAMETERS
runs=50 #to validate performance results
seeds= load(open('seeds','rb')) #to allow same results
#set random seed to the last (never used in the runs)
random.seed(seeds[-1])
np.random.seed(seeds[-1])
parameters_tunning=True
train_on_wrong=False
training_oversampler=SMOTE #can be SMOTE, RandomOverSampler, None
confidence_function= within_confidence_region_python #using python for faster execution, or spin for the proposed framework
event_threshold=40 #days
#DEFINE CLASSIFIERS PARAMETERS
#Decision Tree
dec_tre = DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=10, min_samples_split=2, min_samples_leaf=2, min_weight_fraction_leaf=0.0, max_features='auto', random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight={0: 1, 1: 5}, presort='deprecated', ccp_alpha=0.0)
#logistic regressor
#log = SGDClassifier(loss='log', penalty='l2', alpha=0.0001, l1_ratio=0.1, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, n_jobs=-1, random_state=None, learning_rate='optimal', eta0=0.0, power_t=0.5, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, class_weight=None, warm_start=False, average=False)
log= LogisticRegression(penalty='l2', dual=False, tol=0.001, C=0.5, fit_intercept=True, intercept_scaling=1, class_weight=None, random_state=None, solver='liblinear', max_iter=1000, multi_class='auto', verbose=0, warm_start=False, n_jobs=None, l1_ratio=None)
#KNNeighbors
knn = KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='ball_tree', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=-1)
#Naive baise
nb = GaussianNB(priors=None, var_smoothing=1e-09)
#SVM
svm=SVC(C=2.0, kernel='linear', degree=3, gamma='scale', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', break_ties=False, random_state=None)
#save references for the classifiers
models=[Pursuit(),Timi(),dec_tre,log,knn,nb,svm]
m_labels=['Pursuit','Timi','DecTree','LogReg','Knn','Nb','SVM']
#DEFINE METRICS STRUCTURES
metrics={'Grace':{'f1':[],'se':[],'sp':[],'mcc':[],'g_count':[],'m_count':[],'t_count':[]}}
for m in m_labels:
metrics[m]={'f1':[],'se':[],'sp':[],'mcc':[]}
metrics[m+'Hyb']={'f1':[],'se':[],'sp':[],'mcc':[]}
print("####LOADING AND PREPROCESSING DATA####")
#load data
#file only has relevant data -> feature columns and Event days
raw_data = pd.read_csv('data_raw.csv', sep=';')
#handle missing data -> substitute NaN for the median of the feature
data = raw_data.fillna(raw_data.median().to_dict())
#change event days to event class -> applying a mask to convert into a Binary classification problem
data['Event'] = (data['Event'] < event_threshold).astype(int)
#drop KILLIP or HF(signs) because they have .94 correlation
#data= data.drop(columns='HF (signs)')
#X and y
X_data= data.drop(columns='Event').values.tolist()
y_data= data['Event'].tolist() #also works .to_numpy()
X_labels=['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','hr','TN','Creat','CAA','AAS','Angina','Kn. CAD']
X_data, X_grid, y_data, y_grid = train_test_split( X_data, y_data, test_size=0.15, stratify=y_data)
#CLASSIFIER PARAMETER TUNING
if parameters_tunning:
#DEC TREE
param_grid= {'criterion':('gini', 'entropy'), 'splitter':('best','random'), }
GS= GridSearchCV(dec_tre, param_grid, scoring ='recall', n_jobs=-1, iid='deprecated', refit=True, cv=5, verbose=0, pre_dispatch='2*n_jobs')
GS.fit(X_grid,y_grid)
print(GS.best_estimator_)
dec_tre= GS.best_estimator_
#LOGREG
param_grid= {'solver':('liblinear','lbfgs','saga'),'C':(0.5,1,2)}
GS= GridSearchCV(log, param_grid, scoring ='recall', n_jobs=-1, iid='deprecated', refit=True, cv=5, verbose=0, pre_dispatch='2*n_jobs')
GS.fit(X_grid,y_grid)
print(GS.best_estimator_)
log= GS.best_estimator_
#KNN
param_grid= {'n_neighbors':(5,10,15),'weights':('uniform','distance'), 'algorithm':('ball_tree','kd_tree')}
GS= GridSearchCV(knn, param_grid, scoring ='recall', n_jobs=-1, iid='deprecated', refit=True, cv=5, verbose=0, pre_dispatch='2*n_jobs')
GS.fit(X_grid,y_grid)
print(GS.best_estimator_)
knn= GS.best_estimator_
#SVM
param_grid= {'kernel':('linear', 'poly', 'rbf', 'sigmoid'),'C':(0.5,1.0,2)}
GS= GridSearchCV(svm, param_grid, scoring ='recall', n_jobs=-1, iid='deprecated', refit=True, cv=5, verbose=0, pre_dispatch='2*n_jobs')
GS.fit(X_grid,y_grid)
print(GS.best_estimator_)
svm= GS.best_estimator_
print("####STARTING TRAINING AND TESTING####")
#repeat n times:
for run in range(runs):
#setseed
random.seed(seeds[run])
np.random.seed(seeds[run])
#train/test split -> make sure the split is stratified
_X_train, X_test, _y_train, y_test = train_test_split( X_data, y_data, test_size=0.39, stratify=y_data)
#add the grid search data to the training data
_X_train= _X_train + X_grid
_y_train= _y_train + y_grid
if train_on_wrong:
y_train_pred_grace = [grace(p,X_labels) for p in _X_train]
_X_train=[x for x,y,yp in zip(_X_train,_y_train,y_train_pred_grace) if (y!=yp or y==1)]
_y_train=[y for y,yp in zip(_y_train,y_train_pred_grace) if (y!=yp or y==1)]
#optional over-sampling of the training data (SMOTE, RandomOverSampler or None)
if training_oversampler:
X_train, y_train = training_oversampler().fit_resample(_X_train, _y_train)
else:
X_train= _X_train
y_train= _y_train
#test grace
y_test_pred_grace = [grace(p,X_labels) for p in X_test]
#confidence assess grace
assess = [confidence_function(p, X_labels, y_test_pred_grace[i], model=grace, range_params= sets_on) for i,p in enumerate(X_test)]
#metrics
metrics['Grace']['t_count'].append(len(assess))
metrics['Grace']['g_count'].append(sum(assess))
metrics['Grace']['m_count'].append(len(assess) - sum(assess))
#for each model
for i,model in enumerate(models):
#train
model.fit(X_train,y_train)
y_train_pred_model = model.predict(X_train) #for future stats
#test
y_test_pred_model = model.predict(X_test)
y_test_pred_hybrid = []
#Testing hybrid
for j,(patient, y) in enumerate(zip(X_test,y_test)):
print('\rStatus: Run:%d |Model:%s\t|Patient: %d/%d' % (run+1,m_labels[i],j,len(y_test)),end='')
if assess[j]:
y_test_pred_hybrid.append(y_test_pred_grace[j]) #risk_grace
else:
y_test_pred_hybrid.append(y_test_pred_model[j]) #risk_model
#calculate and store metrics
metrics[m_labels[i]]['f1'].append(f1_score(y_test,y_test_pred_model))
metrics[m_labels[i]]['se'].append(recall_score(y_test,y_test_pred_model))
metrics[m_labels[i]]['sp'].append(recall_score(y_test,y_test_pred_model,pos_label=0))
#metrics[m_labels[i]]['mcc'].append(matthews_corrcoef(y_test, y_test_pred_model))
metrics[m_labels[i]+"Hyb"]['f1'].append(f1_score(y_test,y_test_pred_hybrid))
metrics[m_labels[i]+"Hyb"]['se'].append(recall_score(y_test,y_test_pred_hybrid))
metrics[m_labels[i]+"Hyb"]['sp'].append(recall_score(y_test,y_test_pred_hybrid,pos_label=0))
#metrics[m_labels[i]+"Hyb"]['mcc'].append(matthews_corrcoef(y_test, y_test_pred_model))
#calculate and store grace metrics
metrics['Grace']['f1'].append(f1_score(y_test,y_test_pred_grace))
metrics['Grace']['se'].append(recall_score(y_test,y_test_pred_grace))
metrics['Grace']['sp'].append(recall_score(y_test,y_test_pred_grace,pos_label=0))
#metrics['Grace']['mcc'].append(matthews_corrcoef(y_test,y_test_pred_grace))
print_results(metrics,sets_on)
dump(metrics,open('results'+str(sets_on)+'.pickle','wb'))
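# Sketch (not part of the original script): the saved metrics can be reloaded later with
# metrics = load(open('results' + str(sets_on) + '.pickle', 'rb'))
# and passed to print_results(metrics, sets_on) again to reprint the summary.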
```
#### File: source/online-verification/confidence.py
```python
from subprocess import call
from re import findall
from copy import deepcopy
def within_confidence_region(patient, X_labels, model, output):
#patient: list with data
#X_labels: ['SEX','Age','Enrl','RF','CCS>II','DEP ST','SBP','HR','KILLIP','TN','Creat','CAA','AAS','Angina','Kn. CAD']
#model: model name (function)
#output: risk evaluation
#add the patient input data to the model
call(["spin", "-DAGE=" + str(int(patient[X_labels.index("Age")])), "-DHR="+str(int(patient[X_labels.index("HR")])), "-DSBP="+str(int(patient[X_labels.index("SBP")])), "-DCREAT="+str(int(patient[X_labels.index("Creat")]*10)), "-DKILLIP="+str(int(patient[X_labels.index("KILLIP")])), "-DCAA="+str(int(patient[X_labels.index("CAA")])), "-DDEPST="+str(int(patient[X_labels.index("DEP ST")])), "-DTN="+str(int(patient[X_labels.index("TN")])), "-DRISK="+str(output).lower(),"-a","grace_online.pml"])
#compile
call(["gcc","pan.c", "-o", "pan",'-Wno-overlength-strings','-Wno-format-overflow','-w'])
#run the generated verifier executable (pan)
call(["./pan"], stdout=open("data_out.txt",'w'))
#read the output
file=open("data_out.txt",'r')
filetext=file.read()
file.close()
#search for keyword "errors:"
errors = int(findall("errors:.*\\n",filetext)[0].split()[1])
if errors:
return False
return True
def explain_negative(patient, X_labels, model, output):
#spin -t model.pml
call(["spin","-t","-DAGE=" + str(int(patient[X_labels.index("Age")])), "-DHR="+str(int(patient[X_labels.index("HR")])), "-DSBP="+str(int(patient[X_labels.index("SBP")])), "-DCREAT="+str(int(patient[X_labels.index("Creat")]*10)), "-DKILLIP="+str(int(patient[X_labels.index("KILLIP")])), "-DCAA="+str(int(patient[X_labels.index("CAA")])), "-DDEPST="+str(int(patient[X_labels.index("DEP ST")])), "-DTN="+str(int(patient[X_labels.index("TN")])), "-DRISK="+str(output).lower(),"-t","grace_online.pml"], stdout=open("explanation.txt",'w'))
``` |
{
"source": "joaopedro02/assist_programing_education",
"score": 3
} |
#### File: paginas_apps/pagina_do_usuario/BarChart_estAprendi.py
```python
import pygal
from django.contrib.auth.models import User
from autentication.models import perfil
class BarChart_estAprendizagem():
def __init__(self, **kwargs):
self.chart = pygal.Bar(**kwargs)
def set_title(self):
self.chart.title = 'Resultado estilos de aprendizagem'
def get_data(self, request):
'''
Query the db for chart data, pack them into a dict and return it.
'''
data = {"Ativo":request.user.perfil.ea_ativo,"Reflexivo":request.user.perfil.ea_reflexivo,"Pragmático":request.user.perfil.ea_pragmatico,"Teórico":request.user.perfil.ea_teorico}
return data
def generate(self,request):
# Get chart data
chart_data = self.get_data(request)
# Add data to chart
for key, value in chart_data.items():
self.chart.add(key, value)
self.chart.value_formatter = lambda x: "%.1f%%" % x
# Return the rendered SVG
return self.chart.render(is_unicode=True)
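# Usage sketch (hypothetical Django view, not part of this module):
# def dashboard(request):
#     chart = BarChart_estAprendizagem(height=400, width=600)
#     chart.set_title()
#     return render(request, 'dashboard.html', {'chart_svg': chart.generate(request)})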
```
#### File: paginas_apps/paginas_turmas/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from .models import Turmas
def validate_exists(value):
try:
t = Turmas.objects.get(pk=value)
except Turmas.DoesNotExist:
raise ValidationError(
" Essa turma não existe"
)
class Add_turma(forms.Form):
cod_turma=forms.IntegerField(validators=[validate_exists])
# class Meta:
# model=User
# fields = ['first_name','last_name','email','username','password']
# widgets = {'password': forms.PasswordInput}
# # nome=forms.CharField(label="Nome",max_length=100)
# sobrenome=forms.CharField(label="Sobrenome",max_length=100)
# email=forms.EmailField(max_length=200)
# username=forms.CharField(label="Usuário",max_length=100)
# senha=forms.CharField(label="Senha",max_length=80)
# sou_professor=forms.BooleanField(required=False)
class Cria_turma(forms.ModelForm):
# star_color = forms.CharField(widget=ColorPickerWidget)
class Meta:
model=Turmas
fields = ['nome','descricao']
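# Usage sketch (hypothetical view code, not from this repo): Add_turma validates that the class exists before joining it.
# form = Add_turma(request.POST)
# if form.is_valid():
#     turma = Turmas.objects.get(pk=form.cleaned_data['cod_turma'])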
```
#### File: educaton/questions/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .models import Question,Answers
from django.template import loader
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.db.models import Sum
from autentication.models import perfil
@login_required
def form1(request):
# learning styles questionnaire (form 1)
# form = MyForm(request.POST)
# if form.is_valid():
answer=Answers.objects.filter(user__username=request.user.get_username())
questions=Question.objects.exclude(form=2).exclude(answers__id__in=answer)[:5]
countanswer=answer.filter(question__form=1).count()
if countanswer >=80 :
return HttpResponseRedirect(reverse('questions:respondido'))
print(countanswer)
print(questions)
# for q in questions:
# print(q.id)
for q in questions:
print(q.form)
porcentagem=(countanswer/80.0)*100.0
porcentagem_completada="width: " + str(porcentagem)+ "%;"
print(porcentagem_completada)
valor_completada=porcentagem
context = {
'questions':questions,
'porcentagem_completada':porcentagem_completada,
'valor_completada':valor_completada,
}
return render(request,'questions/perguntaForm1.html',context)
@login_required
def form2(request):
# multiple intelligences questionnaire (form 2)
answer=Answers.objects.filter(user__username=request.user.get_username())
questions=Question.objects.exclude(form=1).exclude(answers__id__in=answer)[:5]
countanswer=answer.filter(question__form=2).count()
if countanswer>=80:
return HttpResponseRedirect(reverse('questions:respondido'))
porcentagem=(countanswer/80.0)*100.0
porcentagem_completada="width: " + str(porcentagem)+ "%;"
print(porcentagem_completada)
valor_completada=porcentagem
context = {
'questions':questions,
'porcentagem_completada':porcentagem_completada,
'valor_completada':valor_completada,
}
return render(request,'questions/perguntaForm2.html',context)
@login_required
def resposta(request):
formulario=request.POST.get('questionform')
print("formulario")
print(formulario)
print("-")
answer=Answers.objects.filter(user__username=request.user.get_username())
countanswer=answer.filter(question__form=int(float(formulario))).count()
if(countanswer>=80):
return HttpResponseRedirect(reverse('pagina_do_usuario:pagina_inicial'))
qid=[]
resp=[]
if request.method == "POST":
qid.append(request.POST.get('questionid1'))
qid.append(request.POST.get('questionid2'))
qid.append(request.POST.get('questionid3'))
qid.append(request.POST.get('questionid4'))
qid.append(request.POST.get('questionid5'))
print("id values")
for i in qid:
print(i)
resp.append(request.POST.get('slider1'))
resp.append(request.POST.get('slider2'))
resp.append(request.POST.get('slider3'))
resp.append(request.POST.get('slider4'))
resp.append(request.POST.get('slider5'))
print("resp values")
for r in resp:
print(r)
for index,i in enumerate(qid):
answer= Answers.objects.filter(question=int(float(i))).order_by('answers_value')
answer=answer[int(float(resp[index]))]
answer.user.add(User.objects.get(username=request.user.get_username()))
answer=Answers.objects.filter(user__username=request.user.get_username())
countanswer=answer.filter(question__form=int(float(formulario))).count()
if(countanswer>=80):# if the user has answered every question of the questionnaire in question
if int(float(formulario)) == 1:
calculo_estilos_de_aprendizagem(request, answer.filter(question__form=int(float(formulario))) )
pass
if int(float(formulario)) == 2:
calculo_inteligencias_multiplas(request, answer.filter(question__form=int(float(formulario))) )
pass
return HttpResponseRedirect(reverse('questions:respondido'))
else:
if(int(float(formulario))==1):
return redirect(reverse('questions:form1'))
else:
return redirect(reverse('questions:form2'))
return HttpResponseRedirect(reverse('questions:respondido'))
@login_required
def respondido(request):
context={
}
return render(request,'questions/respondido.html',context)
@login_required
def calculo_inteligencias_multiplas(request,answers):
a=answers.filter(question__number__gte=1,question__number__lte=10)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
vl=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=11,question__number__lte=20)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
lm=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=21,question__number__lte=30)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
ve=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=31,question__number__lte=40)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
i=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=41,question__number__lte=50)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
cc=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=51,question__number__lte=60)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
rm=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=61,question__number__lte=70)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
intra=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=71,question__number__lte=80)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
n=(float(a)/30.0)*100.0
print(a)
request.user.perfil.int_verbal_linguistica=vl
request.user.perfil.int_musical=rm
request.user.perfil.int_logico_matematica=lm
request.user.perfil.int_cinestesico_corporal=cc
request.user.perfil.int_espacial_visual=ve
request.user.perfil.int_intrapessoal=intra
request.user.perfil.int_naturalista=n
request.user.perfil.int_interpessoal=i
request.user.perfil.f_int=True
request.user.perfil.save()
@login_required
def calculo_estilos_de_aprendizagem(request,answers):
#could be done better:
#the questions should be stored in order in the database so we could query by ranges
# instead of issuing 80 separate queries
ativo=[3,5,7,9,13,20,26,27,35,37,41,43,46,48,51,61,67,74,75,77]
reflexivo=[10,16,18,19,28,31,32,34,36,39,42,44,49,55,58,63,65,69,70,79]
teorico=[2,4,6,11,15,17,21,23,25,29,33,45,50,54,60,64,66,71,78,80]
pragmatico=[1,8,12,14,22,24,30,38,40,47,52,53,56,57,59,62,68,72,73,76]
cont=0.0
for i in ativo:
a=answers.get(question__number=i)
cont=cont+a.answers_value
at=(cont/60)*100
cont=0.0
for i in reflexivo:
a=answers.get(question__number=i)
cont=cont + a.answers_value
re=(cont/60)*100
cont=0.0
for i in teorico:
a=answers.get(question__number=i)
cont=cont+a.answers_value
te=(cont/60)*100
cont=0.0
for i in pragmatico:
a=answers.get(question__number=i)
cont=cont+a.answers_value
pr=(cont/60)*100
request.user.perfil.ea_ativo=at
request.user.perfil.ea_reflexivo=re
request.user.perfil.ea_pragmatico=pr
request.user.perfil.ea_teorico=te
request.user.perfil.f_est=True
request.user.perfil.save()
pass
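# Note (added): each learning style above groups 20 questions and the total is divided by 60,
# which suggests answer values in the 0-3 range (an assumption; the slider range is set upstream).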
``` |
{
"source": "joaopedrobritot/ImageManager",
"score": 2
} |
#### File: ImageManager/gallery/views.py
```python
from django.shortcuts import render
from .models import Image
from .forms import ImageForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
import os
# Create your views here.
@login_required
def galleryView(request):
data = {}
data['imgs'] = Image.objects.all().filter(user=request.user)
return render(request, 'gallery/gallery.html', data)
@login_required
def removeAllImages(request):
Images = Image.objects.all().filter(user=request.user)
for img in Images:
os.remove(img.image.path)
Images.delete()
return redirect('/gallery')
@login_required
def addImage(request):
data = {}
if request.method == 'POST':
data['form'] = ImageForm(request.POST, request.FILES)
if data['form'].is_valid():
image = Image()
image.title = data['form'].cleaned_data['title']
image.image = data['form'].cleaned_data['image']
image.user = request.user
image.save()
# messages.success(request, "'" + task.__str__() + "'" + " Task added!")
return redirect('/gallery')
else:
data['form'] = ImageForm()
return render(request, 'gallery/addImage.html', data)
@login_required
def removeImage(request, id):
img = get_object_or_404(Image, pk=id)
os.remove(img.image.path)
img.delete()
return redirect('/gallery')
@login_required
def viewImage(request, id):
data = {}
data['img'] = get_object_or_404(Image, pk=id)
return render(request, 'gallery/viewImage.html', data)
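# Routing sketch (hypothetical urls.py entries; paths and names are assumptions, not taken from this repo):
# urlpatterns = [
#     path('gallery/', galleryView),
#     path('gallery/add/', addImage),
#     path('gallery/remove/<int:id>/', removeImage),
#     path('gallery/view/<int:id>/', viewImage),
#     path('gallery/removeAll/', removeAllImages),
# ]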
```
#### File: ImageManager/resin/ResinCalculator.py
```python
import datetime
import pytz
def CalculaResina(hora, resin_atual, resin):
dif = abs(resin - resin_atual)
result = dif * 8
hora += datetime.timedelta(minutes=result)
return "{:02d}:{:02d}".format(hora.hour, hora.minute)
def verifyValue(value):
value = int(value)
if value > 160:
value = 160
if(value < 0):
value = 0
return value
def resinCalculator(resin_atual, resin):
resin_atual = verifyValue(resin_atual)
resin = verifyValue(resin)
if resin_atual > resin:
return '99:99'
timezone = pytz.timezone('America/Recife')
hora_atual = datetime.datetime.now(timezone)
return CalculaResina(hora_atual, resin_atual, resin)
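# Worked example (illustrative): going from 40 to a 160-resin target takes (160 - 40) * 8 = 960 minutes,
# i.e. 16 hours from the current time in America/Recife:
# print(resinCalculator(40, 160))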
``` |
{
"source": "joaopedrolourencoaffonso/Chimera-chat",
"score": 3
} |
#### File: Chimera-chat/scripts_para_testes/send_Telegram.py
```python
from variables import api_id, api_hash
from telethon import TelegramClient, events, utils
import asyncio
client = TelegramClient('aplicacao', api_id, api_hash)
async def main():
i = 0;
while i < 100:
await client.send_message("me", f'Mensagem {i}')
i += 1;
with client:
client.loop.run_until_complete(main())
``` |
{
"source": "joaopedromattos/DMC2020",
"score": 3
} |
#### File: dora/pre-processing-features/utils.py
```python
import zipfile
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
DATA_FILE = "1.0v.zip"
def read_data(data_dir="../main/datasets/", data_file=DATA_FILE):
"""Returns the data, in order infos, items, orders"""
with zipfile.ZipFile(data_dir+DATA_FILE) as z:
dfs = []
for name in ["infos", "items", "orders"]:
dfs.append(pd.read_csv(z.open(f"1.0v/{name}.csv"), sep="|"))
return dfs
def process_time(df, should_print=False,
test_start=pd.to_datetime("30 June 2018 00:00:00")):
"""Adds a group_backwards and days_backwards column to the data
If ```Use the period starting on 30 June 2018 00:00:00, the day after the last date from the transaction files.```
that means the 29th is included, but the 30th not (it's the first day in our test data;
Also, the first 14 days backwards should be [16-29] June (The 15th should not be included!)
So we index "group_backwards" which is how many weeks BACKWARDS from test time we have (ie, 0 weeks backwards means we are at TEST TIME). Therefore, 0 doesn't exist for now :)
"""
df["time"] = pd.to_datetime(df["time"])
# Make sure we only have data for 2018
assert (df["time"].dt.year != 2018).sum() == 0
if should_print:
print("The first timestamp is", df["time"].min(),
"and the last is", df["time"].max())
df["days"] = df["time"].dt.dayofyear
# Make sure we have data for every single day
df["days"].unique() == np.arange(1, 181)
df["days_backwards"] = test_start.dayofyear - df["days"]
df["group_backwards"] = np.ceil(df["days_backwards"] / 14).astype(int)
# Make sure we didn't make any mistake - 16th/06 should 1
assert not (df.set_index("time").loc["16 June 2018 00:00:00":"16 June 2018 23:59:59",
"group_backwards"] != 1).sum()
# 15th/06 should be 2
assert not (df.set_index("time").loc["15 June 2018 00:00:00":"15 June 2018 23:59:59",
"group_backwards"] != 2).sum()
def merge_data(orders, items, infos, col="itemID"):
df = pd.merge(orders, items, on=col, validate="m:1")
df = pd.merge(df, infos, on=col, validate="m:1")
return df
def cost_func(target, prediction, simulatedPrice):
temp = (prediction - np.maximum(prediction - target, 0) * 1.6)
return np.sum(temp*simulatedPrice)
def promo_detector(orders, aggregation=True, mode=True):
"""
This function adds a "promotion" column at "orders.csv".
It verifies if an item of an order is being sold cheaper than it's prices "mode"/"mean".
Case affirmative, a '1' will be added in 'promotion' column in the line of the order.
Parameters: orders -> Orders DataFrame
aggregation -> Flag that mantains or not the "salesPriceMode" in our returned DataFrame
True => Return will have the column
mode -> Decision method flag (Default 'True'). If "True", the function will
use the 'mode' of the prices to decide if an item is being sold below it's normal price.
If 'False', we'll use the "mean" of the prices.
Returns: our orders Dataframe with 2 new columns ("salesPriceMode" and "promotion")
"""
# Aggregator: the mode of the prices when `mode` is True, otherwise pandas' built-in 'mean'
agregationMode = (lambda x: x.value_counts().index[0]) if mode else 'mean'
# Getting an itemID / salesPriceMode Dataframe
# salesPriceMode column will store the
# 'mean'/'mode' of our items
pricesAggregated = orders.groupby('itemID').agg(
salesPriceMode=('salesPrice', agregationMode))
pricesAggregated['promotion'] = 0
ordersCopy = orders.copy()
orders_with_promotion = pd.merge(
ordersCopy, pricesAggregated, how='inner', left_on='itemID', right_on='itemID')
# For every item whose salesPrice is lower than the 'mean'/'mode',
# we'll attribute 1 to it's position in 'promotion' column
orders_with_promotion.loc[orders_with_promotion['salesPrice'] <
orders_with_promotion['salesPriceMode'], 'promotion'] = 1
if (not(aggregation)):
orders_with_promotion.drop(
'salesPriceMode', axis=1, inplace=True)
return orders_with_promotion
def promo_detector_fixed(orders, aggregation=True, mode=True):
"""
This function adds a "promotion" column at "orders.csv".
It verifies if an item of an order is being sold cheaper than it's prices "mode"/"mean".
Case affirmative, a '1' will be added in 'promotion' column in the line of the order.
Parameters: orders -> Orders DataFrame
aggregation -> Flag that mantains or not the "salesPriceMode" in our returned DataFrame
True => Return will have the column
mode -> Decision method flag (Default 'True'). If "True", the function will
use the 'mode' of the prices to decide if an item is being sold below it's normal price.
If 'False', we'll use the "mean" of the prices.
Returns: our orders Dataframe with 2 new columns ("salesPriceMode" and "promotion")
"""
new_df = pd.DataFrame()
# Aggregator: the mode of the prices when `mode` is True, otherwise pandas' built-in 'mean'
agregationMode = (lambda x: x.value_counts().index[0]) if mode else 'mean'
for i in range(13, -1, -1):
# Getting an itemID / salesPriceMode Dataframe
# salesPriceMode column will store the
# 'mean'/'mode' of our items
current_agg = orders.loc[orders.group_backwards > i].groupby(
['itemID']).agg(salesPriceMode=('salesPrice', agregationMode))
current_agg['promotion'] = 0
orders_copy = orders.loc[orders.group_backwards == i + 1].copy()
current_orders_with_promotion = pd.merge(
orders_copy, current_agg, how='inner', left_on='itemID', right_on='itemID')
# For every item whose salesPrice is lower than the 'mean'/'mode',
# we'll attribute 1 to it's position in 'promotion' column
current_orders_with_promotion.loc[current_orders_with_promotion['salesPrice'] <
current_orders_with_promotion['salesPriceMode'], 'promotion'] = 1
new_df = pd.concat([new_df, current_orders_with_promotion])
week_13 = orders.loc[orders.group_backwards == 13].copy()
week_13['salesPriceMode'] = 0
week_13['promotion'] = 0
new_df = pd.concat([new_df, week_13])
if (not(aggregation)):
new_df.drop(
'salesPriceMode', axis=1, inplace=True)
new_df.sort_values(by=['group_backwards', 'itemID'], inplace=True)
return new_df
def promotionAggregation(orders, items, promotionMode='mean', timeScale='group_backwards', salesPriceMode='mean'):
"""The 'promotion' feature is, originally, given by sale. This function aggregates it into the selected
time scale.
Parameters
-------------
orders : A pandas DataFrame with all the sales.
items: A pandas DataFrame with the infos about all items
promotionMode : A pandas aggregation compatible data type;
The aggregation mode of the 'promotion' feature
timeScale : A String with the name of the column containing the time signature.
E.g.: 'group_backwards'
salesPriceMode : A pandas aggregation compatible data type;
The aggregation mode of the 'salesPrice' feature
"""
df = orders.groupby([timeScale, 'itemID'], as_index=False).agg(
{'order': 'sum', 'promotion': promotionMode, 'salesPrice': salesPriceMode})
items_copy = items.copy()
df.rename(columns={'order': 'orderSum', 'promotion': f'promotion_{promotionMode}',
'salesPrice': f'salesPrice_{salesPriceMode}'}, inplace=True)
return pd.merge(df, items_copy, how='left', left_on=['itemID'], right_on=['itemID'])
def dataset_builder(orders, items):
"""This function receives the 'orders' DataFrame created by Bruno's 'process_time' function.
This function aims to quickly build our dataset with few lines and simple code, based on Pandas MultiIndex Class.
Parameters
-------------
orders : A pandas DataFrame with all the sales in the format that Bruno's
'process_time' function outputs.
items : A pandas DataFrame read from 'items.csv'
Return
-------------
A new pandas DataFrame grouped by 'group_backwards', with the orders summed up and merged with the 'items' DataFrame.
"""
# Aggregating our data by pairs...
df = orders.groupby(['group_backwards', 'itemID'], as_index=False).agg(
{'order': 'sum'}).rename(columns={'order': 'orderSum'})
# Building our dataset through multiindexing...
multiIndex = pd.MultiIndex.from_product(
[range(13, 0, -1), items['itemID']], names=['group_backwards', 'itemID'])
aux = pd.DataFrame(index=multiIndex)
df = pd.merge(aux, df, left_on=['group_backwards', 'itemID'], right_on=[
'group_backwards', 'itemID'], how='left')
df.fillna(0, inplace=True)
# Gettin' information about our items into our dataset...
df = pd.merge(df, items, left_on=['itemID'], right_on=[
'itemID']).sort_values(['group_backwards', 'itemID'], ascending=[False, True])
assert (np.sum(df.group_backwards.unique() == [
range(13, 0, -1)]) == 13), ("Something is wrong with the number of weeks")
assert (len(df) == len(items) *
13), ("There are items missing from your dataset!")
df.reset_index(drop=True, inplace=True)
return df
def cumulative_sale_by_category(df, category='category3'):
"""
This function adds the "percentage_accum" feature to our dataset,
which tries to describe how important a certain item is within
its group for each category (either 1, 2 or 3).
Parameters: df -> DataFrame produced by "process_time" and "dataset_builder"
Returns: our DataFrame with a new column (percentage_accum_<category>)
"""
acum = pd.DataFrame()
for i in range(12, 0, -1):
orders_per_item = df.loc[df.group_backwards > i].groupby(
['itemID', category], as_index=False).agg({'orderSum': 'sum'})
orders_per_cat = df.loc[df.group_backwards > i].groupby(
[category], as_index=False).agg({'orderSum': 'sum'})
# Mergin' the amount of sales by category
# with the accumulated sales
# of an item grouped by category
# of the previous weeks
cum_sum_mean = pd.merge(orders_per_item, orders_per_cat,
left_on=category, right_on=category, validate="m:1")
# Calculating the mean of the accumulated sales...
cum_sum_mean[f'percentage_accum_{category}'] = cum_sum_mean['orderSum_x'] / \
cum_sum_mean['orderSum_y'] * 100
# These columns won't be useful anymore,
# since they were used just to calculate our mean
cum_sum_mean.drop(columns=['orderSum_x', 'orderSum_y'], inplace=True)
feature_merge = pd.merge(df.loc[df.group_backwards == i], cum_sum_mean.drop(
columns=[category]), left_on='itemID', right_on='itemID')
acum = pd.concat([acum, feature_merge])
week_13 = df.loc[df.group_backwards == 13].copy()
week_13[f'percentage_accum_{category}'] = 0
acum = pd.concat([week_13, acum])
assert (acum.loc[acum.group_backwards == 13][f'percentage_accum_{category}'].sum(
) == 0), ("The values on week 13 should all be zero. Verify your inputs")
acum.reset_index(drop=True, inplace=True)
return acum
def time_encoder(data, col, max_val):
"""This function aims to encode a time series in function sines and cosines.
Parameters
-------------
data : A pandas DataFrame with all the dataset
col : A string corresponding to the name of the column that will be encoded
max_val : Size of the time-window of encoding
Return
-------------
A new pandas DataFrame with two new columns, one encoded as sine and the other as cosine.
"""
data[col + '_sin'] = np.sin(2 * np.pi * data[col]/max_val)
data[col + '_cos'] = np.cos(2 * np.pi * data[col]/max_val)
return data
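# Usage sketch (assumes the 1.0v.zip dataset is available under ../main/datasets/ as defined above):
# infos, items, orders = read_data()
# process_time(orders, should_print=True)
# orders = promo_detector(orders)
# df = dataset_builder(orders, items)
# df = cumulative_sale_by_category(df, category='category3')
# df = time_encoder(df, col='group_backwards', max_val=13)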
``` |
{
"source": "joaopedromattos/pyGAT",
"score": 3
} |
#### File: joaopedromattos/pyGAT/utils.py
```python
import numpy as np
import scipy.sparse as sp
import torch
import spektral as spk
import pandas as pd
import networkx as nx
import os
def cora_networkx(path=None):
"""
Function that loads a graph using networkx.
Currently, it only works with the Cora graph dataset, and the variable G,
created within the scope of this function, IS NOT USED AT ANY MOMENT
(yet). I may use this variable later, so
I'll keep this implementation as it is now.
Parameters:
path -> A path to the cora dataset directory
Return:
adj -> Sparse and symmetric adjacency matrix of our graph.
features -> Sparse matrix with our graph features.
labels -> A NumPy array with the labels of all nodes.
idx_train -> A NumPy array with the indexes of the training nodes.
idx_val -> A NumPy array with the indexes of the validation nodes.
idx_test -> A NumPy array with the indexes of the test nodes.
"""
if (path == None):
raise ValueError("Dataset path shouldn't be of type 'None'.")
else:
# Reading our graph, according to documentation
edgelist = pd.read_csv(os.path.join(
path, "cora.cites"), sep='\t', header=None, names=["target", "source"])
# Reading and storing our feature dataframe
feature_names = ["w_{}".format(ii) for ii in range(1433)]
column_names = feature_names + ["subject"]
node_data = pd.read_csv(os.path.join(
path, "cora.content"), sep='\t', header=None, names=column_names)
# Converting our graph nodes into sequential values, in order to
# correctly index each of our feature vectors.
idx_map = {j: i for i, j in enumerate(node_data.index)}
edges = edgelist.to_numpy()
converted_edges = np.array(
list(map(idx_map.get, edges.flatten())), dtype=np.int32).reshape(edges.shape)
# In order to correctly instantiate our graph, we're going to
# convert our edges list into a sparse matrix, then transform it into
# a symmetric adj. matrix and, finally, instantiate our network in G.
adj = sp.coo_matrix((np.ones(converted_edges.shape[0]), (converted_edges[:, 0], converted_edges[:, 1])), shape=(
len(node_data), len(node_data)), dtype=np.float32)
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
G = nx.from_scipy_sparse_matrix(adj)
# Re-indexing our dataframe with our nodes in the correct order, using
# our idx_map dictionary, previously calculated...
node_data['new_index'] = node_data.index
node_data['new_index'] = node_data['new_index'].apply(
lambda x: idx_map[x])
# Encoding our labels with categorical values from 0 to 6
classes_dict = encode_onehot_dict(node_data['subject'].to_numpy())
node_data['subject'] = node_data['subject'].apply(
lambda x: classes_dict[x])
# Inserting all our calculated attributes inside our graph G
feature_dict = node_data.set_index('new_index').to_dict('index')
nx.set_node_attributes(G, feature_dict)
# Train, val, test spliting...
num_nodes = node_data.shape[0]
idxs = np.arange(0, num_nodes)
idx_train, idx_val, idx_test = np.split(
idxs, [int(.6*num_nodes), int(.8*num_nodes)])
# Adding our features to our nodes
feature_dict = node_data.set_index('new_index').to_dict('index')
nx.set_node_attributes(G, feature_dict)
# Creating our features sparse matrix and our labels numpy array
features_numpy = node_data.to_numpy()[:, :-1]
features = sp.csr_matrix(features_numpy, dtype=np.float32)
labels = features_numpy[:, -1]
return adj, features, labels, idx_train, idx_val, idx_test
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[
i, :] for i, c in enumerate(classes)}
labels_onehot = np.array(
list(map(classes_dict.get, labels)), dtype=np.int32)
return labels_onehot
def encode_onehot_dict(labels):
classes = set(labels)
classes_dict = {c: i for i, c in enumerate(classes)}
return classes_dict
def new_load_data(*args, path="./pyGAT/data/cora/", dataset='cora', custom_function=False, function=cora_networkx):
print(f"[LOAD DATA]: {dataset}")
if (not custom_function):
if (dataset == "cora" or dataset == 'citeseer' or dataset == 'pubmed'):
adj, features, labels, train, val, test = spk.datasets.citation.load_data(
dataset_name=dataset, normalize_features=True, random_split=True)
elif (dataset == 'ppi' or dataset == 'reddit'):
adj, features, labels, train, val, test = spk.datasets.graphsage.load_data(
dataset_name=dataset, max_degree=1, normalize_features=True)
else:
raise ValueError(
"Dataset not supported. List of supported datsets: ['cora', 'citeseer', 'pubmed', 'ppi', 'reddit']")
print(f"ADJ {type(adj)}, \nFEATURES {type(features)}, \nLABELS {type(labels)}, \nTRAIN {type(train)}, \nVAL {type(val)}, \nTEST {type(test)}")
# Converting one-hot encoding into categorical
# values with the indexes of each dataset partition
idx_train, idx_val, idx_test = np.where(train)[0], np.where(val)[
0], np.where(test)[0]
else:
if (function == cora_networkx or function == None):
adj, features, labels, idx_train, idx_val, idx_test = cora_networkx(
path)
else:
adj, features, labels, idx_train, idx_val, idx_test = function(
*args)
# Normalizing our features and adjacency matrices
# features = normalize_features(features)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))
if (sp.issparse(adj)):
adj = adj.todense()
if (sp.issparse(features)):
features = features.todense()
# With networkx, we no longer need to convert from one-hot encoding...
if (not custom_function):
labels = np.where(labels)[1]
adj = torch.FloatTensor(adj)
features = torch.FloatTensor(features)
labels = torch.LongTensor(labels)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
def original_load_data(path="./pyGAT/data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Test {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt(
"{}{}.content".format(path, dataset), dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt(
"{}{}.cites".format(path, dataset), dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(
labels.shape[0], labels.shape[0]), dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# Normalizing our features and adjacency matrices
features = normalize_features(features)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
adj = torch.FloatTensor(np.array(adj.todense()))
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
def normalize_adj(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt)
def normalize_features(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
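# Usage sketch (assumes the Cora citation files are available under ./pyGAT/data/cora/):
# adj, features, labels, idx_train, idx_val, idx_test = new_load_data(dataset='cora')
# # or, to exercise the networkx-based loader defined above:
# # adj, features, labels, idx_train, idx_val, idx_test = new_load_data(custom_function=True)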
``` |
{
"source": "Joao-Pedro-MB/projeto-final-MC536",
"score": 3
} |
#### File: bd_acess_point/relational_querier/dataSUS.py
```python
import sys
import pandas as pd
import unicodedata
import numpy as np
from relational_querier import RelationalQuerier
# Loads dataSUS info into a single .csv
def loadcsv():
print('baixando os dados')
srag_2013 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/4919f202-083a-4fac-858d-99fdf1f1d765/download/influd13_limpo_final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2014 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/2182aff1-4e8b-4aee-84fc-8c9f66378a2b/download/influd14_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2015 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/97cabeb6-f09e-47a5-8358-4036fb10b535/download/influd15_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2016 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/dbb0fd9b-1345-47a5-86db-d3d2f4868a11/download/influd16_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2017 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/aab28b3c-f6b8-467f-af0b-44889a062ac6/download/influd17_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2018 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/a7b19adf-c6e6-4349-a309-7a1ec0f016a4/download/influd18_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_201314 = srag_2013.merge(srag_2014, how='outer')
srag_20131415 = srag_201314.merge(srag_2015, how='outer')
srag_2013141516 = srag_20131415.merge(srag_2016, how='outer')
srag_201314151617 = srag_2013141516.merge(srag_2017, how='outer')
srag_20131415161718 = srag_201314151617.merge(srag_2018, how='outer')
return srag_20131415161718
# Generates a .csv and saves it for quicker reruns
def gencsv():
print('gerando o csv')
srag_full = loadcsv()
srag_full.to_csv("srag_full.csv", index=True)
print("srag_full.csv has been successfully generated")
def add_data_relational(db, df = None, csv = None):
print('adicionando os dados ao banco SRAG')
if df is None and csv is not None:
df = pd.read_csv(csv)
if len(df.columns) >= 109:
df.drop([df.columns[0], df.columns[1]], axis=1, inplace=True)
i = 0
for data in df.values:
i += 1
print(i)
query = """
INSERT INTO SRAG
(DT_NOTIFIC, ID_MUNICIP ,SEM_NOT ,SG_UF_NOT ,DT_SIN_PRI ,DT_NASC ,NU_IDADE_N ,CS_SEXO ,CS_GESTANT ,
CS_RACA ,CS_ESCOL_N ,SG_UF ,ID_MN_RESI ,ID_OCUPA_N ,VACINA ,FEBRE ,TOSSE ,CALAFRIO ,DISPNEIA ,
GARGANTA ,ARTRALGIA ,MIALGIA ,CONJUNTIV ,CORIZA ,DIARREIA ,OUTRO_SIN ,OUTRO_DES ,CARDIOPATI ,
PNEUMOPATI ,RENAL ,HEMOGLOBI ,IMUNODEPRE ,TABAGISMO ,METABOLICA ,OUT_MORBI ,MORB_DESC ,HOSPITAL ,
DT_INTERNA ,CO_UF_INTE ,CO_MU_INTE ,DT_PCR ,PCR_AMOSTR ,PCR_OUT ,PCR_RES ,PCR_ETIOL ,PCR_TIPO_H ,
PCR_TIPO_N ,DT_CULTURA ,CULT_AMOST ,CULT_OUT ,CULT_RES ,DT_HEMAGLU ,HEMA_RES ,HEMA_ETIOL ,HEM_TIPO_H ,
HEM_TIPO_N ,DT_RAIOX ,RAIOX_RES ,RAIOX_OUT ,CLASSI_FIN ,CLASSI_OUT ,CRITERIO ,TPAUTOCTO ,DOENCA_TRA ,
EVOLUCAO ,DT_OBITO ,DT_ENCERRA ,DT_DIGITA ,SRAG2013FINAL ,OBES_IMC ,OUT_AMOST ,DS_OAGEETI ,DS_OUTMET ,
DS_OUTSUB ,OUT_ANTIV ,DT_COLETA ,DT_ENTUTI ,DT_ANTIVIR ,DT_IFI ,DT_OUTMET ,DT_PCR_1 ,DT_SAIDUTI ,
RES_ADNO ,AMOSTRA ,HEPATICA ,NEUROLOGIC ,OBESIDADE ,PUERPERA ,SIND_DOWN ,RES_FLUA ,RES_FLUB ,UTI ,
IFI ,PCR ,RES_OUTRO ,OUT_METODO ,RES_PARA1 ,RES_PARA2 ,RES_PARA3 ,DESC_RESP ,SATURACAO ,ST_TIPOFI ,
TIPO_PCR ,ANTIVIRAL ,SUPORT_VEN ,RES_VSR ,RES_FLUASU ,DT_UT_DOSE)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"""
result = db.query(query, data)
db.commit()
def add_data_relational_cities(db, df = None, csv = None):
if csv:
df = pd.read_csv(csv)
if len(df.columns) > 12:
df.drop(df.columns[0], axis=1, inplace=True)
i = 0
for data in df.values:
i += 1
print(i)
query = """
INSERT INTO Cidades
(UF, Nome_UF, Mesorregiao_geografica, Nome_mesorregiao, Microrregiao_geografica,
Nome_microrregiao, Municipio, Cod_municipio, Nome_municipio, Pop_estimada, lat, lon)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?);"""
result = db.query(query, data)
db.commit()
db = RelationalQuerier()
def normalize_names(s):
s = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
return s.replace("'", "").replace(" ", " ").lower()
def normalize_cities():
IBGE = pd.read_csv('IBGE_Municipios.csv')
geo = pd.read_csv('cidades_geo.csv')
pop_br = pd.read_csv('pop_brasil.csv')
pop_estados = pd.read_csv('pop_estados.csv')
pop_br['NOME DO MUNICÍPIO'] = pop_br['NOME DO MUNICÍPIO'].map(normalize_names)
geo['ID_MUNICIP'] = geo['ID_MUNICIP'].map(normalize_names)
IBGE['Nome_Município'] = IBGE['Nome_Município'].map(normalize_names)
pop_br.rename(columns = {'NOME DO MUNICÍPIO':'cidade'}, inplace = True)
geo.rename(columns = {'ID_MUNICIP':'cidade'}, inplace = True)
IBGE.rename(columns = {'Nome_Município':'cidade'}, inplace = True)
pop_br.drop(['UF','COD. UF','COD. MUNIC'], axis=1, inplace=True)
geo.drop(['index','UF'], axis=1, inplace=True)
df = pd.merge(IBGE, pop_br, on='cidade', how='left')
df = pd.merge(df, geo, on='cidade', how='left')
df = df.drop_duplicates(subset=['cidade'], keep='first')
df.to_csv('normalized_cities.csv')
return df
def main():
if("-gencsv" in sys.argv):
gencsv()
try:
srag_full = pd.read_csv("srag_full.csv")
except FileNotFoundError:
srag_full = loadcsv()
IBGE = pd.read_csv("IBGE_Municipios.csv")
# Uses a dict for optimized city code to city name conversion
municipdict = {}
pd.options.mode.chained_assignment = None
IBGE['Nome_Município'] = IBGE['Nome_Município'].map(normalize_names)
for i in range (len(IBGE['Código Município Completo'])):
IBGE['Código Município Completo'][i] = str(IBGE['Código Município Completo'][i])[0:6]
municipdict[IBGE['Código Município Completo'][i]] = IBGE['Nome_Município'][i]
count = 0
for i in range(len(srag_full['ID_MUNICIP'])):
try:
srag_full['ID_MUNICIP'][i] = municipdict[int(srag_full['ID_MUNICIP'][i])]
except KeyError: # If the city code can't be found, drop the row containing it
print("Erro: Chave " + srag_full['ID_MUNICIP'][i] + " na linha " + str(i) + " nao encontrada, linha sera removida dos dados")
srag_full.drop(i, inplace = True)
count = count + 1
print(str(count) + " linhas foram removidas da tabela pois continham cidades invalidas")
# Resets index column and removes redundant columns
srag_full.reset_index(inplace = True)
#srag_full.drop(srag_full.columns[[0, 1]], axis = 1, inplace = True)
srag_full.drop(['NU_ANO', 'SRAG2014FINAL', 'SRAG2015FINAL', 'SRAG2012FINAL', 'SRAG2017FINAL', 'SRAG2018FINAL'], axis = 1, inplace = True)
srag_full.to_csv("srag_full_cities.csv")
return srag_full, IBGE
if __name__ == '__main__':
try:
srag_full = pd.read_csv("srag_full_cities.csv")
except FileNotFoundError:
srag_full, IBGE = main()
print('adicionando dados')
add_data_relational(db, df = srag_full)
normalized = normalize_cities()
add_data_relational_cities(db, df = normalized)
```
#### File: breath_data/data_workflow/ibge.py
```python
import pandas as pd
URL = "https://raw.githubusercontent.com/BReATH-Brazilian-Research/breath_data/89047539c6b83ca9791a3cbb4e52106bc0eefa41/module/resources/IBGE_Municipios.csv"
def load_csv():
return pd.read_csv(URL)
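# Usage sketch: load_csv() simply downloads the IBGE municipality table from the URL above.
# municipios = load_csv()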
```
#### File: previa/src/dataSUS.py
```python
import sys
import pandas as pd
from relational_querier import RelationalQuerier
# Loads dataSUS info into a single .csv
def loadcsv():
srag_2013 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/4919f202-083a-4fac-858d-99fdf1f1d765/download/influd13_limpo_final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2014 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/2182aff1-4e8b-4aee-84fc-8c9f66378a2b/download/influd14_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2015 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/97cabeb6-f09e-47a5-8358-4036fb10b535/download/influd15_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2016 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/dbb0fd9b-1345-47a5-86db-d3d2f4868a11/download/influd16_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2017 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/aab28b3c-f6b8-467f-af0b-44889a062ac6/download/influd17_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_2018 = pd.read_csv(
"https://opendatasus.saude.gov.br/dataset/e6b03178-551c-495c-9935-adaab4b2f966/resource/a7b19adf-c6e6-4349-a309-7a1ec0f016a4/download/influd18_limpo-final.csv",
sep=';', encoding='cp1252', dtype=str)
srag_201314 = srag_2013.merge(srag_2014, how='outer')
srag_20131415 = srag_201314.merge(srag_2015, how='outer')
srag_2013141516 = srag_20131415.merge(srag_2016, how='outer')
srag_201314151617 = srag_2013141516.merge(srag_2017, how='outer')
srag_20131415161718 = srag_201314151617.merge(srag_2018, how='outer')
return srag_20131415161718
# Generates a .csv and saves it for quicker reruns
def gencsv():
srag_full = loadcsv()
srag_full.to_csv("srag_full.csv", index=True)
print("srag_full.csv has been successfully generated")
def add_data_relational(db, df = None, csv = None):
if df is None and csv is not None:
df = pd.read_csv(csv)
i = 0
for data in df.values:
i+=1
query = """
INSERT INTO SRAG
(ID_MUNICIP ,SEM_NOT ,SG_UF_NOT ,DT_SIN_PRI ,DT_NASC ,NU_IDADE_N ,CS_SEXO ,CS_GESTANT ,
CS_RACA ,CS_ESCOL_N ,SG_UF ,ID_MN_RESI ,ID_OCUPA_N ,VACINA ,FEBRE ,TOSSE ,CALAFRIO ,DISPNEIA ,
GARGANTA ,ARTRALGIA ,MIALGIA ,CONJUNTIV ,CORIZA ,DIARREIA ,OUTRO_SIN ,OUTRO_DES ,CARDIOPATI ,
PNEUMOPATI ,RENAL ,HEMOGLOBI ,IMUNODEPRE ,TABAGISMO ,METABOLICA ,OUT_MORBI ,MORB_DESC ,HOSPITAL ,
DT_INTERNA ,CO_UF_INTE ,CO_MU_INTE ,DT_PCR ,PCR_AMOSTR ,PCR_OUT ,PCR_RES ,PCR_ETIOL ,PCR_TIPO_H ,
PCR_TIPO_N ,DT_CULTURA ,CULT_AMOST ,CULT_OUT ,CULT_RES ,DT_HEMAGLU ,HEMA_RES ,HEMA_ETIOL ,HEM_TIPO_H ,
HEM_TIPO_N ,DT_RAIOX ,RAIOX_RES ,RAIOX_OUT ,CLASSI_FIN ,CLASSI_OUT ,CRITERIO ,TPAUTOCTO ,DOENCA_TRA ,
EVOLUCAO ,DT_OBITO ,DT_ENCERRA ,DT_DIGITA ,SRAG2013FINAL ,OBES_IMC ,OUT_AMOST ,DS_OAGEETI ,DS_OUTMET ,
DS_OUTSUB ,OUT_ANTIV ,DT_COLETA ,DT_ENTUTI ,DT_ANTIVIR ,DT_IFI ,DT_OUTMET ,DT_PCR_1 ,DT_SAIDUTI ,
RES_ADNO ,AMOSTRA ,HEPATICA ,NEUROLOGIC ,OBESIDADE ,PUERPERA ,SIND_DOWN ,RES_FLUA ,RES_FLUB ,UTI ,
IFI ,PCR ,RES_OUTRO ,OUT_METODO ,RES_PARA1 ,RES_PARA2 ,RES_PARA3 ,DESC_RESP ,SATURACAO ,ST_TIPOFI ,
TIPO_PCR ,ANTIVIRAL ,SUPORT_VEN ,RES_VSR ,RES_FLUASU ,DT_UT_DOSE)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,
?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);"""
result = db.query(query, data)
print(i)
db.commit()
db = RelationalQuerier()
def main():
if("-gencsv" in sys.argv):
gencsv()
try:
srag_full = pd.read_csv("srag_full.csv")
except FileNotFoundError:
srag_full = loadcsv()
IBGE = pd.read_csv("IBGE_Municipios.csv")
# Uses a dict for optimized city code to city name conversion
municipdict = {}
pd.options.mode.chained_assignment = None
for i in range (len(IBGE['Código Município Completo'])):
IBGE['Código Município Completo'][i] = str(IBGE['Código Município Completo'][i])[0:6]
municipdict[IBGE['Código Município Completo'][i]] = IBGE['Nome_Município'][i]
count = 0
for i in range(len(srag_full['ID_MUNICIP'])):
try:
srag_full['ID_MUNICIP'][i] = municipdict[int(srag_full['ID_MUNICIP'][i])]
except KeyError: # If the city code can't be found, drop the row containing it
print("Erro: Chave " + srag_full['ID_MUNICIP'][i] + " na linha " + str(i) + " nao encontrada, linha sera removida dos dados")
srag_full.drop(i, inplace = True)
count = count + 1
print(str(count) + " linhas foram removidas da tabela pois continham cidades invalidas")
# Resets index column and removes redundant columns
srag_full.reset_index(inplace = True)
srag_full.drop(srag_full.columns[[0, 1]], axis = 1, inplace = True)
srag_full.drop(['NU_ANO', 'SRAG2014FINAL', 'SRAG2015FINAL', 'SRAG2012FINAL', 'SRAG2017FINAL', 'SRAG2018FINAL'], axis = 1, inplace = True)
srag_full.to_csv("srag_full_cities.csv")
return srag_full
if __name__ == '__main__':
try:
srag_full = pd.read_csv("srag_full_cities.csv")
except FileNotFoundError:
main()
print('adicionando dados')
add_data_relational(db, df = srag_full)
``` |
{
"source": "joaopedromoraez/study-on-packages-license-npm",
"score": 3
} |
#### File: study-on-packages-license-npm/src/duplicate-license-csv.py
```python
import subprocess
import os
import csv
import json
import pandas as pd
def getOutputShell(local):
path = local.replace('/[','').replace(']',' ').replace('/','').split(' ')
subprocess.getoutput("cloc repositories/\["+path[0]+"\]"+path[1]+" > .temp.txt")
resultado = subprocess.getoutput("cat .temp.txt | grep SUM | awk '{print$5}'")
return resultado
# Checks whether a word occurs in a string
def wordSearch(string, stringParameter):
return string.lower().count(stringParameter) != 0
# Returns whether a repository declares more than one license
def licencaDuplicada(file):
# Opens the summary .csv file
with open(file) as csvfile:
# Loads the values into a csv reader, where each item is a row
temp = csv.reader(csvfile, delimiter=',')
# Creates the list that will receive the reader's rows
readCSV = []
# Converts the reader object into a list
for line in temp:
readCSV.append(line)
# List that stores every license found
licenseAll = []
# List of licenses found at the project root
licenseOnRoot = []
# List of licenses found outside the project root
licenseOutRoot = []
# List of SPDX license keys found
licenseSPDX = []
# Lowest detection score among the licenses found
licenseScore = 100.00
licenseScoreRoot = 100.00
# Flags for whether the project contains non-permissive (incompatible) licenses
compatibleLicenses = True
compatibleLicensesRoot = True
# Licenses found in readme files
licenseReadme = []
# Licenses found in package.json files
licensePackage = []
# Licenses found in license files
licenseOnLicense = []
# Licenses found in other files
licenseOnOther = []
# Licenses found in copying files
licenseOnCopying = []
# Flag for whether the readme, package.json and license files list different licenses
# description = True
numberLines = getOutputShell(readCSV[1][0])
if (readCSV != []) and (len(readCSV[0]) > 3):
# Walks the rows collecting the licenses listed in the license column, at and outside the project root
for row in readCSV:
# Looks for licenses anywhere in the project
if(row[4] != "") and (row[4] != "license__key"):
# Looks for licenses at the project root
# if(row[3] != "") and (row[3] != "license_expression") and (row[0].count('/') == 2):
# Any license found is appended to the list
licenseAll.append(row[4])
if (row[0].count('/') == 2):
licenseOnRoot.append(row[4])
if (float(row[5]) < licenseScore):
licenseScoreRoot = float(row[5])
if (row[0].count('/') != 2):
licenseOutRoot.append(row[4])
if (float(row[5]) < licenseScore):
licenseScore = float(row[5])
# Checks whether the license is permissive anywhere in the project
if (row[8] != "Permissive"):
compatibleLicenses = False
# Checks whether a license found at the project root is permissive
if (row[8] != "Permissive") and (row[0].count('/') == 2):
compatibleLicensesRoot = False
# Stores the licenses found in readme files
if (wordSearch(row[0],'readme')) and (row[0].count('/') == 2):
licenseReadme.append(row[4])
# Stores the licenses found in package.json files
if (wordSearch(row[0],'package.json')) and (row[0].count('/') == 2):
licensePackage.append(row[4])
# Stores the licenses found in license files
if (wordSearch(row[0],'licen')) and (row[0].count('/') == 2):
licenseOnLicense.append(row[4])
filesLicences = [
wordSearch(row[0],'readme'),
wordSearch(row[0],'package.json'),
wordSearch(row[0],'licen')
]
# Stores the licenses found in any other file at the project root
if (filesLicences.count(False) == 3) and (row[0].count('/') == 2):
licenseOnOther.append(row[4])
if(row[14] != "") and (row[14] != "license__spdx_license_key"):
# Any SPDX license found is appended to the list
licenseSPDX.append(row[14])
# if (licenseReadme == []):
# description = (licensePackage.sort() == licenseOnLicense.sort())
# elif (licenseOnLicense == []):
# description = (licensePackage.sort() == licenseReadme.sort())
# elif (licensePackage == []):
# description = True
# else:
# description = (licensePackage.sort() == licenseReadme.sort() == licenseOnLicense.sort())
# if (licensePackage != []):
# description = ( licensePackage.sort() == (licenseOnLicense + licenseReadme).sort() )
# Checks whether the whole project has more than one license
# Checks whether the list is empty
if licenseAll == []:
# If it is empty, default the test variable to True
duplicadoGeral = True
else:
# Flag that denotes whether more than one license exists in the list
duplicadoGeral = False
# Stores the first license of the list for comparison
swap = licenseAll[0]
# Loop that checks whether more than one license appears in the list
for pizza in licenseAll:
if (pizza != swap):
duplicadoGeral = True
break
# Checks whether the project root has more than one license
# Checks whether the list is empty
if licenseOnRoot == []:
# If it is empty, default the test variable to True
duplicadoRoot = True
else:
# Flag that denotes whether more than one license exists in the list
duplicadoRoot = False
# Stores the first license of the list for comparison
swap = licenseOnRoot[0]
# Loop that checks whether more than one license appears in the list
for pizza in licenseOnRoot:
if (pizza != swap):
duplicadoRoot = True
break
if (len(set(licenseAll)) == 1):
compatibleLicenses = True
if (len(set(licenseOnRoot)) == 1):
compatibleLicensesRoot = True
# Returns the function result
return {
"nome": readCSV[1][0],
"duplicado_geral":duplicadoGeral,
"duplicado_raiz":duplicadoRoot,
"quantidade_geral":len(set(licenseAll)),
"quantidade_raiz":len(set(licenseOnRoot)),
"licenca_SPDX":len(set(licenseSPDX)),
"license_score_geral":licenseScore,
"license_score_raiz":licenseScoreRoot,
"licencas_compativeis_geral":compatibleLicenses,
"licencas_compativeis_raiz":compatibleLicensesRoot,
"licencas_readme":len(set(licenseReadme)),
"licencas_packageJson":len(set(licensePackage)),
"licencas_license":len(set(licenseOnLicense)),
"licencas_outros_arquivos":len(set(licenseOnOther)),
"licencas_fora_da_raiz":len(set(licenseOutRoot)),
"numero_linhas":numberLines
}
# Example with a duplicated license [node]
# print(licencaDuplicada('./summary-licenses-csv/license14.csv'))
with open('analysis_summary.csv', mode='w', encoding='utf-8', newline='') as csv_file:
fieldnames = [
'nome',
'duplicado_geral',
'duplicado_raiz',
'quantidade_geral',
'quantidade_raiz',
'licenca_SPDX',
'license_score_geral',
'license_score_raiz',
'licencas_compativeis_geral',
'licencas_compativeis_raiz',
'licencas_readme',
'licencas_packageJson',
'licencas_license',
'licencas_outros_arquivos',
'licencas_fora_da_raiz',
'numero_linhas'
]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
# writer.writerow(licencaDuplicada('./summary-licenses-csv/license14.csv'))
inicio = 0
fim = 1553
for sorvete in range(inicio, fim):
if (os.path.isfile(f'./summary-licenses-csv/license{str(sorvete)}.csv') == True) :
writer.writerow(licencaDuplicada(f'./summary-licenses-csv/license{str(sorvete)}.csv'))
name = licencaDuplicada(f'./summary-licenses-csv/license{str(sorvete)}.csv')['nome'].replace("/","")
print(f'{str(sorvete)} - {name}')
``` |
{
"source": "JoaoPedro-M/Python",
"score": 3
} |
#### File: Python/Cookie/barrinha.py
```python
import pygame
import sys
class Bola:
def __init__(self, surface, barrinha):
self.surface = surface
self.bola = pygame.image.load("bola.png")
self.bola.convert_alpha()
self.m = self.n = 300
self.barrinha = barrinha
self.rec = self.bola.get_rect()
self.rec.x, self.rec.y = self.m, self.n
self.direcao = 'SW'
def andar(self):
self.ver_colisao()
if self.direcao == 'SW':
self.m -= 5
self.n += 5
elif self.direcao == 'SE':
self.m += 5
self.n += 5
elif self.direcao == 'NW':
self.m -= 5
self.n -= 5
elif self.direcao == 'NE':
self.m += 5
self.n -= 5
def mostrar(self):
self.andar()
self.surface.blit(self.bola, (self.m, self.n))
def ver_colisao(self):
bateu_x = bateu_y = False
if self.m <= 0 or self.m >= 600:
bateu_x = True
if self.n <= 0 or self.n >= 600:
bateu_y = True
if self.barrinha.collidepoint((self.rec.x, self.rec.y)):
bateu_x = True
if bateu_x:
if self.direcao == "SW":
self.direcao = "NW"
elif self.direcao == "SE":
self.direcao = "NE"
elif self.direcao == "NW":
self.direcao = "SW"
elif self.direcao == "NE":
self.direcao = "SE"
if bateu_y:
if self.direcao == "SW":
self.direcao = "SE"
elif self.direcao == "SE":
self.direcao = "SW"
elif self.direcao == "NW":
self.direcao = "NE"
elif self.direcao == "NE":
self.direcao = "NW"
def sair():
print("acabou o jogo!")
pygame.quit()
sys.exit()
pygame.init()
tela = pygame.display.set_mode((600, 600))
pygame.display.set_caption("Jogo da barrinha")
pygame.mouse.set_pos(300, 300)
barrinha = pygame.Rect(200, 500, 130, 30)
bola = Bola(tela, barrinha)
tempo = pygame.time.Clock()
while True:
x, y = pygame.mouse.get_pos()
barrinha.x = x - 65
if barrinha.x < 0:
barrinha.x = 0
elif barrinha.x > 470:
barrinha.x = 470
for event in pygame.event.get():
if event.type == pygame.QUIT:
sair()
tela.fill((0, 0, 0))
pygame.draw.rect(tela, (255, 255, 255), barrinha)
bola.mostrar()
pygame.display.flip()
tempo.tick(30)
```
#### File: Python/Cookie/teste.py
```python
import pygame
import sys
import random
def criar_asteroide():
x1 = random.randint(0, 500)
asteroide = pygame.Rect(x1, 0, 30, 50)
return asteroide
def sair():
print('acabou o jogo!')
pygame.quit()
sys.exit()
pygame.init()
tela = pygame.display.set_mode((600, 600))
pygame.display.set_caption("Jogo de Nave")
tempo = pygame.time.Clock()
pygame.mouse.set_pos(315, 300)
nave = pygame.image.load("nave.png")
y = x = 0
tiros = []
contador = 0
asteroides = []
hora_asteroides = []
while True:
if len(hora_asteroides) == 0:
for c in range(0, 10):
hora_asteroides.append(random.randint(0, 100))
for event in pygame.event.get():
if event.type == pygame.QUIT:
sair()
if event.type == pygame.MOUSEMOTION:
x, y = pygame.mouse.get_pos()
if y != 500:
y = 500
x -= 30
if x < 0:
x = 0
elif x > 540:
x = 540
tela.fill((0, 0, 0))
tela.blit(nave, (x, y))
if contador % 50 == 0 and contador > 0:
tiros.append(pygame.Rect(x+20, 480, 20, 50))
contador += 1
for tiro in tiros:
tiro.y -= 6
if tiro.y < -100:
tiros.remove(tiro)
pygame.draw.rect(tela, (255, 0, 0), tiro)
if contador % hora_asteroides[0] == 0 and contador != 0:
asteroides.append(criar_asteroide())
for ast in asteroides:
ast.y += 1
if ast.y > 600:
sair()
for tiro in tiros:
if ast.colliderect(tiro):
asteroides.remove(ast)
tiros.remove(tiro)
pygame.draw.rect(tela, (255, 255, 255), ast)
tempo.tick(70)
pygame.display.flip()
```
#### File: Selenium/aprendendo-api/game.py
```python
import pygame
def event_handler(event):
pass
def player(screen):
pass
```
#### File: meu_site/boletim/models.py
```python
from django.db import models
class MateriasSeries(models.Model):
nome = models.CharField(max_length=200)
def __str__(self):
return self.nome
class MateriaAluno(models.Model):
nome = models.CharField(max_length=200)
def __str__(self):
return self.nome
class Series(models.Model):
materias = models.ForeignKey(to=MateriasSeries, on_delete=models.CASCADE)
nome = models.CharField(max_length=50)
def __str__(self):
return self.nome
class Materias(models.Model):
series = models.ForeignKey(to=MateriasSeries, on_delete=models.CASCADE)
alunos = models.ForeignKey(to=MateriaAluno, on_delete=models.CASCADE)
nome = models.CharField(max_length=50)
nota = models.IntegerField()
def __str__(self):
return self.nome
class Alunos(models.Model):
materias = models.ForeignKey(to=MateriaAluno, on_delete=models.CASCADE)
nome = models.CharField(max_length=50)
def __str__(self):
return self.nome
``` |
{
"source": "JoaoPedroPP/useless-code-I-created-for-nothing",
"score": 3
} |
#### File: useless-code-I-created-for-nothing/async-python-requests-on-docker/__main__.py
```python
import sys
from datetime import datetime
import requests
import feedparser
import asyncio
import json
import time
from requests.auth import HTTPBasicAuth
async def req2(data, i):
headers = {"Content-Type": "application/json"}
reti = requests.post("https://", json=data, auth=HTTPBasicAuth("user", "password"), headers=headers)
if i == 3:
time.sleep(5)
return reti.json()
async def req():
resps = []
dado = []
loop = asyncio.get_event_loop()
parsed = feedparser.parse('https://threatpost.com/feed/')
for i in range(len(parsed['entries'])):
link = parsed['entries'][i]['link']
title = parsed['entries'][i]['title']
date = parsed['entries'][i]['published']
sys_date = datetime.strptime(date, "%a, %d %b %Y %X %z")
data = {"link": link, "title": title, "date": date}
resp = loop.create_task(req2(data,i))
resps.append(resp)
for response in await asyncio.gather(*resps):
dado.append(response)
pass
return dado
def main(dict):
loop = asyncio.get_event_loop()
a = loop.run_until_complete(req())
print(a)
data = {"data": json.dumps(a)}
return data
x = main({})
print(x)
``` |
{
"source": "JoaoPedro-Ramos/pythonbirds",
"score": 3
} |
#### File: pythonbirds/oo/teste_carro.py
```python
from unittest import TestCase
from oo.carro import Motor
class CarroTestCase(TestCase):
def test_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
def test_acelerar(self):
motor = Motor()
motor.acelerar()
self.assertEqual(1, motor.velocidade)
def test_frear(self):
motor = Motor()
motor.frear()
self.assertEqual(0, motor.velocidade)
``` |
{
"source": "joaopequenovieira/fund-manager",
"score": 2
} |
#### File: joaopequenovieira/fund-manager/fund_manager_gui.py
```python
import tkinter as tk
from tkinter import ttk
import db_manage
def add_new_fund():
window = tk.Toplevel()
window.wm_title('Add new fund...')
window.geometry('650x50')
link_entry = tk.Entry(window)
link_entry.pack(fill='x', expand=1)
#lambda to prevent function being called on assignment
def add_butt_func():
db_manage.db_manager.add_fund(db, (link_entry.get()))
app.populate_table()
window.destroy()
add_button = tk.Button(window, text='Add', command=lambda: (add_butt_func()))
add_button.pack(side='right')
cancel_button = tk.Button(window, text='Cancel', command=window.destroy)
cancel_button.pack(side='right')
class fund_manager(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.grid()
self.create_menu_widget()
self.create_table_widget()
self.pack(fill='both', expand=1)
def create_menu_widget(self):
#menu bar setup
self.menubar = tk.Menu(self)
menubar = self.menubar
self.master.config(menu=menubar)
#new submenu
menu_new = tk.Menu(menubar, tearoff=False)
menubar.add_cascade(menu=menu_new, label='New')
menu_new.add_command(label='Add New Fund', command=add_new_fund)
def create_table_widget(self):
"""Create principle fund table"""
#main fund table
self.fund_tree = tk.ttk.Treeview(self)
fund_tree = self.fund_tree
fund_tree['show'] = 'headings' #remove empty column identifier
fund_tree.pack(fill='both', expand=1)
#define columns here
fund_tree['columns'] = ('isin', 'fund_name', 'nav', 'change')
fund_tree.heading('isin', text='ISIN')
fund_tree.column('isin', anchor='center')
fund_tree.heading('fund_name', text='Fund Name')
fund_tree.column('fund_name', anchor='center')
fund_tree.heading('nav', text='NAV')
fund_tree.column('nav', anchor='center')
fund_tree.heading('change', text='Change')
fund_tree.column('change', anchor='center')
def populate_table(self):
data = db_manage.db_manager.retrieve_fund_data(db)
for fund in data:
self.fund_tree.insert('', 'end', iid=None, values=(fund[1],fund[2], fund[3], fund[4]))
if __name__ == "__main__":
#initialize database
db = db_manage.db_manager('fund_manager.db')
db.load_database()
db.update_funds()
root = tk.Tk(className="Fund Manager")
root.attributes('-zoomed', True)
root.title('Fund Manager')
app = fund_manager(root)
app.populate_table()
root.mainloop()
```
#### File: joaopequenovieira/fund-manager/fund_manager.py
```python
import requests
from bs4 import BeautifulSoup
class main_funcs:
def add_new_fund(link):
if link == "":
return
page = requests.get(link)
source_page = BeautifulSoup(page.content, 'html.parser')
#get fund title
fund_title = (source_page.select(".snapshotTitleTable h1"))
fund_title = fund_title[0].get_text()
#get overview table elements
overview_table = (source_page.select(".overviewKeyStatsTable"))
overview_table = overview_table[0].find_all("td")
#get fund isin
fund_isin = overview_table[15].get_text()
#get fund nav
fund_nav_temp = overview_table[3].get_text()
#remove letters, consider splitting string in future for currency and value
fund_nav = ""
for c in fund_nav_temp:
if (ord(c) == 46 or (ord(c) >= 48 and ord(c) <= 57)):
fund_nav += c
#get daily change
fund_daily_change = overview_table[6].get_text()
values = [link, fund_isin, fund_title, fund_nav, fund_daily_change]
return(values)
``` |
{
"source": "joao-pereira5/id-gen",
"score": 3
} |
#### File: joao-pereira5/id-gen/randgen.py
```python
import sys, random
# Referenced in the except block at the bottom of the script
generator = None
try:
# Available values
formats = [ "plain", "csv", "json" ]
col_types = [ "name", "firstname", "surname", "email" ]
emails = [ "@gmail.com", "@outlook.com", "@hotmail.com", "@mail.com", "@yahoo.com" ]
stderr = sys.stderr
output = sys.stdout
# Infinite source of names
# __next__ returns a dictionary of values to be formatted by this class' implementations
class RandomTupleGenerator:
def __init__(self, names, surnames, cols, iterations):
self.names = names # collection of firstnames to pick from
self.surnames = surnames # collection of surnames to pick from
self.cols = cols # column labels
self.iterations = iterations # maximum iterations. -1 for infinity
self.idx = 0 # iteration counter
# Prevent collision of unique values
self.emails = []
def __iter__(self):
return self
def __next__(self):
if self.iterations == 0:
raise StopIteration()
if self.iterations > 0:
self.iterations -= 1
self.idx += 1
attributes = {
'firstname' : random.choice(self.names),
'surname' : random.choice(self.surnames)
}
attributes.update({'name': "%s %s" % (attributes["firstname"], attributes["surname"])})
# Generate pseudo-random email and check if it already exists
while True:
email = "%s%d%s" % (attributes["name"].lower().replace(" ", random.choice([ "", "-", "_" ])), random.randint(0, 999), random.choice(emails))
if self.emails.count(email) == 0:
self.emails.append(email)
attributes.update({'email' : email})
break
return attributes
class RandomJsonGenerator(RandomTupleGenerator):
def __next__(self):
attributes = RandomTupleGenerator.__next__(self)
elem = "{"
if self.idx == 1:
elem = "[\n{"
for c in self.cols:
elem += ' "%s": "%s",' % (c, attributes[c])
elem = elem[:-1] # trim last ","
elem += " },"
if self.iterations == 0:
elem = elem[:-1] + "\n]"
return elem
class RandomCsvGenerator(RandomTupleGenerator):
def __next__(self):
attributes = RandomTupleGenerator.__next__(self)
elem = ""
if self.idx == 1:
elem += ",".join(cols) + "\n"
for c in self.cols:
elem += '"' + attributes[c] + "\","
return elem[:-1] # trim last ","
class RandomPlainGenerator(RandomTupleGenerator):
def __next__(self):
attributes = RandomTupleGenerator.__next__(self)
elem = ""
for c in self.cols:
elem += attributes[c] + " "
return elem[:-1] # trim last " "
def pick_generator(firstnames, surnames, cols, iterations, outformat):
if outformat == "json":
return RandomJsonGenerator(firstnames, surnames, cols, iterations)
if outformat == "csv":
return RandomCsvGenerator(firstnames, surnames, cols, iterations)
return RandomPlainGenerator(firstnames, surnames, cols, iterations)
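# Usage sketch (hypothetical values): iterate over the returned generator to stream formatted records, e.g.
# for line in pick_generator(["Ann"], ["Lee"], ["name", "email"], 3, "csv"):
#     print(line)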
# Save file contents in a list and close handle ASAP
def file_to_list(filename):
f = open(filename, "r", encoding="utf-8")
arr = []
for l in f:
arr.append(l.strip('\n'))
f.close()
return arr
# Properties and arguments' interpretation
## Defaults
lang = "us"
filename = None # default = stdout
iterations = -1 # default = infinity
outformat = formats[0]
cols = [ col_types[0] ]
for arg in sys.argv:
pair = arg.split('=')
if len(pair) != 2:
continue
prop = pair[0]
value = pair[1]
if prop == "lang":
lang = value
elif prop == "file":
filename = value
elif prop == "iter":
iterations = int(value)
elif prop == "format":
if formats.count(value) == 0:
print("'%s' is not an available format. Using 'plain', instead." % value, file=stderr)
else:
outformat = value
elif prop == "cols":
mycols = value.split(',')
ret = []
for c in mycols:
if col_types.count(c) == 0:
print("'%s' is not an available column type. Skipping." % c, file=stderr)
continue
ret.append(c)
if len(ret) > 0:
cols = ret
del sys
# Store all names in auxiliar collections
try:
firstnames = file_to_list("names/%s/boynames" % lang) + file_to_list("names/%s/girlnames" % lang)
surnames = file_to_list("names/%s/surnames" % lang)
except FileNotFoundError:
print("Language %s is not supported" % lang, file=stderr)
quit(128)
# Printing output is stdout (default), or a specified file
if filename != None:
output = open(filename, "a", encoding="utf-8")
# Iterate through a random tuple source
generator = pick_generator(firstnames, surnames, cols, iterations, outformat)
for l in generator:
print(l, file=output)
output.close()
except KeyboardInterrupt:
if generator is not None:
generator.iterations = 1
for l in generator:
print(l, file=output)
output.close()
quit(1)
``` |
{
"source": "joaoperfig/Evike",
"score": 3
} |
#### File: joaoperfig/Evike/evike.py
```python
import example
import random
def initial_generation(creator, quantity):
pop = []
for i in range(quantity):
pop += [creator()]
return pop
def fitnesses(generation, fitness_getter):
fitnesses = []
for member in generation:
fitnesses += [fitness_getter(member)]
return fitnesses
def create_mating_pool(generation, fitnesses, quantity, mode):
if mode == "top":
selectids = []
result = []
while len(result) < quantity:
bestm = None
besti = 0
bestf = 9999999999
for i, member in enumerate(generation):
f = fitnesses[i]
if f <= bestf and not i in selectids:
bestf = f
bestm = member
besti = i
result += [bestm]
selectids += [besti]
return result
elif mode == "roulette":
result = []
invert = []
for f in fitnesses:
invert += [1/f]
totalf = sum(invert)
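# The loop below implements roulette-wheel selection with replacement, weighting each member by the
# inverse of its fitness (lower fitness is better here). As a sketch, it is roughly equivalent to:
# result = random.choices(generation, weights=invert, k=quantity)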
while len(result) < quantity:
selected = random.random() * totalf
counter = 0
for i, inv in enumerate(invert):
if inv + counter >= selected:
result += [generation[i]]
break
else:
counter += inv
return result
elif mode == "tournament":
result = []
while len(result) < quantity:
i1, f1 = random.choice(list(enumerate(fitnesses)))
i2, f2 = random.choice(list(enumerate(fitnesses)))
if f1 < f2:
result += [generation[i1]]
else:
result += [generation[i2]]
return result
else:
raise ValueError("No such selection mode")
def mate(mating_pool, quantity, mating_method):
result = []
while len(result) <= quantity:
newmember = mating_method(random.choice(mating_pool), random.choice(mating_pool))
result += [newmember]
return result
def mutate(generation, mutate_method):
result = []
for member in generation:
result += [mutate_method(member)]
return result
def do_full_next_generation(generation, fitness_getter, mating_size, mating_mode, mating_method, mutate_method):
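# In this module a lower fitness value is better (selection minimizes it), so max(fit) below is the
# worst score of the generation and min(fit) is the best.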
fit = fitnesses(generation, fitness_getter)
mp = create_mating_pool(generation, fit, mating_size, mating_mode)
nex = mate(mp, len(generation), mating_method)
nex = mutate(nex, mutate_method)
return (max(fit), min(fit), nex)
def do_everything(duration, members, creator_method, fitness_getter, mating_size, mating_mode, mating_method, mutate_method):
generation = initial_generation(creator_method, members)
for i in range(duration):
worst, best, generation = do_full_next_generation(generation, fitness_getter, mating_size, mating_mode, mating_method, mutate_method)
print(best)
``` |
{
"source": "Joaopeuko/binanceSpotEasyT",
"score": 3
} |
#### File: binanceSpotEasyT/binanceSpotEasyT/tick.py
```python
import datetime
import requests
from abstractEasyT import tick
from supportLibEasyT import log_manager
from binanceSpotEasyT.util import setup_environment
class Tick(tick.Tick):
"""
Tick class is responsible for retrieving every tick's information.
"""
def __init__(self,
symbol: str):
"""
Args:
symbol:
It is the symbol you want information about. You can have information about time, bid, ask, last, volume.
"""
self._log = log_manager.LogManager('binance-spot')
self._log.logger.info('Logger Initialized in Tick')
self.url_base, self._key, self._secret = setup_environment(self._log)
self._symbol = symbol
self.time = None
self.bid = None
self.ask = None
self.last = None
self.volume = None
def change_symbol(self, new_symbol: str) -> None:
"""
This function changes the symbol.
Args:
new_symbol:
It receives the new symbol
Returns:
It updates the self._symbol to the new symbol.
"""
self._symbol = new_symbol.upper()
def get_new_tick(self):
"""
Every time this function is called it updates the last tick information; it is important to keep this
information up to date so that you always work with the most recent values.
Returns:
It updates the attributes in the constructor.
Examples:
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.tick import Tick
>>> initialize = Initialize()
>>> initialize.initialize_platform()
>>> initialize.initialize_symbol('BTCUSDT')
>>> # It will return the most recent information, but it will return None at the first time.
>>> # The tick need the information to be updated everytime.
>>> btcusdt_tick = Tick(symbol='BTCUSDT')
>>> btcusdt_tick.ask
None
>>> # When you update the tick:
>>> btcusdt_tick.get_new_tick()
>>> btcusdt_tick.ask
1.09975
>>> btcusdt_tick.bid
1.09975
>>> # You must have noticed that I used bid and ask; some exchanges do not return the last value,
>>> # so you can only rely on the bid and ask information. If you try to read last it will print 0.0.
>>> # But remember, not all exchanges do that, you must check it. Binance returns the last value.
>>> btcusdt_tick.last
47572.46
You can ask for this information: time, bid, ask, last, volume.
"""
self._log.logger.info('Tick updated')
url_ticker = '/api/v3/ticker/24hr'
result = requests.get(self.url_base + url_ticker, {'symbol': self._symbol})
result.raise_for_status()
ticker = result.json()
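# Binance returns timestamps such as closeTime in milliseconds since the Unix epoch,
# hence the division by 1000 before building a datetime object.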
self.time = datetime.datetime.fromtimestamp(ticker['closeTime'] / 1000)
self.bid = float(ticker['bidPrice'])
self.ask = float(ticker['askPrice'])
self.last = float(ticker['lastPrice'])
self.volume = float(ticker['volume'])
```
#### File: binanceSpotEasyT/binanceSpotEasyT/trade.py
```python
import hmac
import math
import time
import hashlib
import numpy as np
import requests
from abstractEasyT import trade
from urllib.parse import urlencode
from supportLibEasyT import log_manager
from binanceSpotEasyT.util import get_price_last
from binanceSpotEasyT.util import setup_environment
from binanceSpotEasyT.util import get_symbol_asset_balance
class Trade(trade.Trade):
"""
This class is responsible for handling all the trade requests.
"""
def __init__(self,
symbol: str,
lot: float,
stop_loss: float,
take_profit: float
):
"""
It is allowed to have only one position at a time per symbol; right now it is not possible to open a position and
increase its size or to open an opposite position. Opening a position in the opposite direction will close the existing one.
Args:
symbol:
It is the symbol you want to open or close, or check if you already have an operation opened.
lot:
It is how many shares you want to trade; many symbols allow fractions and others require a
certain amount. It can be 0.01, 100.0, 1000.0, 10000.0.
stop_loss:
It is how much you accept to lose. Example: If you buy a share for US$10.00, and you accept to lose US$1.00,
you set this variable to 1.00, and you will be out of the operation at US$9.00 (sometimes more, sometimes less;
the US$9.00 is the trigger). Keep in mind that some symbols have different point metrics, US$1.00 sometimes
can be 1000 points.
take_profit:
It is how much you accept to win. Example: If you buy a share for US$10.00, and you accept to win US$1.00,
you set this variable to 1.00, and you will be out of the operation at US$11.00 (sometimes more, sometimes less;
the US$11.00 is the trigger). Keep in mind that some symbols have different point metrics, US$1.00 sometimes
can be 1000 points.
"""
self._log = log_manager.LogManager('binance-spot')
self._log.logger.info('Logger Initialized in Trade')
self.symbol = symbol.upper()
self.lot = lot
self.stop_loss = stop_loss
self.take_profit = take_profit
self.points = 8
self.url_base, self._key, self._secret = setup_environment(self._log)
self._trade_allowed = False
self.trade_direction = None # 'buy', 'sell', or None for no position
self.position_check()
def normalize(self, price: float) -> float:
"""
This function normalizes the price to ensure the precision that is required by the platform.
Args:
price:
It is the price that you want to be normalized, usually is the last price to open a market position.
Returns:
It returns the float price normalized under a precision that is accepted by the platform.
Examples:
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
>>> initialize.initialize_symbol('BTCUSDT')
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=1.0, stop_loss=1.0, take_profit=1.0)
>>> # The normalize function is used inside other functions, but the idea is to normalize the value to
>>> # be accepted in the trade request. If you want to see this function in action you can look at
>>> # open_buy() and open_sell()
>>> btcusdt_trade.normalize(12.3456789101112131415)
12.34567891
"""
self._log.logger.info('Normalizing the price')
return np.round_(price, self.points)
def open_buy(self):
"""
This function, when called, sends a buy request to Binance with the parameters stored in the attributes.
Returns:
It returns None, but if an error occurs when opening a position it will raise an exception.
Examples:
Try this on your demo account with fake money, a position will be opened.
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
True
>>> initialize.initialize_symbol('BTCUSDT')
>>> # Notice that the stop and profit are zero, that is because the Spot do not use it.
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=0.01, stop_loss=0.0, take_profit=0.0)
>>> # When it works it returns None
>>> btcusdt_trade.open_buy()
None
>>> # Just for curiosity, if you try to open a sell position with this buy opened you will close
>>> # the buy position
>>> btcusdt_trade.open_sell()
None
"""
price_last = get_price_last(self.url_base, self.symbol)
self._log.logger.info(f'BUY Order sent: {self.symbol},'
f' {self.lot} lot(s),'
f' at {price_last}')
url_order = "/api/v3/order"
time_stamp = int(time.time() * 1000)
payload = {
"symbol": self.symbol,
"side": 'BUY',
"type": 'MARKET',
"quantity": self.lot,
"recvWindow": 5000,
"timestamp": time_stamp,
}
payload_encoded = urlencode(payload)
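# Binance signed endpoints expect an HMAC-SHA256 signature of the urlencoded query string,
# computed with the API secret and sent as the extra 'signature' parameter (the same scheme
# is used by open_sell below).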
signature = hmac.new(self._secret.encode('utf-8'), payload_encoded.encode('utf-8'), hashlib.sha256).hexdigest()
payload['signature'] = signature
order = requests.post(self.url_base + url_order,
params=payload,
headers={"X-MBX-APIKEY": self._key, })
order.raise_for_status()
self._log.logger.info('Change trade direction to BUY.')
self.trade_direction = 'buy'
def open_sell(self):
"""
This function, when called, sends a sell request to Binance with the parameters stored in the attributes.
Returns:
It returns None, but if an error occurs when opening a position it will raise an exception.
Examples:
Try this on your demo account with fake money, a position will be opened.
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
True
>>> initialize.initialize_symbol('BTCUSDT')
>>> # Notice that the stop and profit are zero, that is because the Spot do not use it.
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=0.01, stop_loss=0.0, take_profit=0.0)
>>> # When it works it returns None
>>> btcusdt_trade.open_sell()
None
>>> # Just for curiosity, if you want to try to open a buy position with this sell opened you will close
>>> # the sell position
>>> btcusdt_trade.open_buy()
None
"""
price_last = get_price_last(self.url_base, self.symbol)
self._log.logger.info(f'SELL Order sent: {self.symbol},'
f' {self.lot} lot(s),'
f' at {price_last}')
url_order = "/api/v3/order"
time_stamp = int(time.time() * 1000)
payload = {
"symbol": self.symbol,
"side": 'SELL',
"type": 'MARKET',
"quantity": self.lot,
'recvWindow': 5000,
"timestamp": time_stamp,
}
payload_encoded = urlencode(payload)
signature = hmac.new(self._secret.encode('utf-8'), payload_encoded.encode('utf-8'), hashlib.sha256).hexdigest()
payload['signature'] = signature
order = requests.post(self.url_base + url_order,
params=payload,
headers={"X-MBX-APIKEY": self._key, })
order.raise_for_status()
self._log.logger.info('Change trade direction to SELL.')
self.trade_direction = 'sell'
def position_open(self, buy: bool, sell: bool) -> str or None:
"""
This function receives two bool variables, buy and sell. If one of these variables is true and the other is false,
it opens a position on the side that is true; if both variables are true or both are false, it does not
open a position.
Args:
buy:
When buy is TRUE it receives a positive signal to open a position. When false, it is ignored.
sell:
When sell is TRUE it receives a positive signal to open a position. When false, it is ignored.
Returns:
It opens the position.
Examples:
Try this on your demo account with fake money, a position will be opened.
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
True
>>> initialize.initialize_symbol('BTCUSDT')
True
>>> # Notice that the stop and profit are zero, that is because the Spot do not use it.
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=0.01, stop_loss=0.0, take_profit=0.0)
>>> # _trade_allowed is False by default, this attribute will be handled in another project,
>>> # that is why it exists. Let's assign it to True and see what happens:
>>> btcusdt_trade._trade_allowed = True
>>> btcusdt_trade._trade_allowed
True
>>> # Currently, I do not have a position for BTC, so first I will try to open a SELL position to see
>>> # what happens since it is not allowed.
>>> btcusdt_trade.position_open(False, True)
2022-03-29 10:36:38,580 WARNING - warning - A SELL position is not allowed in Binance Spot, you can only sell a symbol if you have it.
2022-03-29 10:36:38,580 WARNING - warning - A SELL position is not allowed in Binance Spot, you can only sell a symbol if you have it.
>>> # Now that you know what happens, let's try to BUY a position.
>>> btcusdt_trade.position_open(True, False)
'buy'
>>> # It worked as a expected, let close it. You can check, the position will be closed.
>>> btcusdt_trade.position_close()
None
>>> # To finish, let see what happens if both arguments are True
>>> btcusdt_trade.position_open(True, True)
None
>>> # Nothing happens, but when both are False?
>>> btcusdt_trade.position_open(False, False)
None
>>> # Nothing happens
"""
self._log.logger.info(f'Open position called. BUY is {str(buy)}, and SELL is {str(sell)}. Trade allowed is '
f'{self._trade_allowed}.')
if self._trade_allowed and self.trade_direction is None:
if buy and not sell:
self.open_buy()
self.position_check()
if sell and not buy:
self._log.warning('A SELL position is not allowed in Binance Spot, you can only sell a symbol'
' if you have it.')
self.position_check()
return self.trade_direction
def position_close(self) -> None:
"""
This function checks the trade direction and opens an opposite position to the current one to close it.
If there is no position nothing happens.
Returns:
Close the current position by opening an opposite one.
Examples:
Try this on your demo account with fake money, a position will be opened.
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
True
>>> initialize.initialize_symbol('BTCUSDT')
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=0.01, stop_loss=0.0, take_profit=0.0)
>>> btcusdt_trade._trade_allowed = True
>>> # To know more about btcusdt_trade._trade_allowed look at the Examples in the position_open() documentation.
>>> btcusdt_trade.position_open(True, False)
'buy'
>>> # When there is a position opened, btcusdt_trade.position_close() will open a position in a different
>>> # direction to close it.
>>> btcusdt_trade.position_close()
None
>>> # It checks the trade direction and returns None when there is no trade opened.
>>> btcusdt_trade.trade_direction
None
>>> # I will open a buy position, check the trade direction, and close it!
>>> btcusdt_trade.position_open(True, False)
'buy'
>>> btcusdt_trade.trade_direction
'buy'
>>> btcusdt_trade.position_close()
>>> # We can see that it worked!
>>> # What happens when I call btcusdt_trade.position_close() with no position opened?
>>> btcusdt_trade.position_close()
None
>>> # Nothing happens, there are no position to be closed.
"""
self._log.logger.info('Close position called.')
self.position_check()
if self.trade_direction == 'buy':
self.open_sell()
self.position_check()
elif self.trade_direction == 'sell':
self.open_buy()
self.position_check()
def position_check(self) -> None:
"""
This function checks if there is a position opened and updates the variable self.trade_direction.
If there is no position, self.trade_direction will be set to None; otherwise, it is updated with the trade
direction, which can be 'sell' or 'buy'.
Returns:
This function update the variable self.trade_direction and do not return a result.
Examples:
Try this on your demo account with fake money, a position will be opened.
>>> # All the code you need to execute the function:
>>> from binanceSpotEasyT.initialization import Initialize
>>> from binanceSpotEasyT.trade import Trade
>>> initialize = Initialize()
>>> initialize.initialize_platform()
True
>>> initialize.initialize_symbol('BTCUSDT')
>>> btcusdt_trade = Trade(symbol='BTCUSDT', lot=0.01, stop_loss=0.0, take_profit=0.0)
>>> btcusdt_trade._trade_allowed = True
>>> # position_check() just ensures that btcusdt_trade.trade_direction points in the right direction.
>>> # The btcusdt_trade.trade_direction is automatically handled by open_sell() and open_buy() and
>>> # it returns the trade direction or None when there is no trade opened.
>>> btcusdt_trade.trade_direction
None
>>> btcusdt_trade.position_open(True, False)
'buy'
>>> btcusdt_trade.trade_direction
'buy'
>>> # After I open a buy position, it returns 'buy' to trade_direction, but, what happens if I manually
>>> # change the direction?
>>> btcusdt_trade.trade_direction = 'coffee shop'
>>> btcusdt_trade.trade_direction
'coffee shop'
>>> # It is possible to see that the trade_direction was changed.
>>> # and position_check() is called in all the functions that open and close positions
>>> # to ensure that direction is correct, I will call position_check() to fix my change to 'coffee shop'
>>> btcusdt_trade.position_check()
None
>>> btcusdt_trade.trade_direction
'buy'
>>> # It worked.
>>> # That is it, I will just close the position that I opened before.
>>> btcusdt_trade.position_close()
"""
self._log.logger.info('Calls Binance-Spot to check if there is a position opened.')
balance_asset = get_symbol_asset_balance(self._log, self.url_base, self._key, self._secret, self.symbol)
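# Spot trading has no short side: a non-zero balance of the asset is interpreted as an open
# 'buy' position, otherwise no position is considered open.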
if float(balance_asset) != 0.00000000:
self.trade_direction = 'buy'
else:
self.trade_direction = None
``` |
{
"source": "joaopfg/QA-ZettaByte-Flask-app",
"score": 2
} |
#### File: QA-ZettaByte-Flask-app/flaskr/book.py
```python
import functools
import warnings
warnings.filterwarnings('ignore')
import pickle
import numpy as np
import pandas as pd
import json
from textblob import TextBlob
import ast
import nltk
nltk.download('punkt')
from scipy import spatial
import torch
import spacy
import PyPDF2 as PyPDF2
import tabula as tabula
import tika
tika.initVM()
from tika import parser
from models import InferSent
from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from flaskr.db import get_db
bp = Blueprint('book', __name__, url_prefix='/book')
@bp.route('/initialpage', methods=('GET', 'POST'))
def initialpage():
if request.method == 'POST':
book_file = request.form['book']
parsed = parser.from_file(book_file)
book = parsed["content"]
question = request.form['question']
db = get_db()
error = None
if not book:
error = 'Book is required.'
elif not question:
error = 'Question is required.'
if error is None:
if db.execute('SELECT book, question FROM bq').fetchone() is None:
db.execute('INSERT INTO bq (book, question) VALUES (?, ?)',(book, question))
db.commit()
bq = db.execute('SELECT * FROM bq WHERE (book, question) = (?, ?)',(book, question)).fetchone()
session.clear()
session['bq_id'] = bq['id']
return redirect(url_for('book.finalpage'))
flash(error)
return render_template('book/initialpage.html')
@bp.route('/finalpage')
def finalpage():
bq_id = session.get('bq_id')
if bq_id is None:
g.bq = None
else:
g.bq = get_db().execute('SELECT * FROM bq WHERE id = ?', (bq_id,)).fetchone()
context = g.bq['book']
questions = []
contexts = []
questions.append(g.bq['question'])
contexts.append(g.bq['book'])
df = pd.DataFrame({"context":contexts, "question": questions})
df.to_csv("flaskr/data/train.csv", index = None)
blob = TextBlob(context)
sentences = [item.raw for item in blob.sentences]
from models import InferSent
V = 1
MODEL_PATH = 'flaskr/encoder/infersent%s.pkl' % V
params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,'pool_type': 'max', 'dpout_model': 0.0, 'version': V}
infersent = InferSent(params_model)
infersent.load_state_dict(torch.load(MODEL_PATH))
W2V_PATH = 'flaskr/GloVe/glove.840B.300d.txt'
infersent.set_w2v_path(W2V_PATH)
infersent.build_vocab(sentences, tokenize=True)
dict_embeddings = {}
for i in range(len(sentences)):
dict_embeddings[sentences[i]] = infersent.encode([sentences[i]], tokenize=True)
for i in range(len(questions)):
dict_embeddings[questions[i]] = infersent.encode([questions[i]], tokenize=True)
d1 = {key:dict_embeddings[key] for i, key in enumerate(dict_embeddings) if i % 2 == 0}
d2 = {key:dict_embeddings[key] for i, key in enumerate(dict_embeddings) if i % 2 == 1}
with open('flaskr/data/dict_embeddings1.pickle', 'wb') as handle:
pickle.dump(d1, handle)
with open('flaskr/data/dict_embeddings2.pickle', 'wb') as handle:
pickle.dump(d2, handle)
del dict_embeddings
train = pd.read_csv("flaskr/data/train.csv")
with open("flaskr/data/dict_embeddings1.pickle", "rb") as f:
d1 = pickle.load(f)
with open("flaskr/data/dict_embeddings2.pickle", "rb") as f:
d2 = pickle.load(f)
dict_emb = dict(d1)
dict_emb.update(d2)
del d1, d2
train.dropna(inplace=True)
train['sentences'] = train['context'].apply(lambda x: [item.raw for item in TextBlob(x).sentences])
train['sent_emb'] = train['sentences'].apply(lambda x: [dict_emb[item][0] if item in dict_emb else np.zeros(4096) for item in x])
train['quest_emb'] = train['question'].apply(lambda x: dict_emb[x] if x in dict_emb else np.zeros(4096) )
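# For each question, compute the cosine distance between the question embedding and every
# sentence embedding; the sentence with the smallest distance (argmin below) is returned as the answer.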
li = []
for i in range(len(train['question'])):
laux = []
for item in train["sent_emb"][i]:
laux.append(spatial.distance.cosine(item,train["quest_emb"][i]))
li.append(laux)
train['cosine_sim'] = li
train["pred_idx_cos"] = train["cosine_sim"].apply(lambda x: np.argmin(x))
g.ans = train["sentences"][0][train['pred_idx_cos'][0]]
get_db().execute('''DELETE FROM bq WHERE id = ? ''', (bq_id,))
get_db().commit()
return render_template('book/finalpage.html')
``` |
{
"source": "joaopfonseca/cluster-over-sampling",
"score": 3
} |
#### File: clover/distribution/base.py
```python
from collections import Counter
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y, check_array
class BaseDistributor(BaseEstimator):
"""The base class for distributors. A distributor sets the proportion of
samples to be generated inside each cluster and between clusters.
Warning: This class should not be used directly. Use the derived classes
instead.
"""
def _intra_distribute(self, X, y, labels, neighbors):
"""Distribute the generated samples in each cluster based on their density."""
return self
def _inter_distribute(self, X, y, labels, neighbors):
"""Distribute the generated samples between clusters based on their density."""
return self
def _validate_fitting(self):
"""Validate consistency of fitting procedure."""
# Check labels
if len(self.labels_) != self.n_samples_:
raise ValueError(
f'Number of labels should be equal to the number of samples. '
f'Got {len(self.labels_)} and {self.n_samples_} instead.'
)
# Check neighbors
if not set(self.labels_).issuperset(self.neighbors_.flatten()):
raise ValueError('Attribute `neighbors_` contains unknown labels.')
unique_neighbors = set([tuple(set(pair)) for pair in self.neighbors_])
if len(unique_neighbors) < len(self.neighbors_):
raise ValueError('Elements of `neighbors_` attribute are not unique.')
# Check distribution
proportions = {
class_label: 0.0
for class_label in self.unique_class_labels_
if class_label not in self.majority_class_labels_
}
for (_, class_label), proportion in self.intra_distribution_.items():
proportions[class_label] += proportion
for (
((cluster_label1, class_label1), (cluster_label2, class_label2)),
proportion,
) in self.inter_distribution_.items():
if class_label1 != class_label2:
multi_label = (
(cluster_label1, class_label1),
(cluster_label2, class_label2),
)
raise ValueError(
f'Multi-labels for neighboring cluster pairs should '
f'have a common class label. Got {multi_label} instead.'
)
proportions[class_label1] += proportion
if not all(
[
np.isclose(val, 0.0) or np.isclose(val, 1.0)
for val in proportions.values()
]
):
raise ValueError(
f'Intra-distribution and inter-distribution sum of proportions '
f'for each class label should be either equal to 0.0 or 1.0. '
f'Got {proportions} instead.'
)
return self
def _fit(self, X, y, labels, neighbors):
if labels is not None:
self._intra_distribute(X, y, labels, neighbors)
if neighbors is not None:
self._inter_distribute(X, y, labels, neighbors)
return self
def fit(self, X, y, labels=None, neighbors=None):
"""Generate the intra-label and inter-label distribution.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
labels : array-like, shape (n_samples,)
Labels of each sample.
neighbors : array-like, (n_neighboring_pairs, 2)
An array that contains all neighboring pairs. Each row is
a unique neighboring pair.
Returns
-------
self : object,
Return self.
"""
# Check data
X, y = check_X_y(X, y, dtype=None)
# Set statistics
counts = Counter(y)
self.majority_class_labels_ = [
class_label
for class_label, class_label_count in counts.items()
if class_label_count == max(counts.values())
]
self.unique_cluster_labels_ = (
np.unique(labels) if labels is not None else np.array(0, dtype=int)
)
self.unique_class_labels_ = np.unique(y)
self.n_samples_ = len(X)
# Set default attributes
self.labels_ = (
np.repeat(0, len(X))
if labels is None
else check_array(labels, ensure_2d=False)
)
self.neighbors_ = (
np.empty((0, 2), dtype=int)
if neighbors is None
else check_array(neighbors, ensure_2d=False)
)
self.intra_distribution_ = {
(0, class_label): 1.0
for class_label in np.unique(y)
if class_label not in self.majority_class_labels_
}
self.inter_distribution_ = {}
# Fit distributor
self._fit(X, y, labels, neighbors)
# Validate fitting procedure
self._validate_fitting()
return self
def fit_distribute(self, X, y, labels=None, neighbors=None):
"""Return the intra-label and inter-label distribution.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples,)
Corresponding label for each sample in X.
labels : array-like, shape (n_samples,)
Labels of each sample.
neighbors : array-like, shape (n_neighboring_pairs, 2)
An array that contains all neighboring pairs. Each row is
a unique neighboring pair.
Returns
-------
distributions : tuple of (intra_distribution, inter_distribution) arrays
A tuple with the two distributions.
"""
self.fit(X, y, labels, neighbors)
return self.intra_distribution_, self.inter_distribution_
```
#### File: clover/over_sampling/_somo.py
```python
from math import sqrt
from sklearn.base import clone
from sklearn.utils import check_scalar
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import Substitution
from imblearn.utils._docstring import _random_state_docstring, _n_jobs_docstring
from ._cluster import ClusterOverSampler
from ..distribution._density import DensityDistributor
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class SOMO(ClusterOverSampler):
"""Applies the SOM algorithm to the input space before applying SMOTE.
This is an implementation of the algorithm described in [1]_.
Read more in the :ref:`user guide <user_guide>`.
Parameters
----------
{sampling_strategy}
{random_state}
k_neighbors : int or object, default=5
Defines the number of nearest neighbors to be used by SMOTE.
- If ``int``, this number is used to construct synthetic
samples.
- If ``object``, an estimator that inherits from
:class:`sklearn.neighbors.base.KNeighborsMixin` that will be
used to find the number of nearest neighbors.
som_estimator : None or object or int or float, default=None
Defines the SOM clusterer applied to the input space.
- If ``None``, :class:`somlearn.SOM` is used with its default parameters.
- If ``SOM`` object, then an instance of :class:`somlearn.SOM`.
- If ``int``, the number of clusters to be used.
- If ``float``, the proportion of the number of clusters over the number
of samples to be used.
distribution_ratio : float, default=0.8
The ratio of intra-cluster to inter-cluster generated samples. It is a
number in the :math:`[0.0, 1.0]` range. The default value is ``0.8``, a
number equal to the proportion of intra-cluster generated samples over
the total number of generated samples. As the number decreases, less
intra-cluster and more inter-cluster samples are generated.
raise_error : boolean, default=True
{n_jobs}
Attributes
----------
clusterer_ : object
A fitted :class:`somlearn.SOM` instance.
distributor_ : object
A fitted :class:`clover.distribution.DensityDistributor` instance.
labels_ : array, shape (n_samples,)
Labels of each sample.
neighbors_ : array, (n_neighboring_pairs, 2) or None
An array that contains all neighboring pairs with each row being
a unique neighboring pair.
oversampler_ : object
A fitted :class:`imblearn.over_sampling.SMOTE` instance.
random_state_ : object
An instance of ``RandomState`` class.
sampling_strategy_ : dict
Actual sampling strategy.
References
----------
.. [1] <NAME>, <NAME>, "Self-Organizing Map
Oversampling (SOMO) for imbalanced data set learning"
https://www.sciencedirect.com/science/article/abs/pii/S0957417417302324?via%3Dihub
Examples
--------
>>> import numpy as np
>>> from clover.over_sampling import SOMO # doctest: +SKIP
>>> from sklearn.datasets import make_blobs
>>> blobs = [100, 800, 100]
>>> X, y = make_blobs(blobs, centers=[(-10, 0), (0,0), (10, 0)])
>>> # Add a single 0 sample in the middle blob
>>> X = np.concatenate([X, [[0, 0]]])
>>> y = np.append(y, 0)
>>> # Make this a binary classification problem
>>> y = y == 1
>>> somo = SOMO(random_state=42) # doctest: +SKIP
>>> X_res, y_res = somo.fit_resample(X, y) # doctest: +SKIP
>>> # Find the number of new samples in the middle blob
>>> right, left = X_res[:, 0] > -5, X_res[:, 0] < 5 # doctest: +SKIP
>>> n_res_in_middle = (right & left).sum() # doctest: +SKIP
>>> print("Samples in the middle blob: %s" % n_res_in_middle) # doctest: +SKIP
Samples in the middle blob: 801
>>> unchanged = n_res_in_middle == blobs[1] + 1 # doctest: +SKIP
>>> print("Middle blob unchanged: %s" % unchanged) # doctest: +SKIP
Middle blob unchanged: True
>>> more_zero_samples = (y_res == 0).sum() > (y == 0).sum() # doctest: +SKIP
>>> print("More 0 samples: %s" % more_zero_samples) # doctest: +SKIP
More 0 samples: True
"""
def __init__(
self,
sampling_strategy='auto',
random_state=None,
k_neighbors=5,
som_estimator=None,
distribution_ratio=0.8,
raise_error=True,
n_jobs=None,
):
self.sampling_strategy = sampling_strategy
self.random_state = random_state
self.k_neighbors = k_neighbors
self.som_estimator = som_estimator
self.distribution_ratio = distribution_ratio
self.raise_error = raise_error
self.n_jobs = n_jobs
def _check_estimators(self, X, y):
"""Check various estimators."""
# Import SOM
try:
from somlearn import SOM
except ImportError:
raise ImportError(
'SOMO class requires the package `som-learn` to be installed.'
)
# Check oversampler
self.oversampler_ = SMOTE(
sampling_strategy=self.sampling_strategy,
k_neighbors=self.k_neighbors,
random_state=self.random_state_,
n_jobs=self.n_jobs,
)
# Check clusterer and number of clusters
if self.som_estimator is None:
self.clusterer_ = SOM(random_state=self.random_state_)
elif isinstance(self.som_estimator, int):
check_scalar(self.som_estimator, 'som_estimator', int, min_val=1)
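# The requested number of clusters is mapped to a roughly square SOM grid with side
# n = round(sqrt(n_clusters)).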
n = round(sqrt(self.som_estimator))
self.clusterer_ = SOM(
n_columns=n, n_rows=n, random_state=self.random_state_
)
elif isinstance(self.som_estimator, float):
check_scalar(
self.som_estimator, 'som_estimator', float, min_val=0.0, max_val=1.0
)
n = round(sqrt((X.shape[0] - 1) * self.som_estimator + 1))
self.clusterer_ = SOM(
n_columns=n, n_rows=n, random_state=self.random_state_
)
elif isinstance(self.som_estimator, SOM):
self.clusterer_ = clone(self.som_estimator)
else:
raise TypeError(
'Parameter `som_estimator` should be '
'either `None` or the number of clusters '
'or a float in the [0.0, 1.0] range equal to'
' the number of clusters over the number of '
'samples or an instance of the `SOM` class.'
)
# Check distributor
self.distributor_ = DensityDistributor(
distribution_ratio=self.distribution_ratio,
filtering_threshold=1.0,
distances_exponent=2.0,
)
return self
``` |
{
"source": "joaopfonseca/data-filtering",
"score": 2
} |
#### File: src/models/oversampling.py
```python
import numpy as np
from gsmote import GeometricSMOTE
from sklearn.neighbors import NearestNeighbors
class DenoisedGeometricSMOTE(GeometricSMOTE):
def __init__(self,
sampling_strategy='auto',
random_state=None,
truncation_factor=1.0,
deformation_factor=0.0,
selection_strategy='combined',
k_neighbors=5,
k_neighbors_filter=3,
n_jobs=1,):
super().__init__(
sampling_strategy=sampling_strategy,
random_state=random_state,
truncation_factor=truncation_factor,
deformation_factor=deformation_factor,
selection_strategy=selection_strategy,
k_neighbors=k_neighbors,
n_jobs=n_jobs)
self.k_neighbors_filter = k_neighbors_filter
def _fit_resample(self, X, y):
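# Noise filter: keep only the samples whose label agrees with at least half of their
# k_neighbors_filter nearest neighbours (the sample itself included), then oversample with
# Geometric SMOTE on the filtered data.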
_, indices = NearestNeighbors(n_neighbors=self.k_neighbors_filter, algorithm='auto')\
.fit(X)\
.kneighbors(X)
labels = np.vectorize(lambda x: y[x])(indices)
status = np.equal(np.expand_dims(y,-1), labels).astype(int).sum(axis=1)/self.k_neighbors_filter>=0.5
return super()._fit_resample(X[status], y[status])
``` |
{
"source": "joaopfonseca/ml-research",
"score": 3
} |
#### File: mlresearch/active_learning/_active_learning.py
```python
import numpy as np
from copy import deepcopy
from sklearn.base import clone
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.utils import check_X_y
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestClassifier
from imblearn.pipeline import Pipeline
from ..metrics import SCORERS
from ._selection_methods import UNCERTAINTY_FUNCTIONS
from ._init_methods import init_strategy
class ALSimulation(ClassifierMixin, BaseEstimator):
"""
Class to simulate Active Learning experiments.
This algorithm is an implementation of an Active Learning framework as
presented in [1]_. The initialization strategy is WIP.
Parameters
----------
classifier : classifier object, default=None
Classifier to be used as Chooser and Predictor, or a pipeline
containing both the generator and the classifier.
generator : generator estimator, default=None
Generator to be used for artificial data generation within Active
Learning iterations.
use_sample_weight : bool, default=False
Pass ``sample_weights`` as a fit parameter to the generator object. Used to
generate artificial data around samples with high uncertainty. ``sample_weights``
is an array-like of shape (n_samples,) containing the probabilities (based on
uncertainty) for selecting a sample as a center point.
init_clusterer : clusterer estimator, default=None
WIP
init_strategy : WIP, default='random'
WIP
selection_strategy : function or {'entropy', 'breaking_ties',\
'random'}, default='entropy'
Method used to quantify the chooser's uncertainty level. All predefined functions
are set up so that a higher value means higher uncertainty (higher likelihood of
selection) and vice-versa. The uncertainty estimate is used to select the
instances to be added to the labeled/training dataset. Selection strategies may
be added or changed in the ``UNCERTAINTY_FUNCTIONS`` dictionary.
param_grid : dict or list of dictionaries
Used to optimize the classifier and generator hyperparameters at each iteration.
Dictionary with parameters names (``str``) as keys and lists of parameter
settings to try as values, or a list of such dictionaries, in which case the
grids spanned by each dictionary in the list are explored. This enables searching
over any sequence of parameter settings.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy. Used to optimize the
classifier and generator hyperparameters at each iteration.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
max_iter : int, default=None
Maximum number of iterations allowed. If None, the experiment will run until 100%
of the dataset is added to the training set.
n_initial : int, default=.02
Number of observations to include in the initial training dataset. If
``n_initial`` < 1, then the corresponding percentage of the original dataset
will be used as the initial training set.
increment : int, default=.02
Number of observations to be added to the training dataset at each
iteration. If ``n_initial`` < 1, then the corresponding percentage of the
original dataset will be used as the initial training set.
save_classifiers : bool, default=False
Save classifiers fit at each iteration. These classifiers are stored
in a list ``self.classifiers_``.
save_test_scores : bool, default=True
If ``True``, test scores are saved in the list ``self.test_scores_``.
Size of the test set is defined with the ``test_size`` parameter.
auto_load : bool, default=True
If `True`, the classifier with the best training score is saved in the
method ``self.classifier_``. It's the classifier object used in the
``predict`` method.
test_size : float or int, default=None
If float, should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to 0.25.
evaluation_metric : string, default='accuracy'
Metric used to calculate the test scores. See
``research.metrics`` for info on available
performance metrics.
random_state : int, RandomState instance, default=None
Control the randomization of the algorithm.
- If int, ``random_state`` is the seed used by the random number
generator;
- If ``RandomState`` instance, random_state is the random number
generator;
- If ``None``, the random number generator is the ``RandomState``
instance used by ``np.random``.
Attributes
----------
init_clusterer_ : clusterer estimator
Clustering object used to determine the initial training dataset.
n_initial_ : int
Number of observations included in the initial training dataset.
selection_strategy_ : function
Method used to calculate the classification uncertainty per iteration.
evaluation_metric_ : scorer
Metric used to estimate the performance of the AL classifier per iteration.
top_score_iter_ : int
Iteration that returns the best found performance over the test dataset.
classifier_ : estimator object
The classifier used in the iterative process.
data_utilization_ : list
Amount of data used at each iteration, in absolute and relative values.
max_iter_ : int
Maximum number of iterations allowed.
increment_ : int
Number of observations to be added to the training set per iteration. Also known
as budget.
test_scores_ : list
Classification performance per iteration over the test set.
References
----------
.. [1] <NAME>., <NAME>., <NAME>. (2021). Increasing the
Effectiveness of Active Learning: Introducing Artificial Data Generation
in Active Learning for Land Use/Land Cover Classification. Remote
Sensing, 13(13), 2619. https://doi.org/10.3390/rs13132619
"""
def __init__(
self,
classifier=None,
generator=None,
use_sample_weight=False,
init_clusterer=None,
init_strategy="random",
selection_strategy="entropy",
param_grid=None,
cv=None,
max_iter=None,
n_initial=0.02,
increment=0.02,
save_classifiers=False,
save_test_scores=True,
auto_load=True,
test_size=None,
evaluation_metric="accuracy",
random_state=None,
):
self.classifier = classifier
self.generator = generator
self.use_sample_weight = use_sample_weight
self.init_clusterer = init_clusterer
self.init_strategy = init_strategy
self.param_grid = param_grid
self.cv = cv
self.selection_strategy = selection_strategy
self.max_iter = max_iter
self.n_initial = n_initial
self.increment = increment
# Used to find the optimal classifier
self.auto_load = auto_load
self.test_size = test_size
self.save_classifiers = save_classifiers
self.save_test_scores = save_test_scores
self.evaluation_metric = evaluation_metric
self.random_state = random_state
def _check(self, X, y):
"""Set up simple initialization parameters to run an AL simulation."""
X, y = check_X_y(X, y)
if self.evaluation_metric is None:
self.evaluation_metric_ = SCORERS["accuracy"]
elif type(self.evaluation_metric) == str:
self.evaluation_metric_ = SCORERS[self.evaluation_metric]
else:
self.evaluation_metric_ = self.evaluation_metric
if self.classifier is None:
self._classifier = RandomForestClassifier(random_state=self.random_state)
else:
self._classifier = clone(self.classifier)
if type(self.selection_strategy) == str:
self.selection_strategy_ = UNCERTAINTY_FUNCTIONS[self.selection_strategy]
else:
self.selection_strategy_ = self.selection_strategy
if type(self.use_sample_weight) != bool:
raise TypeError(
"``use_sample_weight`` must be of type ``bool``. Got"
f" {self.use_sample_weight} instead."
)
if self.increment < 1:
inc_ = int(np.round(self.increment * X.shape[0]))
self.increment_ = inc_ if inc_ >= 1 else 1
else:
self.increment_ = self.increment
self.max_iter_ = (
self.max_iter
if self.max_iter is not None
else int(np.round(X.shape[0] / self.increment_) + 1)
)
if self.save_classifiers or self.save_test_scores:
self.data_utilization_ = []
if self.save_classifiers:
self.classifiers_ = []
if self.save_test_scores:
self.test_scores_ = []
if self.auto_load:
self.classifier_ = None
self._top_score = 0
if self.auto_load or self.save_test_scores:
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=self.test_size,
random_state=self.random_state,
stratify=y,
)
else:
X_test, y_test = (None, None)
if self.n_initial < 1:
n_initial = int(np.round(self.n_initial * X.shape[0]))
self.n_initial_ = n_initial if n_initial >= 2 else 2
else:
self.n_initial_ = self.n_initial
return X, X_test, y, y_test
def _check_cross_validation(self, y):
min_frequency = np.unique(y, return_counts=True)[-1].min()
cv = deepcopy(self.cv)
if hasattr(self.cv, "n_splits"):
cv.n_splits = min(min_frequency, self.cv.n_splits)
elif type(self.cv) == int:
cv = min(min_frequency, self.cv)
return cv
def _get_performance_scores(self):
data_utilization = [i[1] for i in self.data_utilization_]
test_scores = self.test_scores_
return data_utilization, test_scores
def _save_metadata(self, iter_n, classifier, X_test, y_test, selection):
"""Save metadata from a completed iteration."""
# Get score for current iteration
if self.save_test_scores or self.auto_load:
score = self.evaluation_metric_(classifier, X_test, y_test)
# Save classifier
if self.save_classifiers:
self.classifiers_.append(classifier)
# Save test scores
if self.save_test_scores:
self.test_scores_.append(score)
self.data_utilization_.append(
(selection.sum(), selection.sum() / selection.shape[0])
)
# Replace top classifier
if self.auto_load:
if score > self._top_score:
self._top_score = score
self.classifier_ = classifier
self.top_score_iter_ = iter_n
def fit(self, X, y):
"""
Run an Active Learning procedure from training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
Returns
-------
self : ALWrapper
Completed Active Learning procedure
"""
# Original "unlabeled" dataset
X, X_test, y, y_test = self._check(X, y)
selection = np.zeros(shape=(X.shape[0])).astype(bool)
sample_weight = None
# Supervisor - Get data according to passed initialization method
self.init_clusterer_, ids = init_strategy(
X=X,
y=y,
n_initial=self.n_initial_,
clusterer=self.init_clusterer,
init_strategy=self.init_strategy,
selection_strategy=self.selection_strategy_,
random_state=self.random_state,
)
selection[ids] = True
for iter_n in range(self.max_iter_):
# Generator + Chooser (in this case chooser==Predictor)
if self.generator is not None:
generator = clone(self.generator)
chooser = clone(self._classifier)
classifier = Pipeline([("generator", generator), ("chooser", chooser)])
else:
classifier = clone(self._classifier)
# Set up parameter tuning within iterations
if self.param_grid is not None:
cv = self._check_cross_validation(y[selection])
classifier = GridSearchCV(
estimator=classifier,
param_grid=self.param_grid,
scoring=self.evaluation_metric,
cv=cv,
refit=True,
)
# Generate artificial data and train classifier
if self.use_sample_weight:
# Save oversampler name to pass sample weight
ovr_name = (
classifier.steps[-2][0]
if self.param_grid is None
else classifier.estimator.steps[-2][0]
)
classifier.fit(
X[selection],
y[selection],
**{f"{ovr_name}__sample_weight": sample_weight},
)
# Compute the class probabilities of labeled observations
labeled_ids = np.argwhere(selection).squeeze()
probabs_labeled = classifier.predict_proba(X[selection])
probabs_labeled = np.where(
probabs_labeled == 0.0, 1e-10, probabs_labeled
)
else:
classifier.fit(X[selection], y[selection])
# Save metadata from current iteration
self._save_metadata(iter_n, classifier, X_test, y_test, selection)
# Compute the class probabilities of unlabeled observations
unlabeled_ids = np.argwhere(~selection).squeeze()
probabs = classifier.predict_proba(X[~selection])
probabs = np.where(probabs == 0.0, 1e-10, probabs)
# Calculate uncertainty
uncertainty = self.selection_strategy_(probabs)
if self.use_sample_weight:
uncertainty = (
MinMaxScaler().fit_transform(uncertainty.reshape(-1, 1)).squeeze()
)
uncertainty_labeled = (
MinMaxScaler()
.fit_transform(
self.selection_strategy_(probabs_labeled).reshape(-1, 1)
)
.squeeze()
)
# Get data according to passed selection criterion
if self.selection_strategy != "random":
ids = (
unlabeled_ids[np.argsort(uncertainty)[::-1][: self.increment_]]
if unlabeled_ids.ndim >= 1
else unlabeled_ids.flatten()[0]
)
else:
rng = np.random.RandomState(self.random_state)
ids = rng.choice(unlabeled_ids, self.increment_, replace=False)
selection[ids] = True
# Update sample weights for the following iteration
if self.use_sample_weight:
sample_weight = np.zeros(selection.shape)
sample_weight[labeled_ids] = uncertainty_labeled
sample_weight[unlabeled_ids] = uncertainty
sample_weight = sample_weight[selection]
# Corner case: when there is no uncertainty
if np.isnan(sample_weight).all():
sample_weight = np.ones(sample_weight.shape)
# stop if all examples have been included
if selection.all():
break
elif selection.sum() + self.increment_ > y.shape[0]:
self.increment_ = y.shape[0] - selection.sum()
return self
def load_best_classifier(self, X, y):
"""
Loads the best classifier in the ``self.classifiers_`` list.
The best classifier is used in the predict method according to the
performance metric passed.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The test input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
Returns
-------
self : ALWrapper
Completed Active Learning procedure
"""
scores = []
for classifier in self.classifiers_:
scores.append(self.evaluation_metric_(classifier, X, y))
self.classifier_ = self.classifiers_[np.argmax(scores)]
return self
def predict(self, X):
"""
Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The test input samples.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
"""
return self.classifier_.predict(X)
``` |
{
"source": "joaopfonseca/remote_sensing",
"score": 2
} |
#### File: pipelines/classification/full_pipeline_sentinel_HSN.py
```python
import sys
import os
#PROJ_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../'))
#sys.path.append(PROJ_PATH)
#print(PROJ_PATH)
PROJ_PATH = '.'
## data manipulation and transformation
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
## plotting
import matplotlib.pyplot as plt
## filters
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
## own libraries
from src.preprocess.readers import SentinelProductReader
from src.models.HybridSpectralNet import HybridSpectralNet
from src.preprocess.utils import (
ZScoreNormalization,
pad_X,
get_patches,
get_2Dcoordinates_matrix
)
from src.reporting.reports import reports # make this structure more proper
from src.preprocess.data_selection import KMeans_filtering
from src.reporting.visualize import plot_image
################################################################################
# CONFIGS
################################################################################
## data path configs
DATA_PATH = PROJ_PATH+'/data/sentinel_coimbra/raw/'
PRODUCT_PATH = DATA_PATH+'S2A_MSIL1C_20150725T112046_N0204_R037_T29TNE_20150725T112540.SAFE/'
BANDS_PATH = PRODUCT_PATH+'/GRANULE/L1C_T29TNE_A000463_20150725T112540/IMG_DATA/'
COS_PATH = DATA_PATH+'COS2015-V1-PT16E_Regiao_Coimbra/COS2015-V1-PT16E_Regiao_Coimbra.shx'
INTERIM_PATH = PROJ_PATH+'/data/sentinel_coimbra/interim/'
PROCESSED_PATH = DATA_PATH+'../processed/'
CSV_PATH = PROCESSED_PATH+'picture_data.csv'
## preprocessing configs
read_csv = False # Read saved csv with band values
random_state = 0 # Random state for everything. Generally left unchanged.
debuffer_cos = True # Removes labels of pixels close to the border of each polygon
K = 10 # Number of components for PCA
center_pixel = (4500,4500) # Center for the study area
width_height = 750 # Height and width of study (test) area
## pixel selection configs
use_ps = False
filters = (
('RandomForestClassifier', RandomForestClassifier(random_state=random_state)),
('DecisionTreeClassifier', DecisionTreeClassifier(random_state=random_state)),
('LogisticRegression', LogisticRegression(random_state=random_state)),
('MLPClassifier', MLPClassifier(random_state=random_state))
) # Classifiers used for filtering data
n_splits_ps = 7 # Number of splits. Total filters = len(filters)*n_splits_ps
granularity = 10 # Hyperparam used to compute the number of clusters. More granularity = more clusters
keep_rate = 0.7 # Rough % of observations to keep for each class
## Hybrid Spectral Net configs
n_epochs = 3 # Number of epochs
n_splits_cnn = 100 # Number of splits on the training data. This is done to lower memory usage
window_size = 25 # Size of the window to pass to the CNN
output_units = 9 # Number of labels
################################################################################
# DATA READING AND PREPROCESSING
################################################################################
if read_csv:
df = pd.read_csv(CSV_PATH)
else:
print('Reading image...')
coimbrinhas = SentinelProductReader(
bands_path=BANDS_PATH,
labels_shapefile=COS_PATH,
label_col='Megaclasse'
)
coimbrinhas.add_indices(['NDVI', 'NDBI', 'NDMI', 'NDWI'])
coimbrinhas.get_X_array()
coimbrinhas.get_y_array()
print('Selecting study area subset...')
x_center, y_center = center_pixel
margin = int((width_height/2))
x_lowlim = x_center-margin
x_highlim = x_center+margin
y_lowlim = y_center-margin
y_highlim = y_center+margin
coimbrinhas.X_array = coimbrinhas.X_array\
[x_lowlim-margin:x_highlim+margin,
y_lowlim-margin:y_highlim+margin]
coimbrinhas.y_array = coimbrinhas.y_array\
[x_lowlim-margin:x_highlim+margin,
y_lowlim-margin:y_highlim+margin]
df = coimbrinhas.to_pandas()
del coimbrinhas
## Normalize and dimensionality reduction
bands_list = df.drop(columns=['y','x','Megaclasse']).sort_index(axis=1).columns
zscorers = df[bands_list].apply(lambda x: ZScoreNormalization(x, axes=(0))[1])
pca = PCA(n_components=10, random_state=random_state)
norm_pca_vals = pca.fit_transform(df[bands_list].apply(lambda x: zscorers[x.name](x)))
norm_pca_cols = ['comp_'+str(x) for x in range(10)]
df_norm_pca = pd.DataFrame(norm_pca_vals, columns=norm_pca_cols)
df = df.drop(columns=bands_list).join([df[bands_list], df_norm_pca])
## divide data from dataframe into train and testing
margin = int((width_height/2))
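# Pixels in the outer ring are flagged for training; the central
# width_height x width_height block is held out as the test (study) area.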
df['train_set'] = df.apply(lambda row: (
row['x']<margin or row['x']>=3*margin or row['y']<margin or row['y']>=3*margin
), axis=1)
################################################################################
# PIXEL SELECTION
################################################################################
# TODO
################################################################################
# Hybrid Spectral Net
################################################################################
def pivot_multispectral(df, xy_cols, bands):
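    # Reshape the long-format dataframe into a (height, width, n_bands) image
    # cube: one pivoted 2-D array per band, stacked along the last axis.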
rgb = []
for band in bands:
rgb.append(df.pivot(xy_cols[0], xy_cols[1], band).values)
return np.moveaxis(np.array(rgb), 0, -1)#.T.swapaxes(0,1)
## model setup
print('Setting model...')
ConvNet = HybridSpectralNet(input_shape=(window_size, window_size, K), output_units=output_units)
ConvNet.load_weights('best_model.hdf5')
coords = df[['y','x']]
criteria = (coords>coords.min()+int(window_size/2)+1).all(axis=1) & (coords<coords.max()-int(window_size/2)).all(axis=1)
X_coords = df[['y','x']][df['train_set']&criteria].astype(int).values
y_labels = df['Megaclasse'][df['train_set']&criteria].astype(int).values
X_lookup = pivot_multispectral(df, ['y','x'], norm_pca_cols)
print(f'Stratified Splitting: {n_splits_cnn} splits, {int(X_coords.shape[0]/n_splits_cnn)} pixels per split')
skf = StratifiedKFold(n_splits = n_splits_cnn, shuffle=True, random_state=random_state)
for epoch in range(1,n_epochs+1):
i = 0
for _, split_indices in skf.split(X_coords, y_labels):
i+=1
print(f'Epoch {epoch}/{n_epochs}, Split {i}/{n_splits_cnn}')
X_split, y_split = X_coords[split_indices], y_labels[split_indices]
X_patches = get_patches(X_split, X_lookup, window_size)
ConvNet.fit(X_patches, y_split, batch_size=256, epochs=1)
################################################################################
# REPORTING AND IMAGE GENERATION
################################################################################
X_lookup = pivot_multispectral(df, ['y','x'], norm_pca_cols)
coords = df[['y','x']]
criteria = (coords>coords.min()+int(window_size/2)+1).all(axis=1) & (coords<coords.max()-int(window_size/2)).all(axis=1)
X_coords = coords[criteria].astype(int).values
indices = []
y_pre = []
i = 0
skf = StratifiedKFold(n_splits = n_splits_cnn, shuffle=True, random_state=random_state)
for _, split_indices in skf.split(X_coords, np.zeros(X_coords.shape[0])):
i+=1; print(f'Prediction progress: {(i/n_splits_cnn)*100}%')
X_split = X_coords[split_indices]
X_patches = get_patches(X_split, X_lookup, window_size)
indices.append(X_split)
y_pre.append(ConvNet.predict(X_patches))
#df_final = coords.copy()
#y_pred = pd.Series(data=np.concatenate(y_pre), index=np.concatenate(indices), name='y_pred').sort_index()
#df_final = df_final.join(y_pred)
y_pred = pd.DataFrame(
data=np.concatenate([np.expand_dims(np.concatenate(y_pre),1), np.concatenate(indices)], axis=1),
columns=['y_pred', 'y', 'x']
)
df = df.join(y_pred.set_index(['y','x']), on=['y','x'])
df.to_csv(PROCESSED_PATH+'classification_results.csv')
reports(df[~df['train_set']].dropna()['Megaclasse'], df[~df['train_set']].dropna()['y_pred'], {i:i for i in df['Megaclasse'].unique()})[-1]
plt.imshow(df[~df['train_set']].pivot('y','x','y_pred'))
```
#### File: src/models/AutoEncoder.py
```python
import pickle # remove after getting final version of code
import os
import numpy as np
from keras.utils import np_utils
from keras.layers import (
Input,
Conv2D,
Conv3D,
Flatten,
Dense,
Dropout,
Reshape,
BatchNormalization,
    Concatenate,
    MaxPooling2D,
    UpSampling2D,
    UpSampling3D
)
from keras.models import Model, load_model
from keras.optimizers import Adam, RMSprop
from keras.callbacks import ModelCheckpoint
class ExperimentalHSNAutoEncoder:
"""
    The convolutional layers of the encoder are inspired by the
    HybridSpectralNet architecture.
"""
def __init__(self, window_shape, filepath='best_model.hdf5'):
        self._decoder(self._encoder(window_shape))
self.model = Model(inputs=self.input_layer, outputs=self.decoder_output)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())#, metrics=['accuracy'])
self.model.summary()
abspath = os.path.abspath('.')
self.filepath = os.path.abspath(os.path.join(abspath,filepath))
        checkpoint = ModelCheckpoint(self.filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
self.callbacks_list = [checkpoint]
def _encoder(self, window_shape):
"""input_shape: (height, width, num_bands)"""
self.height, self.width, self.num_bands = window_shape
## input layer
self.input_layer = Input(
(
self.height,
self.width,
self.num_bands,
1
)
)
########################################################################
# convolutional layers
########################################################################
conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 2), activation='relu')(self.input_layer) # 23, 23, 9, 8
conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 5), activation='relu')(conv_layer1) # 21, 21, 5, 16
conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu')(conv_layer2) # 19, 19, 3, 32
conv3d_shape = conv_layer3._keras_shape
conv_layer3 = Reshape((conv3d_shape[1],conv3d_shape[2],conv3d_shape[3]*conv3d_shape[4]))(conv_layer3) # 19, 19, 96
conv2 = Conv2D(
filters=64,
kernel_size=(4,4),
activation='relu'
)(conv_layer3) # 16 x 16 x 64
conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
conv2 = BatchNormalization()(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) #8 x 8 x 64
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2) #8 x 8 x 128 (small and thick)
conv3 = BatchNormalization()(conv3)
conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
conv3 = BatchNormalization()(conv3)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv3) #8 x 8 x 256 (small and thick)
conv4 = BatchNormalization()(conv4)
conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)
self.encoder_output = BatchNormalization()(conv4)
return self.encoder_output
def _decoder(self, encoder_output):
"""
"""
conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output) #8 x 8 x 128
conv5 = BatchNormalization()(conv5)
conv5 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv5)
conv5 = BatchNormalization()(conv5)
conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv5) #8 x 8 x 64
conv6 = BatchNormalization()(conv6)
conv6 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv6)
conv6 = BatchNormalization()(conv6)
up1 = UpSampling2D((3,3))(conv6) # 24 x 24 x 64
conv7 = Conv2D(96, (6, 6), activation='relu')(up1) # 19 x 19 x 96
conv7 = BatchNormalization()(conv7)
conv7 = Conv2D(96, (6, 6), activation='relu', padding='same')(conv7)
conv7 = BatchNormalization()(conv7)
up2 = UpSampling2D((2,2))(conv7)
up2shp = up2._keras_shape
conv7 = Reshape((up2shp[1], up2shp[2], 3, int(up2shp[3]/3)))(up2) # 38, 38, 3, 32
conv8 = Conv3D(16, kernel_size=(18,18,1), activation='relu')(conv7)
conv8 = BatchNormalization()(conv8)
conv8 = Conv3D(16, kernel_size=(18,18,1), activation='relu', padding='same')(conv8)
conv8 = BatchNormalization()(conv8)
up3 = UpSampling3D((2,2,4))(conv8)
conv9 = Conv3D(8, kernel_size=(18,18,3), activation='relu')(up3)
conv9 = BatchNormalization()(conv9)
conv9 = Conv3D(8, kernel_size=(3,3,3), activation='relu', padding='same')(conv9)
conv9 = BatchNormalization()(conv9)
conv10 = Conv3D(1, kernel_size=(3,3,2), activation='relu', padding='same')(conv9)
self.decoder_output = BatchNormalization()(conv10)
return self.decoder_output
def load_weights(self, filepath):
self.filepath = filepath
self.model = load_model(filepath)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
def fit(self, X, y, batch_size=256, epochs=100):
# transform matrices to correct format
self.num_bands = X.shape[-1]
X = X.reshape(
-1,
self.height,
self.width,
self.num_bands,
1
)
y = y.reshape(
-1,
self.height,
self.width,
self.num_bands,
1
)
self.history = self.model.fit(
x=X,
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=self.callbacks_list
)
def predict(self, X, filepath=None):
# assert: self.filepath or filepath must exist
if filepath:
self.load_weights(filepath)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
self.num_bands = X.shape[-1]
X = X.reshape(
-1,
self.height,
self.width,
self.num_bands,
1
)
y_pred = np.argmax(self.model.predict(X), axis=1)
return y_pred
class MLPAutoEncoder:
"""
"""
def __init__(self, num_bands, filepath='best_model.hdf5'):
self.num_bands = num_bands
self._decoder(self._encoder(num_bands))
self.model = Model(inputs=self.input_layer, outputs=self.decoder_output)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())#, metrics=['accuracy'])
self.model.summary()
abspath = os.path.abspath('.')
self.filepath = os.path.abspath(os.path.join(abspath,filepath))
checkpoint = ModelCheckpoint(self.filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
self.callbacks_list = [checkpoint]
def _encoder(self, num_bands):
"""
"""
self.input_layer = Input((num_bands,))
layer1 = Dense(32, input_shape=self.input_layer._keras_shape, activation='relu')(self.input_layer)
layer1 = BatchNormalization()(layer1)
layer2 = Dense(16, activation='relu')(layer1)
layer2 = BatchNormalization()(layer2)
layer3 = Dense(4, activation='relu')(layer2)
self.encoder_output = BatchNormalization()(layer3)
return self.encoder_output
def _decoder(self, encoder_output):
"""
"""
layer4 = Dense(16, input_shape=self.encoder_output._keras_shape, activation='relu')(encoder_output)
layer4 = BatchNormalization()(layer4)
layer5 = Dense(32, activation='relu')(layer4)
layer5 = BatchNormalization()(layer5)
self.decoder_output = Dense(10, activation=None)(layer5)
return self.decoder_output
def load_weights(self, filepath):
self.filepath = filepath
self.model = load_model(filepath)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
def fit(self, X, y, batch_size=256, epochs=100):
# transform matrices to correct format
self.num_bands = X.shape[-1]
X = X.reshape(-1, self.num_bands,)
y = y.reshape(-1, self.num_bands,)
self.history = self.model.fit(
x=X,
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=self.callbacks_list
)
def predict(self, X, filepath=None):
# assert: self.filepath or filepath must exist
if filepath:
self.load_weights(filepath)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
#else:
# self.load_model(self.filepath)
#self.model.compile(loss='categorical_crossentropy', optimizer=self.adam, metrics=['accuracy'])
X_pred = self.model.predict(X)
mse = ((X_pred-X)**2).mean(axis=1)
return mse
class MLPEncoderClassifier:
def __init__(self, encoder_list, num_targets, filepath='best_model.hdf5'):
self.num_targets = num_targets
self.num_encoders = len(encoder_list)
MergedEncoders = Concatenate()([model.encoder_output for model in encoder_list])
self._MLPClassifier(MergedEncoders)
self.model = Model(inputs=[model.input_layer for model in encoder_list], outputs=self.output_layer)
self.adam = Adam(lr=0.001, decay=1e-06)
self.model.compile(loss='categorical_crossentropy', optimizer=self.adam, metrics=['accuracy'])
self.model.summary()
abspath = os.path.abspath('.')
self.filepath = os.path.abspath(os.path.join(abspath,filepath))
checkpoint = ModelCheckpoint(self.filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
self.callbacks_list = [checkpoint]
def _MLPClassifier(self, merged_encoders_outputs):
layer1 = BatchNormalization()(merged_encoders_outputs)
layer1 = Dense(32, activation='relu')(layer1)
layer1 = BatchNormalization()(layer1)
layer2 = Dense(16, activation='relu')(layer1)
layer2 = BatchNormalization()(layer2)
self.output_layer = Dense(self.num_targets, activation='sigmoid')(layer2)
return self.output_layer
def fit(self, X, y, batch_size=256, epochs=100):
# transform matrices to correct format
self.num_bands = X.shape[-1]
X = X.reshape(-1, self.num_bands,)
y = np_utils.to_categorical(y, num_classes=self.num_targets)
self.history = self.model.fit(
x=[X for i in range(self.num_encoders)],
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=self.callbacks_list
)
def predict(self, X, filepath=None):
# assert: self.filepath or filepath must exist
if filepath:
self.load_weights(filepath)
self.model.compile(loss='mean_squared_error', optimizer=RMSprop())
#else:
# self.load_model(self.filepath)
#self.model.compile(loss='categorical_crossentropy', optimizer=self.adam, metrics=['accuracy'])
y_pred = np.argmax(self.model.predict([X for i in range(self.num_encoders)]), axis=1)
return y_pred
```
#### File: src/models/denoiser.py
```python
import os
import numpy as np
from keras.layers import (
Input,
Conv2D,
BatchNormalization,
Flatten,
Dense,
Reshape,
Conv2DTranspose,
Activation,
ZeroPadding2D
)
from keras.models import Model, load_model
from keras.backend import int_shape
from keras.callbacks import ModelCheckpoint
class DenoiserAE:
def __init__(self, input_shape, kernel_size=3, latent_num_distributions=10, filepath='best_model.hdf5'):
"""
input_shape: (height, width, num_classes)
latent_num_distributions: number of distributions per class in the latent space
"""
self.height, self.width, self.num_classes = input_shape
self.kernel_size = kernel_size
# maybe doing it this way doesn't make sense, I'll definitely need to review this part
self.latent_num_distributions = latent_num_distributions
# Encoder/Decoder number of CNN layers and filters per layer
self.layer_filters = [32, 64]
## input layer
self.input_layer = Input(
(
self.height,
self.width,
self.num_classes
)
)
## encoder
encoder = self._encoder(self.input_layer)
## latent space
self.latent_input = Input(shape=(self.latent_num_distributions*self.num_classes,), name='decoder_input')
## decoder
decoder = self._decoder(self.latent_input)
# Autoencoder = Encoder + Decoder
# Instantiate Autoencoder Model
self.model = Model(self.input_layer, decoder(encoder(self.input_layer)), name='autoencoder')
self.model.summary()
self.model.compile(loss='mse', optimizer='adam')
abspath = os.path.abspath('.')
self.filepath = os.path.abspath(os.path.join(abspath,filepath))
        checkpoint = ModelCheckpoint(self.filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
self.callbacks_list = [checkpoint]
def _encoder(self, input_layer):
x = input_layer
for filters in self.layer_filters:
x = Conv2D(filters=filters,
kernel_size=self.kernel_size,
strides=2,
activation='relu',
padding='same')(x)
x = BatchNormalization()(x)
# Shape info needed to build Decoder Model
self._shape = int_shape(x)
# Generate the latent vector
x = Flatten()(x)
latent = Dense(self.latent_num_distributions*self.num_classes, name='latent_vector')(x)
# Instantiate Encoder Model
encoder = Model(input_layer, latent, name='encoder')
encoder.summary()
return encoder
def _decoder(self, latent_inputs):
x = Dense(self._shape[1] * self._shape[2] * self._shape[3])(latent_inputs)
x = Reshape((self._shape[1], self._shape[2], self._shape[3]))(x)
# Stack of Transposed Conv2D blocks
# Notes:
# 1) Use Batch Normalization before ReLU on deep networks
# 2) Use UpSampling2D as alternative to strides>1
# - faster but not as good as strides>1
for filters in self.layer_filters[::-1]:
x = Conv2DTranspose(filters=filters,
kernel_size=self.kernel_size,
strides=2,
activation='relu',
padding='same')(x)
x = Conv2DTranspose(filters=self.num_classes,
kernel_size=self.kernel_size,
padding='same')(x)
outputs = Activation('sigmoid', name='decoder_output')(x)
_, self._out_height, self._out_width, _ = int_shape(outputs)
# Instantiate Decoder Model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
return decoder
def load_weights(self, filepath):
self.filepath = filepath
self.model = load_model(filepath)
self.model.compile(loss='mse', optimizer='adam')
def fit(self, X, y, batch_size=256, epochs=100):
# transform matrices to correct format
X = X.reshape(-1, self.height, self.width)
y = y.reshape(-1, self.height, self.width)
X = np.array([np.moveaxis(np.array([arr==i for i in range(self.num_classes)]),0,-1) for arr in X])
y = np.array([np.moveaxis(np.array([arr==i for i in range(self.num_classes)]),0,-1) for arr in y])
y = np.pad(
y,
((0,0), (self._out_height-self.height, 0), (self._out_width-self.width, 0), (0,0)),
)
self.history = self.model.fit(
x=X,
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=self.callbacks_list
)
def predict(self, X, filepath=None):
if filepath:
self.load_weights(filepath)
X = X.reshape(
-1,
self.height,
self.width
)
y_pred = np.argmax(self.model.predict(X), axis=1)
return y_pred
```
#### File: src/reporting/visualize.py
```python
import matplotlib.pyplot as plt
import numpy as np
def _plot_image(X, figsize=(20, 20), dpi=80, *args):
if X.ndim==3 and (X>1.0).any():
X = np.clip(X, 0, 3000)/3000
plt.imshow(
X,
*args
)
plt.axis('off')
def plot_image(arrays, num_rows=1, figsize=(40, 20), dpi=80, *args):
assert type(arrays) in [np.ndarray, list], '\'arrays\' must be either a list of arrays, or a single 2-dimensional array'
if type(arrays)==np.ndarray:
plt.figure(
figsize=figsize,
dpi=dpi
)
_plot_image(arrays.astype(int), figsize=figsize, dpi=dpi, *args)
else:
num_arrays = len(arrays)
plt.figure(
figsize=figsize,
dpi=dpi
)
for i in range(num_arrays):
plt.subplot(num_rows, int(np.ceil(num_arrays/num_rows)), i+1)
_plot_image(arrays[i].astype(float))
```
#### File: scripts/2_data_preprocessing/feature_selection_experiment.py
```python
import pandas as pd
import numpy as np
# utilities
import sys
import os
import pickle
# utilities v2
from src.experiment.utils import check_pipelines, check_fit_params
from rlearn.model_selection import ModelSearchCV
from rlearn.tools.reporting import report_model_search_results
from sklearn.model_selection import StratifiedKFold, train_test_split
# data normalization
from sklearn.preprocessing import StandardScaler
# feature selection
from sklearn.feature_selection import SelectFromModel
from src.preprocess.feature_selection import (
CorrelationBasedFeatureSelection,
PermutationRF,
)
from src.preprocess.relieff import ReliefF
from src.preprocess.utils import SelectFeaturesFromList
# classifiers
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# scorers
from sklearn.metrics import SCORERS, make_scorer
from imblearn.metrics import geometric_mean_score
## configs
DATA_PATH = 'T29SNC/data/preprocessed/2019_02_RS_0_n_features_320.csv'
RESULTS_PATH = 'T29SNC/results/'
random_state=0
## set up experiment objects
feature_selection = [
('NONE', None, {}),
('CorrelationBased', CorrelationBasedFeatureSelection(), {
'corr_type':['pearson'], 'threshold':[.7, .8, .9]
}
),
('Permutation', PermutationRF(), {'n_estimators': [100], 'max_features': [None, 30, 50, 70]}),
('RFGini', SelectFromModel(
estimator=RandomForestClassifier(n_estimators=100, criterion='gini', random_state=0),
prefit=False), {
'max_features': [15, 30, 40, 50, 60, 70]
}
),
('RFEntropy', SelectFromModel(
estimator=RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=0),
prefit=False), {
'max_features': [15, 30, 40, 50, 60, 70]
}
),
('ReliefF', ReliefF(), {'n_neighbors': [40, 100], 'n_features_to_keep': [30, 50, 70]})
]
classifiers = [
('RFC', RandomForestClassifier(n_estimators=100), {})
]
## setup scorers
def geometric_mean_macro(X, y):
return geometric_mean_score(X, y, average='macro')
SCORERS['geometric_mean_macro'] = make_scorer(geometric_mean_macro)
scorers = ['accuracy', 'f1_macro', 'geometric_mean_macro']
## read data, sample, normalize and split among feature types
# read and drop missing values (it's not our goal to study imputation methods)
df = pd.read_csv(DATA_PATH).dropna()
# split by feature type
df_meta = df[['x','y','Megaclasse']]
df_bands = df.drop(columns=df_meta.columns)
# normalize
znorm = StandardScaler()
df_bands = pd.DataFrame(znorm.fit_transform(df_bands.values), columns=df_bands.columns, index=df_bands.index)
X = df_bands.values
y = df_meta['Megaclasse'].values
# sample data
X, _, y, _ = train_test_split(X, y, train_size=.1, shuffle=True, stratify=y, random_state=random_state)
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
## Experiment 1 (feature selection)
pipelines_feature, param_grid_feature = check_pipelines(
[feature_selection, classifiers],
random_state=0,
n_runs=1
)
model_search_feature = ModelSearchCV(
pipelines_feature,
param_grid_feature,
scoring=scorers,
refit='accuracy',
n_jobs=-1,
cv=cv,
verbose=1
)
model_search_feature.fit(X,y)
df_results_feature = report_model_search_results(model_search_feature)\
.sort_values('mean_test_accuracy', ascending=False)
df_results_feature.to_csv('results_feature_selection.csv')
pickle.dump(model_search_feature, open('model_search_feature_selection.pkl','wb'))
best_feature_selection_model = model_search_feature.best_estimator_.named_steps['ReliefF']
features = pd.DataFrame(
np.array(
[df_bands.columns,best_feature_selection_model.top_features]
).T,
columns=['feature', 'rank']).sort_values('rank')
features.to_csv(RESULTS_PATH+'feature_rankings.csv', index=False)
## Select optimal number of features
optimal_num_features = [
(
'DimReduct',
SelectFeaturesFromList(feature_rankings=best_feature_selection_model.top_features),
{'n_features': list(range(1, len(best_feature_selection_model.top_features)+1))}
)
]
pipelines_dimreduct, param_grid_dimreduct = check_pipelines(
[optimal_num_features, classifiers],
random_state=0,
n_runs=1
)
model_search_dimreduct = ModelSearchCV(
pipelines_dimreduct,
param_grid_dimreduct,
scoring=scorers,
refit='accuracy',
n_jobs=-1,
cv=cv,
verbose=1
)
model_search_dimreduct.fit(X,y)
df_results_dimreduct = report_model_search_results(model_search_dimreduct)\
.sort_values('mean_test_accuracy', ascending=False)
df_results_dimreduct.to_csv(RESULTS_PATH+'results_dimreduct.csv')
pickle.dump(model_search_dimreduct, open(RESULTS_PATH+'model_search_dimreduct.pkl','wb'))
``` |
{
"source": "joaopfonseca/research-learn",
"score": 2
} |
#### File: model_selection/tests/test_search.py
```python
import pytest
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.datasets import make_regression, make_classification
from rlearn.model_selection.search import (
MultiEstimatorMixin,
MultiRegressor,
MultiClassifier,
ModelSearchCV,
)
RND_SEED = 0
X_reg, y_reg = make_regression(random_state=RND_SEED)
X_clf, y_clf = make_classification(random_state=RND_SEED)
REGRESSORS = [
('lr', LinearRegression()),
('dtr', DecisionTreeRegressor()),
('pip', make_pipeline(MinMaxScaler(), LinearRegression())),
]
CLASSIFIERS = [
('lr', LogisticRegression(solver='lbfgs')),
('dtc', DecisionTreeClassifier()),
('pip', make_pipeline(MinMaxScaler(), LogisticRegression(solver='lbfgs'))),
]
REGRESSORS_PARAM_GRIDS = [
{'dtr__max_depth': [3, 5], 'dtr__random_state': [RND_SEED, RND_SEED + 3]},
{
'pip__minmaxscaler__feature_range': [(0, 1), (0, 10)],
'pip__linearregression__normalize': [True, False],
},
]
CLASSIFIERS_PARAM_GRIDS = {
'dtc__max_depth': [3, 5],
'dtc__criterion': ['entropy', 'gini'],
'dtc__random_state': [RND_SEED, RND_SEED + 5],
}
@pytest.mark.parametrize('estimators', [None, [], DecisionTreeClassifier()])
def test_multi_estimator_wrong_type(estimators):
"""Test the initialization of multi-estimator class with wrong inputs."""
with pytest.raises(TypeError):
MultiEstimatorMixin(estimators, 'est').fit(X_clf, y_clf)
def test_multi_estimator_unique_names():
"""Test the initialization of multi-estimator class with duplicate names."""
estimators = [('est', LinearRegression()), ('est', DecisionTreeRegressor())]
with pytest.raises(ValueError):
MultiEstimatorMixin(estimators, 'estimator').fit(X_clf, y_clf)
def test_multi_estimator_wrong_name():
"""Test the initialization of multi-estimator class with wrong estimator name."""
estimators = [('lr', LinearRegression()), ('dtr', DecisionTreeRegressor())]
with pytest.raises(ValueError):
MultiEstimatorMixin(estimators, 'est').fit(X_clf, y_clf)
def test_multi_estimator_params_methods():
"""Test the set and get parameters methods."""
# Get parameters
est_name = 'dtr'
multi_estimator = MultiEstimatorMixin(REGRESSORS, est_name)
params = multi_estimator.get_params(deep=False)
assert params['est_name'] == 'dtr'
# Set parameters
est_name = 'reg'
multi_estimator.set_params(est_name='reg')
params = multi_estimator.get_params(deep=False)
assert params['est_name'] == est_name
@pytest.mark.parametrize(
'estimators,est_name,X,y',
[
(REGRESSORS, 'lr', X_reg, y_reg),
(REGRESSORS, 'dtr', X_reg, y_reg),
(CLASSIFIERS, 'lr', X_clf, y_clf),
(CLASSIFIERS, 'dtc', X_clf, y_clf),
],
)
def test_multi_estimator_fitting(estimators, est_name, X, y):
"""Test multi-estimator fitting process."""
multi_estimator = MultiEstimatorMixin(estimators, est_name)
multi_estimator.fit(X, y)
fitted_estimator = dict(estimators)[est_name]
assert isinstance(fitted_estimator, multi_estimator.estimator_.__class__)
assert fitted_estimator.get_params() == multi_estimator.estimator_.get_params()
@pytest.mark.parametrize(
'estimators,X,y,est_name',
[(REGRESSORS, X_reg, y_reg, 'reg'), (CLASSIFIERS, X_clf, y_clf, None)],
)
def test_multi_estimator_fitting_error(estimators, X, y, est_name):
"""Test parametrized estimators fitting error."""
with pytest.raises(ValueError):
MultiEstimatorMixin(estimators, est_name).fit(X, y)
def test_multi_classifier_type():
"""Test multi-classifier type of estimator attribute."""
multi_clf = MultiClassifier(CLASSIFIERS)
assert multi_clf._estimator_type == 'classifier'
def test_multi_regressor_type():
"""Test multi-regressor type of estimator attribute."""
multi_reg = MultiRegressor(REGRESSORS)
assert multi_reg._estimator_type == 'regressor'
@pytest.mark.parametrize(
'estimators,param_grids,estimator_type',
[
(REGRESSORS, REGRESSORS_PARAM_GRIDS, 'regressor'),
(CLASSIFIERS, CLASSIFIERS_PARAM_GRIDS, 'classifier'),
],
)
def test_model_search_cv(estimators, param_grids, estimator_type):
"""Test model search cv."""
est_names, *_ = zip(*estimators)
mscv = ModelSearchCV(estimators, param_grids)
if estimator_type == 'regressor':
mscv.fit(X_reg, y_reg)
elif estimator_type == 'classifier':
mscv.fit(X_clf, y_clf)
assert set(est_names) == set(mscv.cv_results_['models'])
``` |
{
"source": "joaopfonseca/research",
"score": 2
} |
#### File: mlresearch/datasets/base.py
```python
from os.path import join
from urllib.parse import urljoin
from sqlite3 import connect
from rich.progress import track
import pandas as pd
UCI_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/"
KEEL_URL = "http://sci2s.ugr.es/keel/keel-dataset/datasets/imbalanced/"
GIC_URL = "http://www.ehu.eus/ccwintco/uploads/"
OPENML_URL = "https://www.openml.org/data/get_csv/"
FETCH_URLS = {
"breast_tissue": urljoin(UCI_URL, "00192/BreastTissue.xls"),
"ecoli": urljoin(UCI_URL, "ecoli/ecoli.data"),
"eucalyptus": urljoin(OPENML_URL, "3625/dataset_194_eucalyptus.arff"),
"glass": urljoin(UCI_URL, "glass/glass.data"),
"haberman": urljoin(UCI_URL, "haberman/haberman.data"),
"heart": urljoin(UCI_URL, "statlog/heart/heart.dat"),
"iris": urljoin(UCI_URL, "iris/bezdekIris.data"),
"libras": urljoin(UCI_URL, "libras/movement_libras.data"),
"liver": urljoin(UCI_URL, "liver-disorders/bupa.data"),
"pima": "https://gist.githubusercontent.com/ktisha/c21e73a1bd1700294ef790c56c8aec1f"
"/raw/819b69b5736821ccee93d05b51de0510bea00294/pima-indians-diabetes.csv",
"vehicle": urljoin(UCI_URL, "statlog/vehicle/"),
"wine": urljoin(UCI_URL, "wine/wine.data"),
"new_thyroid_1": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid1.zip"
),
"new_thyroid_2": urljoin(
urljoin(KEEL_URL, "imb_IRlowerThan9/"), "new-thyroid2.zip"
),
"cleveland": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "cleveland-0_vs_4.zip"
),
"led": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p2/"), "led7digit-0-2-4-5-6-7-8-9_vs_1.zip"
),
"page_blocks_1_3": urljoin(
urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "page-blocks-1-3_vs_4.zip"
),
"vowel": urljoin(urljoin(KEEL_URL, "imb_IRhigherThan9p1/"), "vowel0.zip"),
"yeast_1": urljoin(urljoin(KEEL_URL, "imb_IRlowerThan9/"), "yeast1.zip"),
"banknote_authentication": urljoin(
UCI_URL, "00267/data_banknote_authentication.txt"
),
"arcene": urljoin(UCI_URL, "arcene/"),
"audit": urljoin(UCI_URL, "00475/audit_data.zip"),
"spambase": urljoin(UCI_URL, "spambase/spambase.data"),
"parkinsons": urljoin(UCI_URL, "parkinsons/parkinsons.data"),
"ionosphere": urljoin(UCI_URL, "ionosphere/ionosphere.data"),
"breast_cancer": urljoin(UCI_URL, "breast-cancer-wisconsin/wdbc.data"),
"adult": urljoin(UCI_URL, "adult/adult.data"),
"abalone": urljoin(UCI_URL, "abalone/abalone.data"),
"acute": urljoin(UCI_URL, "acute/diagnosis.data"),
"annealing": urljoin(UCI_URL, "annealing/anneal.data"),
"census": urljoin(UCI_URL, "census-income-mld/census-income.data.gz"),
"contraceptive": urljoin(UCI_URL, "cmc/cmc.data"),
"covertype": urljoin(UCI_URL, "covtype/covtype.data.gz"),
"credit_approval": urljoin(UCI_URL, "credit-screening/crx.data"),
"dermatology": urljoin(UCI_URL, "dermatology/dermatology.data"),
"echocardiogram": urljoin(UCI_URL, "echocardiogram/echocardiogram.data"),
"flags": urljoin(UCI_URL, "flags/flag.data"),
"heart_disease": [
urljoin(UCI_URL, "heart-disease/processed.cleveland.data"),
urljoin(UCI_URL, "heart-disease/processed.hungarian.data"),
urljoin(UCI_URL, "heart-disease/processed.switzerland.data"),
urljoin(UCI_URL, "heart-disease/processed.va.data"),
],
"hepatitis": urljoin(UCI_URL, "hepatitis/hepatitis.data"),
"german_credit": urljoin(UCI_URL, "statlog/german/german.data"),
"thyroid": urljoin(UCI_URL, "thyroid-disease/thyroid0387.data"),
"first_order_theorem": urljoin(OPENML_URL, "1587932/phpPbCMyg"),
"gas_drift": urljoin(OPENML_URL, "1588715/phpbL6t4U"),
"autouniv_au7": urljoin(OPENML_URL, "1593748/phpmRPvKy"),
"autouniv_au4": urljoin(OPENML_URL, "1593744/phpiubDlf"),
"mice_protein": urljoin(OPENML_URL, "17928620/phpchCuL5"),
"steel_plates": urljoin(OPENML_URL, "18151921/php5s7Ep8"),
"cardiotocography": urljoin(OPENML_URL, "1593756/phpW0AXSQ"),
"waveform": urljoin(OPENML_URL, "60/dataset_60_waveform-5000.arff"),
"volkert": urljoin(OPENML_URL, "19335689/file1c556e3db171.arff"),
"asp_potassco": urljoin(OPENML_URL, "21377447/file18547f421393.arff"),
"wine_quality": urljoin(OPENML_URL, "4965268/wine-quality-red.arff"),
"mfeat_zernike": urljoin(OPENML_URL, "22/dataset_22_mfeat-zernike.arff"),
"gesture_segmentation": urljoin(OPENML_URL, "1798765/phpYLeydd"),
"texture": urljoin(OPENML_URL, "4535764/phpBDgUyY"),
"usps": urljoin(OPENML_URL, "19329737/usps.arff"),
"japanese_vowels": urljoin(OPENML_URL, "52415/JapaneseVowels.arff"),
"pendigits": urljoin(OPENML_URL, "32/dataset_32_pendigits.arff"),
"image_segmentation": urljoin(OPENML_URL, "18151937/phpyM5ND4"),
"baseball": urljoin(OPENML_URL, "3622/dataset_189_baseball.arff"),
"indian_pines": [
urljoin(GIC_URL, "2/22/Indian_pines.mat"),
urljoin(GIC_URL, "c/c4/Indian_pines_gt.mat"),
],
"salinas": [
urljoin(GIC_URL, "f/f1/Salinas.mat"),
urljoin(GIC_URL, "f/fa/Salinas_gt.mat"),
],
"salinas_a": [
urljoin(GIC_URL, "d/df/SalinasA.mat"),
urljoin(GIC_URL, "a/aa/SalinasA_gt.mat"),
],
"pavia_centre": [
urljoin(GIC_URL, "e/e3/Pavia.mat"),
urljoin(GIC_URL, "5/53/Pavia_gt.mat"),
],
"pavia_university": [
urljoin(GIC_URL, "e/ee/PaviaU.mat"),
urljoin(GIC_URL, "5/50/PaviaU_gt.mat"),
],
"kennedy_space_center": [
urljoin(GIC_URL, "2/26/KSC.mat"),
urljoin(GIC_URL, "a/a6/KSC_gt.mat"),
],
"botswana": [
urljoin(GIC_URL, "7/72/Botswana.mat"),
urljoin(GIC_URL, "5/58/Botswana_gt.mat"),
],
}
RANDOM_STATE = 0
class Datasets:
"""Base class to download and save datasets."""
def __init__(self, names="all"):
self.names = names
@staticmethod
def _modify_columns(data):
"""Rename and reorder columns of dataframe."""
X, y = data.drop(columns="target"), data.target
X.columns = range(len(X.columns))
return pd.concat([X, y], axis=1)
def download(self):
"""Download the datasets."""
if self.names == "all":
func_names = [func_name for func_name in dir(self) if "fetch_" in func_name]
else:
func_names = [
f"fetch_{name}".lower().replace(" ", "_") for name in self.names
]
self.content_ = []
for func_name in track(func_names, description="Datasets"):
name = func_name.replace("fetch_", "").upper().replace("_", " ")
fetch_data = getattr(self, func_name)
data = self._modify_columns(fetch_data())
self.content_.append((name, data))
return self
def save(self, path, db_name):
"""Save datasets."""
with connect(join(path, f"{db_name}.db")) as connection:
for name, data in self.content_:
data.to_sql(name, connection, index=False, if_exists="replace")
```
#### File: mlresearch/utils/_visualization.py
```python
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex, Normalize
from matplotlib.cm import ScalarMappable
def load_plt_sns_configs(font_size=8):
"""
Load LaTeX style configurations for Matplotlib/Seaborn
Visualizations.
"""
sns.set_style("whitegrid")
tex_fonts = {
# Use LaTeX to write all text
"text.usetex": True,
"font.family": "serif",
# Use 10pt font in plots, to match 10pt font in document
"axes.labelsize": (10 / 8) * font_size,
"font.size": (10 / 8) * font_size,
# Make the legend/label fonts a little smaller
"legend.fontsize": font_size,
"xtick.labelsize": font_size,
"ytick.labelsize": font_size,
# Subplots size/shape
"figure.subplot.left": 0.098,
"figure.subplot.right": 0.938,
"figure.subplot.bottom": 0.12,
"figure.subplot.top": 0.944,
"figure.subplot.wspace": 0.071,
"figure.subplot.hspace": 0.2,
}
plt.rcParams.update(tex_fonts)
def val_to_color(col, cmap="RdYlBu_r"):
"""
Converts a column of values to hex-type colors.
Parameters
----------
col : array-like of shape (n_samples,)
Values to convert to hex-type color code
cmap : str or `~matplotlib.colors.Colormap`
The colormap used to map normalized data values to RGBA colors
Returns
-------
colors : array-like of shape (n_samples,)
Array with hex values as string type.
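    Examples
    --------
    A small sketch (the exact hex codes depend on the colormap endpoints, so
    the output shown is illustrative only)::
        >>> import numpy as np
        >>> val_to_color(np.array([0.0, 0.5, 1.0]))  # doctest: +SKIP
        array(['#313695', '#ffffbf', '#a50026'], dtype='<U7')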
"""
norm = Normalize(vmin=col.min(), vmax=col.max(), clip=True)
mapper = ScalarMappable(norm=norm, cmap=cmap)
rgba = mapper.to_rgba(col)
return np.apply_along_axis(rgb2hex, 1, rgba)
```
#### File: 2019-lucas/scripts/analysis.py
```python
import sys
from os.path import join, dirname
from collections import Counter, OrderedDict
from scipy.stats import wilcoxon
import numpy as np
import pandas as pd
from rlearn.tools import (
combine_results,
select_results,
calculate_wide_optimal,
calculate_ranking,
calculate_mean_sem_perc_diff_scores,
)
from utils import (
sort_tbl,
generate_paths,
load_datasets,
make_bold,
generate_pvalues_tbl,
SCORERS,
)
LABELS_MAPPING = {"A": 1, "B": 2, "C": 0, "D": 3, "E": 4, "F": 5, "G": 6, "H": 7}
RESULTS_NAMES = ("none", "ros", "smote", "bsmote", "adasyn", "gsmote")
OVRS_NAMES = ("NONE", "ROS", "SMOTE", "B-SMOTE", "ADASYN", "G-SMOTE")
CLFS_NAMES = ("LR", "KNN", "DT", "GBC", "RF")
METRICS_MAPPING = OrderedDict(
[
("accuracy", "Accuracy"),
("f1_macro", "F-score"),
("geometric_mean_score_macro", "G-mean"),
]
)
BASELINE_OVRS = ("NONE", "ROS", "SMOTE")
MAIN_RESULTS_NAMES = (
"dataset_description",
"wide_optimal",
"ranking",
"perc_diff_scores",
"wilcoxon_results",
)
ALPHA = 0.01
def describe_dataset(dataset):
"""Generates dataframe with dataset description."""
name, (X, y) = dataset
counts = Counter(y)
description = [
["Dataset", name],
["Features", X.shape[-1] - 1],
["Instances", X.shape[0]],
["Instances of class C", counts[LABELS_MAPPING["C"]]],
["Instances of class H", counts[LABELS_MAPPING["H"]]],
["IR of class H", counts[LABELS_MAPPING["C"]] / counts[LABELS_MAPPING["H"]]],
]
return pd.DataFrame(description)
def generate_main_results(data_path, results_path):
"""Generate the main results of the experiment."""
# Load dataset
dataset = load_datasets(data_dir=data_path)[0]
# Load results
results = []
for name in RESULTS_NAMES:
file_path = join(results_path, f"{name}.pkl")
results.append(pd.read_pickle(file_path))
# Combine and select results
results = combine_results(*results)
results = select_results(
results, oversamplers_names=OVRS_NAMES, classifiers_names=CLFS_NAMES
)
# Extract metrics names
metrics_names, *_ = zip(*METRICS_MAPPING.items())
# Dataset description
dataset_description = describe_dataset(dataset)
# Scores
wide_optimal = calculate_wide_optimal(results).drop(columns="Dataset")
# Ranking
ranking = calculate_ranking(results).drop(columns="Dataset")
ranking.iloc[:, 2:] = ranking.iloc[:, 2:].astype(int)
# Percentage difference
perc_diff_scores = []
for oversampler in BASELINE_OVRS:
perc_diff_scores_ovs = calculate_mean_sem_perc_diff_scores(
results, [oversampler, "G-SMOTE"]
)[0]
perc_diff_scores_ovs = perc_diff_scores_ovs[["Difference"]].rename(
columns={"Difference": oversampler}
)
perc_diff_scores.append(perc_diff_scores_ovs)
perc_diff_scores = sort_tbl(
pd.concat(
[ranking[["Classifier", "Metric"]], pd.concat(perc_diff_scores, axis=1)],
axis=1,
),
clfs_order=CLFS_NAMES,
ovrs_order=OVRS_NAMES,
metrics_order=metrics_names,
)
perc_diff_scores.iloc[:, 2:] = round(perc_diff_scores.iloc[:, 2:], 2)
# Wilcoxon test
pvalues = []
for ovr in OVRS_NAMES[:-1]:
mask = (
(wide_optimal["Metric"] != "accuracy")
if ovr == "NONE"
else np.repeat(True, len(wide_optimal))
)
pvalues.append(
wilcoxon(
wide_optimal.loc[mask, ovr], wide_optimal.loc[mask, "G-SMOTE"]
).pvalue
)
wilcoxon_results = pd.DataFrame(
{
"Oversampler": OVRS_NAMES[:-1],
"p-value": pvalues,
"Significance": np.array(pvalues) < ALPHA,
}
)
# Format results
main_results = [(MAIN_RESULTS_NAMES[0], dataset_description)]
for name, result in zip(
MAIN_RESULTS_NAMES[1:],
(wide_optimal, ranking, perc_diff_scores, wilcoxon_results),
):
if name != "wilcoxon_results":
result = sort_tbl(
result,
clfs_order=CLFS_NAMES,
ovrs_order=OVRS_NAMES,
metrics_order=metrics_names,
)
result["Metric"] = result["Metric"].apply(
lambda metric: METRICS_MAPPING[metric]
)
if name == "wide_optimal":
result.iloc[:, 2:] = result.iloc[:, 2:].apply(
lambda row: make_bold(row, True, 3), axis=1
)
elif name == "ranking":
result.iloc[:, 2:] = result.iloc[:, 2:].apply(
lambda row: make_bold(row, False, 0), axis=1
)
elif name == "wilcoxon_results":
            result = generate_pvalues_tbl(result)
main_results.append((name, result))
return main_results
if __name__ == "__main__":
# Extract paths
data_path, results_path, analysis_path = generate_paths()
# Generate and save main results
results = generate_main_results(data_path, results_path)
for name, result in results:
result.to_csv(
join(analysis_path, f"{name}.csv"),
index=False,
header=(name != "dataset_description"),
)
```
#### File: 2021-al-generator-lulc/scripts/analysis.py
```python
from os import listdir
from os.path import join
from itertools import product
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
from rlearn.tools import summarize_datasets
from research.datasets import RemoteSensingDatasets
from research.utils import (
generate_paths,
generate_mean_std_tbl_bold,
load_datasets,
load_plt_sns_configs,
make_bold,
)
from scipy.stats import wilcoxon
DATASETS_NAMES = [
d.replace("fetch_", "")
for d in dir(RemoteSensingDatasets())
if d.startswith("fetch_")
]
DATASETS_MAPPING = dict(
[
(d, "".join([i[0] for i in d.split("_")]).upper())
if (len(d.split("_")) > 1)
else (d, d.title())
for d in DATASETS_NAMES
]
)
METRICS_MAPPING = dict(
[
("accuracy", "Accuracy"),
("f1_macro", "F-score"),
("geometric_mean_score_macro", "G-mean"),
]
)
GROUP_KEYS = ["Dataset", "Estimator", "Evaluation Metric", "Selection Criterion"]
GENERATOR_NAMES = ["NONE", "G-SMOTE"]
def _make_bold_stat_signif(value, sig_level=0.05):
"""Make bold the lowest or highest value(s)."""
val = "%.1e" % value
val = "\\textbf{%s}" % val if value <= sig_level else val
return val
def generate_pvalues_tbl_bold(tbl, sig_level=0.05):
"""Format p-values."""
for name in tbl.dtypes[tbl.dtypes == float].index:
tbl[name] = tbl[name].apply(
lambda pvalue: _make_bold_stat_signif(pvalue, sig_level)
)
return tbl
def summarize_multiclass_datasets(datasets):
summarized = (
summarize_datasets(datasets)
.rename(
columns={
"Dataset name": "Dataset",
"Imbalance Ratio": "IR",
"Minority instances": "Min. Instances",
"Majority instances": "Maj. Instances",
}
)
.set_index("Dataset")
.join(
pd.Series(
dict([(name, dat[-1].unique().size) for name, dat in datasets]),
name="Classes",
)
)
.reset_index()
)
summarized.loc[:, "Dataset"] = summarized.loc[:, "Dataset"].apply(
lambda x: x.title()
)
return summarized
def plot_lulc_images():
arrays_x = []
arrays_y = []
for dat_name in DATASETS_NAMES:
X, y = RemoteSensingDatasets()._load_gic_dataset(dat_name)
arrays_x.append(X[:, :, 100])
arrays_y.append(np.squeeze(y))
for X, y, figname in zip(arrays_x, arrays_y, DATASETS_NAMES):
plt.figure(figsize=(20, 10), dpi=320)
if figname == "kennedy_space_center":
X = np.clip(X, 0, 350)
for i, (a, cmap) in enumerate(zip([X, y], ["gist_gray", "terrain"])):
plt.subplot(2, 1, i + 1)
plt.imshow(a, cmap=plt.get_cmap(cmap))
plt.axis("off")
plt.savefig(join(analysis_path, figname), bbox_inches="tight", pad_inches=0)
def select_results(results):
"""
Computes mean and std across all splits and runs from the original
experiment's data.
"""
results = results.copy()
# Extract info from the params dict
for param in ["evaluation_metric", "selection_strategy"]:
results[param] = results.params.apply(
lambda x: (x[param] if param in x.keys() else np.nan)
)
# Format column names
results.rename(
columns={
"param_est_name": "Estimator",
"evaluation_metric": "Evaluation Metric",
"selection_strategy": "Selection Criterion",
},
inplace=True,
)
# Drop random states from params
# Convert to params to string in order to use groupby
results.params = results.params.apply(
lambda x: {
k: v
for k, v in x.items()
if ("random_state" not in k)
and ("evaluation_metric" not in k)
and ("selection_strategy" not in k)
}
).astype(str)
scoring_cols = {
col: "_".join(col.split("_")[2:])
for col in results.columns
if "mean_test" in col
}
# Group data using GROUP_KEYS
scoring_mapping = {
scorer_name: [np.mean, np.std] for scorer_name in scoring_cols.values()
}
results_ = results.rename(columns=scoring_cols).groupby(GROUP_KEYS, dropna=False)
# Get standard deviations
stds = results_.apply(
lambda dat: [
np.std(
dat[
dat.columns[
dat.columns.str.contains(scorer)
& dat.columns.str.contains("split")
]
].values.flatten()
)
for scorer in scoring_mapping.keys()
]
)
results = results_.agg(scoring_mapping)
mask_cols = np.array(list(results.columns))[:, 1] == "std"
values_arr = results.values
values_arr[:, mask_cols] = np.array(stds.tolist())
return pd.DataFrame(values_arr, columns=results.columns, index=results.index)
def get_mean_std_data(results):
mask = results.columns.get_level_values(1).isin(["mean", ""])
df_mean = results.iloc[:, mask].copy()
df_mean.columns = df_mean.columns.get_level_values(0)
df_std = results.iloc[:, ~mask].copy()
df_std.columns = df_std.columns.get_level_values(0)
return df_mean, df_std
def calculate_wide_optimal(results):
core_metrics = results.reset_index()["Evaluation Metric"].dropna().unique()
res_ = []
for m in ["mean", "std"]:
res = results.loc[:, results.columns.get_level_values(1) == m]
res.columns = res.columns.get_level_values(0)
res = (
res.reset_index()
.drop(columns=["Evaluation Metric", "Selection Criterion"])
.loc[:, ["Dataset", "Estimator", *core_metrics]]
.melt(id_vars=["Dataset", "Estimator"])
.rename(columns={"value": m})
.set_index(["Dataset", "Estimator", "variable"])
)
res_.append(res)
wide_optimal = (
pd.concat(res_, axis=1)
.reset_index()
.groupby(["Dataset", "Estimator", "variable"])
.apply(lambda dat: dat.iloc[np.argmax(dat["mean"])])
.reset_index(drop=True)
)
(_, wide_optimal["Generator"], wide_optimal["Classifier"]) = np.array(
wide_optimal.Estimator.apply(
lambda x: x.split("|")
if len(x.split("|")) == 3
else [np.nan, np.nan, x.split("|")[1]]
).tolist()
).T
wide_optimal = wide_optimal.drop(columns="Estimator").pivot(
["Dataset", "Classifier", "variable"], "Generator", ["mean", "std"]
)
return (
wide_optimal["mean"].drop(columns="SMOTE"),
wide_optimal["std"].drop(columns="SMOTE"),
)
def calculate_wide_optimal_al(results):
core_metrics = results.reset_index()["Evaluation Metric"].dropna().unique()
res_ = []
for m in ["mean", "std"]:
res = results.loc[:, results.columns.get_level_values(1) == m]
res.columns = res.columns.get_level_values(0)
res = (
res.reset_index()
.drop(columns=[*core_metrics, "Selection Criterion"])
.melt(id_vars=["Dataset", "Estimator", "Evaluation Metric"])
.set_index(["Dataset", "Estimator", "Evaluation Metric", "variable"])
.rename(columns={"value": m})
)
res_.append(res)
wide_optimal = (
pd.concat(res_, axis=1)
.reset_index()
.groupby(["Dataset", "Estimator", "variable", "Evaluation Metric"])
.apply(
lambda dat: (
dat.iloc[np.argmax(dat["mean"])]
if not dat.variable.iloc[0].startswith("dur")
else dat.iloc[np.argmin(dat["mean"])]
)
)
.reset_index(drop=True)
)
(_, wide_optimal["Generator"], wide_optimal["Classifier"]) = np.array(
wide_optimal.Estimator.apply(
lambda x: x.split("|")
if len(x.split("|")) == 3
else [np.nan, np.nan, x.split("|")[1]]
).tolist()
).T
wide_optimal = wide_optimal.drop(columns="Estimator").pivot(
["Dataset", "Classifier", "Evaluation Metric", "variable"],
"Generator",
["mean", "std"],
)
return (
wide_optimal["mean"].drop(columns="SMOTE"),
wide_optimal["std"].drop(columns="SMOTE"),
)
def calculate_mean_std_table(wide_optimal):
df = wide_optimal[0].copy()
df_grouped = (
df.reset_index()
.rename(columns={"variable": "Evaluation Metric"})
.groupby(["Classifier", "Evaluation Metric"])
)
return df_grouped.mean(), df_grouped.std(ddof=0)
def calculate_mean_std_table_al(wide_optimal_al, al_metric="area_under_learning_curve"):
df = wide_optimal_al[0].copy()
df_grouped = (
df.loc[df.index.get_level_values(3) == al_metric]
.reset_index()
.groupby(["Classifier", "Evaluation Metric"])
)
return df_grouped.mean(), df_grouped.std(ddof=0)
def mean_std_ranks(wide_optimal):
ranks = (
wide_optimal.rank(axis=1, ascending=False)
.reset_index()
.groupby(["Classifier", "variable"])
)
return ranks.mean(), ranks.std(ddof=0)
def mean_std_ranks_al(wide_optimal, al_metric="area_under_learning_curve"):
    asc = al_metric.startswith("dur")
ranks = (
wide_optimal.loc[wide_optimal.index.get_level_values(3) == al_metric]
.rank(axis=1, ascending=asc)
.reset_index()
.groupby(["Classifier", "Evaluation Metric"])
)
return ranks.mean(), ranks.std(ddof=0)
def data_utilization_rate(*wide_optimal):
df = wide_optimal[0]
df = df.div(df["NONE"], axis=0)
dur_grouped = (
df.loc[df.index.get_level_values(3).str.startswith("dur")]
.reset_index()
.melt(id_vars=df.index.names)
.pivot(
["Dataset", "Classifier", "Evaluation Metric", "Generator"],
"variable",
"value",
)
.reset_index()
.groupby(["Classifier", "Evaluation Metric", "Generator"])
)
return dur_grouped.mean(), dur_grouped.std(ddof=0)
def deficiency_scores(wide_optimal, wide_optimal_al):
wo_mp = wide_optimal[0]["nan"].to_frame()
wo_al = (
wide_optimal_al[0]
.loc[
wide_optimal_al[0].index.get_level_values("variable")
== "area_under_learning_curve"
]
.droplevel("variable", axis=0)
)
wo_a = wo_al.drop(columns="NONE")
wo_b = wo_al["NONE"].to_frame()
deficiency = (wo_mp.values - wo_a.values) / (
2 * wo_mp.values - wo_a.values - wo_b.values
)
deficiency = (
pd.DataFrame(deficiency, columns=wo_a.columns, index=wo_a.index)
.reset_index()
.groupby(["Classifier", "Evaluation Metric"])
)
return deficiency.mean(), deficiency.std(ddof=0)
def generate_main_results(results):
"""Generate the main results of the experiment."""
wide_optimal_al = calculate_wide_optimal_al(results)
wide_optimal = calculate_wide_optimal(results)
# Wide optimal AULC
wide_optimal_aulc = generate_mean_std_tbl_bold(
*(
df.loc[
df.index.get_level_values(3) == "area_under_learning_curve"
].droplevel("variable", axis=0)
for df in wide_optimal_al
),
decimals=3,
)
wide_optimal_aulc.index.rename(["Dataset", "Classifier", "Metric"], inplace=True)
# Mean ranking analysis
mean_std_aulc_ranks = generate_mean_std_tbl_bold(
*mean_std_ranks_al(wide_optimal_al[0], "area_under_learning_curve"),
maximum=False,
decimals=2,
)
# Mean scores analysis
optimal_mean_std_scores = generate_mean_std_tbl_bold(
*calculate_mean_std_table(wide_optimal), maximum=True, decimals=3
)
mean_std_aulc_scores = generate_mean_std_tbl_bold(
*calculate_mean_std_table_al(wide_optimal_al, "area_under_learning_curve"),
maximum=True,
decimals=3,
)
# Deficiency scores analysis
mean_std_deficiency = generate_mean_std_tbl_bold(
*deficiency_scores(wide_optimal, wide_optimal_al),
maximum=False,
decimals=3,
threshold=0.5,
)
# Return results and names
main_results_names = (
"wide_optimal_aulc",
"mean_std_aulc_ranks",
"mean_std_aulc_scores",
"optimal_mean_std_scores",
"mean_std_deficiency",
)
return zip(
main_results_names,
(
wide_optimal_aulc,
mean_std_aulc_ranks,
mean_std_aulc_scores,
optimal_mean_std_scores,
mean_std_deficiency,
),
)
def generate_data_utilization_tables(wide_optimal_al):
# Mean data utilization to reach the .85 g-mean threshold
data_utilization = wide_optimal_al[0].reset_index()
# Data utilization per dataset and performance threshold
optimal_du = data_utilization[
(data_utilization["Evaluation Metric"] == "geometric_mean_score_macro")
& (data_utilization.variable.str.startswith("dur_"))
].drop(columns="Evaluation Metric")
optimal_du = (
optimal_du.groupby(["Classifier", "variable"])
.mean()
.apply(lambda row: make_bold(row * 100, maximum=False, num_decimals=1), axis=1)
.reset_index()
)
optimal_du["G-mean Score"] = optimal_du.variable.str.replace("dur_", "")
optimal_du["G-mean Score"] = (optimal_du["G-mean Score"].astype(int) / 100).apply(
lambda x: "{0:.2f}".format(x)
)
for generator in GENERATOR_NAMES:
optimal_du[generator] = optimal_du[generator].apply(
lambda x: x[:-1] + "\\%}" if x.endswith("}") else x + "\\%"
)
return optimal_du[["G-mean Score", "Classifier", "NONE", "G-SMOTE"]].sort_values(
["G-mean Score", "Classifier"]
)
def generate_dur_visualization(wide_optimal_al):
"""Visualize data utilization rates"""
dur = data_utilization_rate(*wide_optimal_al)
dur_mean, dur_std = (
df.loc[
df.index.get_level_values("Evaluation Metric").isin(
["geometric_mean_score_macro", "f1_macro"]
)
]
.rename(columns={col: int(col.replace("dur_", "")) for col in df.columns})
.rename(index={"NONE": "Standard", "G-SMOTE": "Proposed"})
for df in dur
)
load_plt_sns_configs(10)
col_values = dur_mean.index.get_level_values("Evaluation Metric").unique()
row_values = dur_mean.index.get_level_values("Classifier").unique()
# Set and format main content of the visualization
fig, axes = plt.subplots(
row_values.shape[0],
col_values.shape[0],
figsize=(7, 6),
sharex="col",
sharey="row",
constrained_layout=True,
)
for (row, clf), (col, metric) in product(
enumerate(row_values), enumerate(col_values)
):
ax = axes[row, col]
dur_mean.loc[(clf, metric)].T.plot.line(
ax=ax, xlabel="", color={"Standard": "indianred", "Proposed": "steelblue"}
)
ax.set_ylabel(clf)
ax.set_ylim(
bottom=(
dur_mean.loc[clf].values.min() - 0.05
if dur_mean.loc[clf].values.min() < 0.6
else 0.8
),
top=(
dur_mean.loc[clf].values.max()
if dur_mean.loc[clf].values.max() >= 1.05
else 1.05
),
)
ax.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax.set_xticks(dur_mean.columns)
# Set legend
if (row == 1) and (col == 1):
ax.legend(
loc="center left",
bbox_to_anchor=(1, 0.5),
ncol=1,
borderaxespad=0,
frameon=False,
fontsize=10,
)
else:
ax.get_legend().remove()
fig.text(0.45, -0.025, "Performance Thresholds", ha="center", va="bottom")
for ax, metric in zip(axes[0, :], col_values):
ax.set_title(METRICS_MAPPING[metric])
fig.savefig(
join(analysis_path, "data_utilization_rate.pdf"),
format="pdf",
bbox_inches="tight",
)
plt.close()
def generate_mean_rank_bar_chart(wide_optimal_al):
"""Generates bar chart."""
load_plt_sns_configs()
ranks, ranks_std = (
df.reset_index()
for df in mean_std_ranks_al(wide_optimal_al[0], "area_under_learning_curve")
)
ranks["Evaluation Metric"] = ranks["Evaluation Metric"].apply(
lambda x: METRICS_MAPPING[x]
)
fig, axes = plt.subplots(
ranks["Classifier"].unique().shape[0],
ranks["Evaluation Metric"].unique().shape[0],
figsize=(5, 6),
)
lranks = ranks.set_index(["Classifier", "Evaluation Metric"])
for (row, clf), (col, metric) in product(
enumerate(ranks["Classifier"].unique()),
enumerate(ranks["Evaluation Metric"].unique()),
):
dat = (
len(GENERATOR_NAMES)
- lranks.loc[(clf, metric)].loc[list(GENERATOR_NAMES[::-1])]
)
axes[row, col].bar(
dat.index,
dat.values,
color=["steelblue" for i in range(len(GENERATOR_NAMES) - 1)]
+ ["indianred"],
)
plt.sca(axes[row, col])
plt.yticks(
range(len(GENERATOR_NAMES)),
[None] + list(range(1, len(GENERATOR_NAMES)))[::-1],
)
plt.xticks(rotation=90)
if row == 0:
plt.title(metric)
if col == 0:
plt.ylabel(f"{clf}")
if row != len(ranks.Classifier.unique()) - 1:
plt.xticks(range(len(GENERATOR_NAMES)), [])
if col != 0:
plt.yticks(range(len(GENERATOR_NAMES)), [])
sns.despine(left=True)
plt.grid(b=None, axis="x")
fig.savefig(
join(analysis_path, "mean_rankings_bar_chart.pdf"),
format="pdf",
bbox_inches="tight",
)
plt.close()
def apply_wilcoxon_test(wide_optimal, dep_var, OVRS_NAMES, alpha):
"""Performs a Wilcoxon signed-rank test"""
pvalues = []
for ovr in OVRS_NAMES:
mask = np.repeat(True, len(wide_optimal))
pvalues.append(
wilcoxon(
wide_optimal.loc[mask, ovr], wide_optimal.loc[mask, dep_var]
).pvalue
)
wilcoxon_results = pd.DataFrame(
{
"Oversampler": OVRS_NAMES,
"p-value": pvalues,
"Significance": np.array(pvalues) < alpha,
}
)
return wilcoxon_results
def generate_statistical_results(wide_optimal_al, alpha=0.1, control_method="NONE"):
"""Generate the statistical results of the experiment."""
# Get results
results = (
wide_optimal_al[0][GENERATOR_NAMES]
.reset_index()[wide_optimal_al[0].reset_index().variable.str.startswith("dur_")]
.drop(columns=["variable"])
.rename(columns={"Evaluation Metric": "Metric"})
)
results = results[results["Metric"] == "geometric_mean_score_macro"]
# Wilcoxon signed rank test
# Optimal proposed framework vs baseline framework
wilcoxon_test = []
for dataset in results.Dataset.unique():
wilcoxon_results = apply_wilcoxon_test(
results[results["Dataset"] == dataset], "G-SMOTE", ["NONE"], alpha
).drop(columns="Oversampler")
wilcoxon_results["Dataset"] = dataset.replace("_", " ").title()
wilcoxon_test.append(wilcoxon_results[["Dataset", "p-value", "Significance"]])
wilcoxon_test = pd.concat(wilcoxon_test, axis=0)
wilcoxon_test["p-value"] = wilcoxon_test["p-value"].apply(
lambda x: "{:.1e}".format(x)
)
return "wilcoxon_test", wilcoxon_test
if __name__ == "__main__":
data_path, results_path, analysis_path = generate_paths(__file__)
# load datasets
datasets = load_datasets(data_dir=data_path)
# datasets description
summarize_multiclass_datasets(datasets).to_csv(
join(analysis_path, "datasets_description.csv"), index=False
)
# datasets visualization
# plot_lulc_images()
# load results
res_names = [r for r in listdir(results_path) if r.endswith(".pkl")]
results = []
for name in res_names:
file_path = join(results_path, name)
df_results = pd.read_pickle(file_path)
df_results["Dataset"] = name.replace("_base.pkl", "").replace("_al.pkl", "")
results.append(df_results)
# Combine and select results
results = select_results(pd.concat(results))
# Main results - dataframes
main_results = generate_main_results(results)
for name, result in main_results:
# Format results
result = result.rename(index={**METRICS_MAPPING, **DATASETS_MAPPING}).rename(
columns={"nan": "MP"}
)
result = result[
[col for col in ["MP"] + GENERATOR_NAMES if col in result.columns]
]
result.reset_index(inplace=True)
# Keep only G-mean and F-score
if "Evaluation Metric" in result.columns or "Metric" in result.columns:
query_col = (
"Evaluation Metric"
if "Evaluation Metric" in result.columns
else "Metric"
)
result = result[result[query_col].isin(["G-mean", "F-score"])]
# Export LaTeX-ready dataframe
result.rename(columns={"NONE": "Standard", "G-SMOTE": "Proposed"}).to_csv(
join(analysis_path, f"{name}.csv"), index=False
)
# Main results - visualizations
wide_optimal_al = calculate_wide_optimal_al(results)
generate_dur_visualization(wide_optimal_al)
# generate_mean_rank_bar_chart(wide_optimal_al)
# Data utilization - dataframes
optimal_data_utilization = generate_data_utilization_tables(wide_optimal_al)
optimal_data_utilization = optimal_data_utilization.rename(
columns={"NONE": "Standard", "G-SMOTE": "Proposed"}
).to_csv(join(analysis_path, "optimal_data_utilization.csv"), index=False)
# Statistical results
name, result = generate_statistical_results(
wide_optimal_al, alpha=0.05, control_method="NONE"
)
result.to_csv(join(analysis_path, f"{name}.csv"), index=False)
```
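A minimal numeric sketch of the deficiency score computed by `deficiency_scores` above; the values below are made up for illustration, with `mp` standing for the scores without active learning, `al_gen` for active learning with a generator, and `al_none` for active learning with the NONE baseline:
```python
import numpy as np

# Hypothetical AULC-style scores (illustrative values only)
mp = np.array([0.90, 0.85])       # no active learning (MP)
al_gen = np.array([0.88, 0.84])   # active learning with a data generator
al_none = np.array([0.80, 0.78])  # active learning with the NONE baseline

# Same ratio as in deficiency_scores: gap_gen / (gap_gen + gap_none).
# Values below 0.5 mean the generator closes more of the gap to MP than
# the NONE baseline does (hence the threshold=0.5 used for bolding above).
deficiency = (mp - al_gen) / (2 * mp - al_gen - al_none)
print(deficiency)  # [0.16666667 0.125     ]
```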
#### File: 2021-al-generator-lulc/scripts/results.py
```python
from os.path import join
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from imblearn.over_sampling import SMOTE
from gsmote import GeometricSMOTE
from clover.over_sampling import ClusterOverSampler
from sklearn.model_selection import StratifiedKFold
from rlearn.model_selection import ModelSearchCV
from imblearn.base import SamplerMixin
from sklearn.model_selection import train_test_split
from research.utils import (
load_datasets,
generate_paths,
check_pipelines,
check_pipelines_wrapper,
)
from research.active_learning import ALWrapper
from research.metrics import ALScorer, data_utilization_rate, SCORERS
TEST_SIZE = 0.2
def make_dur(threshold):
def dur(test_scores, data_utilization):
return data_utilization_rate(test_scores, data_utilization, threshold=threshold)
return dur
for i in range(60, 100, 5):
SCORERS[f"dur_{i}"] = ALScorer(make_dur(i / 100))
class remove_test(SamplerMixin):
"""
Used to ensure the data used to train classifiers with and without AL
is the same.
"""
def __init__(self, test_size=0.2):
self.test_size = test_size
def _fit_resample(self, X, y):
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=self.test_size, random_state=42
)
return X_train, y_train
def fit_resample(self, X, y):
return self._fit_resample(X, y)
CONFIG = {
# Remove .2 of the dataset from training, to replicate the training data
# for AL methods
"remove_test": [("remove_test", remove_test(TEST_SIZE), {})],
"classifiers": [
(
"LR",
LogisticRegression(
multi_class="multinomial", solver="sag", penalty="none", max_iter=1e4
),
{},
),
("KNN", KNeighborsClassifier(), {}),
("RF", RandomForestClassifier(), {}),
],
"scoring": ["accuracy", "f1_macro", "geometric_mean_score_macro"],
"n_splits": 5,
"n_runs": 3,
"rnd_seed": 42,
"n_jobs": -1,
"verbose": 1,
}
CONFIG_AL = {
"generator": [
("NONE", None, {}),
("SMOTE", ClusterOverSampler(SMOTE(k_neighbors=5), n_jobs=1), {}),
(
"G-SMOTE",
ClusterOverSampler(
GeometricSMOTE(
k_neighbors=5, deformation_factor=0.5, truncation_factor=0.5
),
n_jobs=1,
),
{},
),
],
"wrapper": (
"AL",
ALWrapper(
n_initial=15,
increment=15,
max_iter=49,
test_size=TEST_SIZE,
random_state=42,
),
{
"evaluation_metric": ["accuracy", "f1_macro", "geometric_mean_score_macro"],
"selection_strategy": ["random", "entropy", "breaking_ties"],
},
),
"scoring": [
"accuracy",
"f1_macro",
"geometric_mean_score_macro",
"area_under_learning_curve",
]
+ [f"dur_{i}" for i in range(60, 100, 5)],
}
if __name__ == "__main__":
# Extract paths
data_dir, results_dir, _ = generate_paths(__file__)
# Load datasets
datasets = load_datasets(data_dir=data_dir)
# Extract pipelines and parameter grids
estimators_al, param_grids_al = check_pipelines_wrapper(
[CONFIG_AL["generator"], CONFIG["classifiers"]],
CONFIG_AL["wrapper"],
CONFIG["rnd_seed"],
CONFIG["n_runs"],
wrapped_only=True,
)
estimators_base, param_grids_base = check_pipelines(
[CONFIG["remove_test"], CONFIG["classifiers"]],
CONFIG["rnd_seed"],
CONFIG["n_runs"],
)
for name, (X, y) in datasets:
# Define and fit AL experiment
experiment_al = ModelSearchCV(
estimators_al,
param_grids_al,
scoring=CONFIG_AL["scoring"],
n_jobs=CONFIG["n_jobs"],
cv=StratifiedKFold(
n_splits=CONFIG["n_splits"],
shuffle=True,
random_state=CONFIG["rnd_seed"],
),
verbose=CONFIG["verbose"],
return_train_score=True,
refit=False,
).fit(X, y)
# Save results
file_name = f'{name.replace(" ", "_").lower()}_al.pkl'
pd.DataFrame(experiment_al.cv_results_).to_pickle(join(results_dir, file_name))
# Define and fit baseline experiment
experiment_base = ModelSearchCV(
estimators_base,
param_grids_base,
scoring=CONFIG["scoring"],
n_jobs=CONFIG["n_jobs"],
cv=StratifiedKFold(
n_splits=CONFIG["n_splits"],
shuffle=True,
random_state=CONFIG["rnd_seed"],
),
verbose=CONFIG["verbose"],
return_train_score=True,
refit=False,
).fit(X, y)
# Save results
file_name = f'{name.replace(" ", "_").lower()}_base.pkl'
pd.DataFrame(experiment_base.cv_results_).to_pickle(
join(results_dir, file_name)
)
```
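A small sketch of the custom scorer keys that the loop at the top of this script registers in `SCORERS`; each `dur_XX` entry wraps `data_utilization_rate` at a fixed performance threshold:
```python
# One ALScorer is registered per threshold between 0.60 and 0.95
thresholds = {f"dur_{i}": i / 100 for i in range(60, 100, 5)}
print(thresholds)
# {'dur_60': 0.6, 'dur_65': 0.65, 'dur_70': 0.7, 'dur_75': 0.75,
#  'dur_80': 0.8, 'dur_85': 0.85, 'dur_90': 0.9, 'dur_95': 0.95}
```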
#### File: research/active_learning/_selection_methods.py
```python
import numpy as np
def breaking_ties(probabilities):
"""Breaking Ties uncertainty measurement. The output is scaled and reversed."""
probs_sorted = np.sort(probabilities, axis=1)[:, ::-1]
    # The extra minus sign is redundant but is kept to highlight the change
    # from the original formula.
bt = -(probs_sorted[:, 0] - probs_sorted[:, 1])
return bt
UNCERTAINTY_FUNCTIONS = {
"entropy": (
lambda probabilities: (-probabilities * np.log2(probabilities)).sum(axis=1)
),
"breaking_ties": breaking_ties,
"random": (
lambda probabilities: np.ones(probabilities.shape[0]) / probabilities.shape[0]
),
}
```
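A minimal sketch of how the selection methods above score a batch of predictions, assuming the `research` package shown here is importable and using a toy probability matrix; higher scores flag the more uncertain samples for labeling:
```python
import numpy as np
from research.active_learning._selection_methods import breaking_ties

# Toy predicted class probabilities for four unlabeled samples
probabilities = np.array(
    [
        [0.98, 0.01, 0.01],  # confident prediction -> low uncertainty
        [0.40, 0.35, 0.25],  # near tie between the two top classes
        [0.34, 0.33, 0.33],  # almost uniform -> highest uncertainty
        [0.70, 0.20, 0.10],
    ]
)

entropy = (-probabilities * np.log2(probabilities)).sum(axis=1)
bt = breaking_ties(probabilities)  # -(p_top1 - p_top2), larger is more uncertain

# Both measures rank the near-uniform and near-tie rows first
print(np.argsort(entropy)[::-1])  # [2 1 3 0]
print(np.argsort(bt)[::-1])       # [2 1 3 0]
```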
#### File: research/data_augmentation/_gsmote.py
```python
import math
import numpy as np
from collections import Counter
from numpy.linalg import norm
from scipy import sparse
from sklearn.utils import check_random_state, check_array
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import _check_sample_weight
from sklearn.utils.sparsefuncs_fast import (
csr_mean_variance_axis0,
csc_mean_variance_axis0,
)
from sklearn.preprocessing import OneHotEncoder, label_binarize
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import (
check_neighbors_object,
Substitution,
check_target_type,
check_sampling_strategy,
)
from imblearn.utils._docstring import _random_state_docstring
from imblearn.utils._validation import ArraysTransformer
SELECTION_STRATEGY = ("combined", "majority", "minority")
def _make_geometric_sample(
center, surface_point, truncation_factor, deformation_factor, random_state
):
"""A support function that returns an artificial point inside
the geometric region defined by the center and surface points.
Parameters
----------
center : ndarray, shape (n_features, )
Center point of the geometric region.
surface_point : ndarray, shape (n_features, )
Surface point of the geometric region.
truncation_factor : float, optional (default=0.0)
The type of truncation. The values should be in the [-1.0, 1.0] range.
deformation_factor : float, optional (default=0.0)
The type of geometry. The values should be in the [0.0, 1.0] range.
random_state : int, RandomState instance or None
Control the randomization of the algorithm.
Returns
-------
point : ndarray, shape (n_features, )
Synthetically generated sample.
"""
# Zero radius case
if np.array_equal(center, surface_point):
return center
# Generate a point on the surface of a unit hyper-sphere
radius = norm(center - surface_point)
normal_samples = random_state.normal(size=center.size)
point_on_unit_sphere = normal_samples / norm(normal_samples)
point = (random_state.uniform(size=1) ** (1 / center.size)) * point_on_unit_sphere
# Parallel unit vector
parallel_unit_vector = (surface_point - center) / norm(surface_point - center)
# Truncation
close_to_opposite_boundary = (
truncation_factor > 0
and np.dot(point, parallel_unit_vector) < truncation_factor - 1
)
close_to_boundary = (
truncation_factor < 0
and np.dot(point, parallel_unit_vector) > truncation_factor + 1
)
if close_to_opposite_boundary or close_to_boundary:
point -= 2 * np.dot(point, parallel_unit_vector) * parallel_unit_vector
# Deformation
parallel_point_position = np.dot(point, parallel_unit_vector) * parallel_unit_vector
perpendicular_point_position = point - parallel_point_position
point = (
parallel_point_position
+ (1 - deformation_factor) * perpendicular_point_position
)
# Translation
point = center + radius * point
return point
def _make_categorical_sample(X_new, all_neighbors, categories_size, random_state):
"""A support function that populates categorical features' values
in an artificial point.
Parameters
----------
X_new : ndarray, shape (n_features, )
Artificial point to populate categorical features.
all_neighbors: ndarray, shape (n_features, k_neighbors)
Nearest neighbors used for majority voting.
categories_size: list
Used to tell apart one-hot encoded features.
random_state : int, RandomState instance or None
Control the randomization of the algorithm. Used
for tie breaking when there are two majority values.
Returns
-------
point : ndarray, shape (n_features, )
Synthetically generated sample.
"""
for start_idx, end_idx in zip(
np.cumsum(categories_size)[:-1], np.cumsum(categories_size)[1:]
):
col_maxs = all_neighbors[:, start_idx:end_idx].sum(axis=0)
# tie breaking argmax
is_max = np.isclose(col_maxs, col_maxs.max(axis=0))
max_idxs = random_state.permutation(np.argwhere(is_max))
col_sels = max_idxs[0]
ys = start_idx + col_sels
X_new[start_idx:end_idx] = 0
X_new[ys] = 1
return X_new
@Substitution(
sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class GeometricSMOTE(BaseOverSampler):
"""Class to to perform over-sampling using Geometric SMOTE.
This algorithm is an implementation of Geometric SMOTE, a geometrically
enhanced drop-in replacement for SMOTE as presented in [1]_.
Read more in the :ref:`User Guide <user_guide>`.
Parameters
----------
categorical_features : ndarray of shape (n_cat_features,) or (n_features,)
        Specifies which features are categorical. Can either be:
- array of indices specifying the categorical features;
- mask array of shape (n_features, ) and ``bool`` dtype for which
``True`` indicates the categorical features.
{sampling_strategy}
{random_state}
    truncation_factor : float, optional (default=1.0)
The type of truncation. The values should be in the [-1.0, 1.0] range.
deformation_factor : float, optional (default=0.0)
The type of geometry. The values should be in the [0.0, 1.0] range.
selection_strategy : str, optional (default='combined')
The type of Geometric SMOTE algorithm with the following options:
``'combined'``, ``'majority'``, ``'minority'``.
k_neighbors : int or object, optional (default=5)
If ``int``, number of nearest neighbours to use when synthetic
samples are constructed for the minority method. If object, an estimator
that inherits from :class:`sklearn.neighbors.base.KNeighborsMixin` that
will be used to find the k_neighbors.
n_jobs : int, optional (default=1)
The number of threads to open if possible.
Notes
-----
See the original paper: [1]_ for more details.
Supports multi-class resampling. A one-vs.-rest scheme is used as
originally proposed in [2]_.
References
----------
.. [1] <NAME>, <NAME>, "Geometric SMOTE:
a geometrically enhanced drop-in replacement for SMOTE",
Information Sciences, vol. 501, pp. 118-135, 2019.
.. [2] <NAME>, <NAME>, <NAME>, <NAME>, "SMOTE:
synthetic minority over-sampling technique", Journal of Artificial
Intelligence Research, vol. 16, pp. 321-357, 2002.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from gsmote import GeometricSMOTE # doctest: +NORMALIZE_WHITESPACE
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> gsmote = GeometricSMOTE(random_state=1)
>>> X_res, y_res = gsmote.fit_resample(X, y)
>>> print('Resampled dataset shape %s' % Counter(y_res))
Resampled dataset shape Counter({{0: 900, 1: 900}})
"""
def __init__(
self,
sampling_strategy="auto",
random_state=None,
truncation_factor=1.0,
deformation_factor=0.0,
selection_strategy="combined",
k_neighbors=5,
categorical_features=None,
n_jobs=1,
):
super(GeometricSMOTE, self).__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.truncation_factor = truncation_factor
self.deformation_factor = deformation_factor
self.selection_strategy = selection_strategy
self.k_neighbors = k_neighbors
self.categorical_features = categorical_features
self.n_jobs = n_jobs
def _validate_estimator(self):
"""Create the necessary attributes for Geometric SMOTE."""
# Check random state
self.random_state_ = check_random_state(self.random_state)
# Validate strategy
if self.selection_strategy not in SELECTION_STRATEGY:
error_msg = (
"Unknown selection_strategy for Geometric SMOTE algorithm. "
"Choices are {}. Got {} instead."
)
raise ValueError(
error_msg.format(SELECTION_STRATEGY, self.selection_strategy)
)
# Create nearest neighbors object for positive class
if self.selection_strategy in ("minority", "combined"):
self.nns_pos_ = check_neighbors_object(
"nns_positive", self.k_neighbors, additional_neighbor=1
)
self.nns_pos_.set_params(n_jobs=self.n_jobs)
# Create nearest neighbors object for negative class
if self.selection_strategy in ("majority", "combined"):
self.nn_neg_ = check_neighbors_object("nn_negative", nn_object=1)
self.nn_neg_.set_params(n_jobs=self.n_jobs)
def _validate_categorical(self):
"""Create the necessary attributes for Geometric SMOTE
with categorical features"""
if self.categorical_features is None:
return self
categorical_features = np.asarray(self.categorical_features)
if categorical_features.dtype.name == "bool":
self.categorical_features_ = np.flatnonzero(categorical_features)
else:
if any(
[cat not in np.arange(self.n_features_) for cat in categorical_features]
):
raise ValueError(
"Some of the categorical indices are out of range. Indices"
" should be between 0 and {}".format(self.n_features_)
)
self.categorical_features_ = categorical_features
self.continuous_features_ = np.setdiff1d(
np.arange(self.n_features_), self.categorical_features_
)
if self.categorical_features_.size == self.n_features_in_:
raise ValueError(
"GeometricSMOTE is not designed to work only with categorical "
"features. It requires some numerical features."
)
return self
def _check_X_y(self, X, y):
"""Overwrite the checking to let pass some string for categorical
features.
"""
y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
X, y = self._validate_data(
X, y, reset=True, dtype=None, accept_sparse=["csr", "csc"]
)
return X, y, binarize_y
def _make_geometric_samples(
self, X, y, pos_class_label, n_samples, sample_weight=None
):
"""A support function that returns an artificials samples inside
the geometric region defined by nearest neighbors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like, shape (n_samples, )
Corresponding label for each sample in X.
pos_class_label : str or int
The minority class (positive class) target value.
n_samples : int
The number of samples to generate.
sample_weight : 1-D array-like, optional
            The probabilities associated with each entry in X.
            If not given, a uniform distribution over all entries is assumed.
Returns
-------
X_new : ndarray, shape (n_samples_new, n_features)
Synthetically generated samples.
y_new : ndarray, shape (n_samples_new, )
Target values for synthetic samples.
"""
# Return zero new samples
if n_samples == 0:
return (
np.array([], dtype=X.dtype).reshape(0, X.shape[1]),
np.array([], dtype=y.dtype),
np.array([], dtype=X.dtype),
)
# Select positive class samples
X_pos = X[y == pos_class_label]
if sample_weight is not None:
sample_weight_pos = (
sample_weight[y == pos_class_label]
if sample_weight[y == pos_class_label].sum() != 0
else None
)
else:
sample_weight_pos = None
# Force minority strategy if no negative class samples are present
self.selection_strategy_ = (
"minority" if X.shape[0] == X_pos.shape[0] else self.selection_strategy
)
# Minority or combined strategy
if self.selection_strategy_ in ("minority", "combined"):
self.nns_pos_.fit(X_pos)
points_pos = self.nns_pos_.kneighbors(X_pos)[1][:, 1:]
weight_pos = (
np.repeat(sample_weight_pos, self.k_neighbors)
/ (sample_weight_pos.sum() * self.k_neighbors)
if sample_weight_pos is not None
else None
)
samples_indices = self.random_state_.choice(
range(0, len(points_pos.flatten())), size=n_samples, p=weight_pos
)
rows = np.floor_divide(samples_indices, points_pos.shape[1])
cols = np.mod(samples_indices, points_pos.shape[1])
# Majority or combined strategy
if self.selection_strategy_ in ("majority", "combined"):
X_neg = X[y != pos_class_label]
self.nn_neg_.fit(X_neg)
points_neg = self.nn_neg_.kneighbors(X_pos)[1]
weight_neg = (
sample_weight_pos / sample_weight_pos.sum()
if sample_weight_pos is not None
else None
)
if self.selection_strategy_ == "majority":
samples_indices = self.random_state_.choice(
range(0, len(points_neg.flatten())), size=n_samples, p=weight_neg
)
rows = np.floor_divide(samples_indices, points_neg.shape[1])
cols = np.mod(samples_indices, points_neg.shape[1])
        # In the case that the median std is equal to zero, we have to
        # restore the non-null entries from the original OHE encoding
if self.categorical_features is not None:
if math.isclose(self.median_std_, 0):
X[:, self.continuous_features_.size :] = self._X_categorical_encoded
# Select positive class samples
X_pos = X[y == pos_class_label]
if self.selection_strategy_ in ("majority", "combined"):
X_neg = X[y != pos_class_label]
# Generate new samples
X_new = np.zeros((n_samples, X.shape[1]))
all_neighbors_ = []
for ind, (row, col) in enumerate(zip(rows, cols)):
# Define center point
center = X_pos[row]
# Minority strategy
if self.selection_strategy_ == "minority":
surface_point = X_pos[points_pos[row, col]]
all_neighbors = (
(X_pos[points_pos[row]])
if self.categorical_features is not None
else None
)
# Majority strategy
elif self.selection_strategy_ == "majority":
surface_point = X_neg[points_neg[row, col]]
all_neighbors = (
(X_neg[points_neg[row]])
if self.categorical_features is not None
else None
)
# Combined strategy
else:
surface_point_pos = X_pos[points_pos[row, col]]
surface_point_neg = X_neg[points_neg[row, 0]]
radius_pos = norm(center - surface_point_pos)
radius_neg = norm(center - surface_point_neg)
surface_point = (
surface_point_neg if radius_pos > radius_neg else surface_point_pos
)
all_neighbors = (
np.vstack([X_pos[points_pos[row]], X_neg[points_neg[row]]])
if self.categorical_features is not None
else None
)
if self.categorical_features is not None:
all_neighbors_.append(all_neighbors)
# Append new sample - no categorical features
X_new[ind] = _make_geometric_sample(
center,
surface_point,
self.truncation_factor,
self.deformation_factor,
self.random_state_,
)
# Create new samples for target variable
y_new = np.array([pos_class_label] * len(samples_indices))
return X_new, y_new, all_neighbors_
def _make_categorical_samples(self, X_new, y_new, categories_size, all_neighbors_):
for ind, all_neighbors in enumerate(all_neighbors_):
# Append new sample - continuous features
X_new[ind] = _make_categorical_sample(
X_new[ind], all_neighbors, categories_size, self.random_state_
)
return X_new, y_new
def _encode_categorical(self, X, y):
"""TODO"""
# compute the median of the standard deviation of the minority class
target_stats = Counter(y)
class_minority = min(target_stats, key=target_stats.get)
# Separate categorical features from continuous features
X_continuous = X[:, self.continuous_features_]
X_continuous = check_array(X_continuous, accept_sparse=["csr", "csc"])
X_categorical = X[:, self.categorical_features_].copy()
X_minority = X_continuous[np.flatnonzero(y == class_minority)]
if sparse.issparse(X):
if X.format == "csr":
_, var = csr_mean_variance_axis0(X_minority)
else:
_, var = csc_mean_variance_axis0(X_minority)
else:
var = X_minority.var(axis=0)
self.median_std_ = np.median(np.sqrt(var))
if X_continuous.dtype.name != "object":
dtype_ohe = X_continuous.dtype
else:
dtype_ohe = np.float64
self.ohe_ = OneHotEncoder(sparse=True, handle_unknown="ignore", dtype=dtype_ohe)
# the input of the OneHotEncoder needs to be dense
X_ohe = self.ohe_.fit_transform(
X_categorical.toarray() if sparse.issparse(X_categorical) else X_categorical
)
# we can replace the 1 entries of the categorical features with the
# median of the standard deviation. It will ensure that whenever
# distance is computed between 2 samples, the difference will be equal
# to the median of the standard deviation as in the original paper.
# In the edge case where the median of the std is equal to 0, the 1s
# entries will be also nullified. In this case, we store the original
# categorical encoding which will be later used for inversing the OHE
if math.isclose(self.median_std_, 0):
self._X_categorical_encoded = X_ohe.toarray()
X_ohe.data = np.ones_like(X_ohe.data, dtype=X_ohe.dtype) * self.median_std_ / 2
if self._issparse:
X_encoded = np.hstack([X_continuous.toarray(), X_ohe.toarray()])
else:
X_encoded = np.hstack([X_continuous, X_ohe.toarray()])
return X_encoded
def _decode_categorical(self, X_resampled):
"""Reverses the encoding of the categorical features to match
the dataset's original structure."""
if math.isclose(self.median_std_, 0):
X_resampled[
: self._X_categorical_encoded.shape[0], self.continuous_features_.size :
] = self._X_categorical_encoded
X_resampled = sparse.csr_matrix(X_resampled)
X_res_cat = X_resampled[:, self.continuous_features_.size :]
X_res_cat.data = np.ones_like(X_res_cat.data)
X_res_cat_dec = self.ohe_.inverse_transform(X_res_cat)
if self._issparse:
X_resampled = sparse.hstack(
(X_resampled[:, : self.continuous_features_.size], X_res_cat_dec),
format="csr",
)
else:
X_resampled = np.hstack(
(
X_resampled[:, : self.continuous_features_.size].toarray(),
X_res_cat_dec,
)
)
indices_reordered = np.argsort(
np.hstack((self.continuous_features_, self.categorical_features_))
)
if sparse.issparse(X_resampled):
col_indices = X_resampled.indices.copy()
for idx, col_idx in enumerate(indices_reordered):
mask = X_resampled.indices == col_idx
col_indices[mask] = idx
X_resampled.indices = col_indices
else:
X_resampled = X_resampled[:, indices_reordered]
return X_resampled
def _fit_resample(self, X, y, sample_weight=None):
# Save basic data
self.n_features_ = X.shape[1]
self._issparse = sparse.issparse(X)
X_dtype = X.dtype
# Validate estimator's parameters
self._validate_categorical()._validate_estimator()
# Preprocess categorical data
if self.categorical_features is not None:
X = self._encode_categorical(X, y)
categories_size = [self.continuous_features_.size] + [
cat.size for cat in self.ohe_.categories_
]
# Copy data
X_resampled, y_resampled = X.copy(), y.copy()
# Resample
for class_label, n_samples in self.sampling_strategy_.items():
# Apply gsmote mechanism
X_new, y_new, all_neighbors_ = self._make_geometric_samples(
X, y, class_label, n_samples, sample_weight=sample_weight
)
# Apply smotenc mechanism
if self.categorical_features is not None:
X_new, y_new = self._make_categorical_samples(
X_new, y_new, categories_size, all_neighbors_
)
# Append new data
X_resampled, y_resampled = (
np.vstack((X_resampled, X_new)),
np.hstack((y_resampled, y_new)),
)
# reverse the encoding of the categorical features
if self.categorical_features is not None:
X_resampled = self._decode_categorical(X_resampled).astype(X_dtype)
else:
X_resampled = X_resampled.astype(X_dtype)
return X_resampled, y_resampled
def fit_resample(self, X, y, sample_weight=None):
"""Resample the dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
sample_weight : array-like of shape (n_samples,), default=None
Individual weights for each sample. Assigns probabilities for selecting a
sample as a center point.
Returns
-------
X_resampled : {array-like, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : array-like of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
check_classification_targets(y)
arrays_transformer = ArraysTransformer(X, y)
X, y, binarize_y = self._check_X_y(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self.sampling_strategy_ = check_sampling_strategy(
self.sampling_strategy, y, self._sampling_type
)
output = self._fit_resample(X, y, sample_weight)
y_ = label_binarize(output[1], np.unique(y)) if binarize_y else output[1]
X_, y_ = arrays_transformer.transform(output[0], y_)
return (X_, y_) if len(output) == 2 else (X_, y_, output[2])
```
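A minimal numpy sketch of the geometry implemented by `_make_geometric_sample` above, assuming a 2-D center and surface point and truncation and deformation factors of 0, so only the hyper-sphere sampling and translation steps remain; the class docstring already covers the public `fit_resample` usage:
```python
import numpy as np
from numpy.linalg import norm

rng = np.random.RandomState(0)
center = np.array([0.0, 0.0])
surface_point = np.array([1.0, 0.0])

# Draw a point inside the unit hyper-sphere (same steps as above)
radius = norm(center - surface_point)
normal_samples = rng.normal(size=center.size)
point_on_unit_sphere = normal_samples / norm(normal_samples)
point = (rng.uniform(size=1) ** (1 / center.size)) * point_on_unit_sphere

# With truncation_factor = deformation_factor = 0 the candidate point is
# only rescaled by the radius and translated onto the center
synthetic = center + radius * point
print(synthetic, norm(synthetic - center) <= radius)  # ... True
```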
#### File: research/data_augmentation/_oversampling_augmentation.py
```python
import warnings
from collections import Counter, OrderedDict
import numpy as np
from sklearn.base import clone
from sklearn.neighbors import NearestNeighbors
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.over_sampling import RandomOverSampler
from ._gsmote import GeometricSMOTE
AUGMENTATION_STRATEGIES = ["oversampling", "constant", "proportional"]
def _modify_nn(n_neighbors, n_samples):
"""Modify nearest neighbors object or integer."""
if isinstance(n_neighbors, NearestNeighbors):
n_neighbors = (
clone(n_neighbors).set_params(n_neighbors=n_samples - 1)
if n_neighbors.n_neighbors >= n_samples
else clone(n_neighbors)
)
elif isinstance(n_neighbors, int) and n_neighbors >= n_samples:
n_neighbors = n_samples - 1
return n_neighbors
def _clone_modify(oversampler, y):
"""Clone and modify attributes of oversampler for corner cases."""
# Clone oversampler
oversampler = clone(oversampler)
# Not modify attributes case
if isinstance(oversampler, RandomOverSampler):
return oversampler
# Select and modify oversampler
n_minority_samples = Counter(y).most_common()[-1][1]
if n_minority_samples == 1:
oversampler = RandomOverSampler()
else:
if hasattr(oversampler, "k_neighbors"):
oversampler.k_neighbors = _modify_nn(
oversampler.k_neighbors, n_minority_samples
)
if hasattr(oversampler, "m_neighbors"):
oversampler.m_neighbors = _modify_nn(oversampler.m_neighbors, y.size)
if hasattr(oversampler, "n_neighbors"):
oversampler.n_neighbors = _modify_nn(
oversampler.n_neighbors, n_minority_samples
)
return oversampler
class OverSamplingAugmentation(BaseOverSampler):
"""
A wrapper to facilitate the use of `imblearn.over_sampling` objects for
data augmentation.
Parameters
----------
oversampler : oversampler estimator, default=None
Over-sampler to be used for data augmentation.
augmentation_strategy : float, dict or {'oversampling', 'constant', 'proportional'}\
, default='oversampling'
Specifies how the data augmentation is done.
- When ``float`` or ``int``, each class' frequency is augmented
according to the specified ratio (which is equivalent to the ``proportional``
strategy).
- When ``oversampling``, the data augmentation is done according to the
sampling strategy passed in the ``oversampler`` object. If ``value`` is not
`None`, then the number of samples generated for each class equals the number
of samples in the majority class multiplied by ``value``.
- When ``constant``, each class frequency is augmented to match
the value passed in the parameter ``value``.
- When ``proportional``, relative class frequencies are preserved and the
number of samples in the dataset is matched with the value passed in the
parameter ``value``.
value : int, float, default=None
Value to be used as the new frequency of each class. It is ignored unless the
augmentation strategy is set to ``constant`` or ``oversampling``.
random_state : int, RandomState instance, default=None
Control the randomization of the algorithm.
- If int, ``random_state`` is the seed used by the random number
generator;
- If ``RandomState`` instance, random_state is the random number
generator;
- If ``None``, the random number generator is the ``RandomState``
instance used by ``np.random``.
"""
def __init__(
self,
oversampler=None,
augmentation_strategy="oversampling",
value=None,
random_state=None,
):
super(OverSamplingAugmentation, self).__init__(sampling_strategy="auto")
self.oversampler = oversampler
self.augmentation_strategy = augmentation_strategy
self.value = value
self.random_state = random_state
warnings.filterwarnings("ignore")
def fit(self, X, y):
"""
Check inputs and statistics of the sampler.
You should use ``fit_resample`` in all cases.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Data array.
y : array-like of shape (n_samples,)
Target array.
Returns
-------
self : object
Return the instance itself.
"""
X, y, _ = self._check_X_y(X, y)
if (
type(self.augmentation_strategy) not in [int, float, dict]
and self.augmentation_strategy not in AUGMENTATION_STRATEGIES
):
raise ValueError(
f"When 'augmentation_strategy' in neither an int or float,"
f" it needs to be one of {AUGMENTATION_STRATEGIES}. Got "
f"'{self.augmentation_strategy}' instead."
)
if (type(self.value) not in [int, float]) and (
self.augmentation_strategy in ["constant", "proportional"]
):
raise ValueError(
f"When 'augmentation_strategy' is 'constant' or 'proportional',"
f" 'value' needs to be an int or float. Got "
f"{self.value} instead."
)
# Setup the sampling strategy based on the augmentation strategy
if self.augmentation_strategy == "constant":
counts = OrderedDict(Counter(y))
self.sampling_strategy_ = {
k: int(np.round(self.value)) if self.value > freq else freq
for k, freq in counts.items()
}
elif self.augmentation_strategy == "proportional":
counts = OrderedDict(Counter(y))
ratio = self.value / y.shape[0]
if ratio > 1:
self.sampling_strategy_ = {
k: int(np.round(freq * ratio)) for k, freq in counts.items()
}
else:
raise ValueError(
"The new size of the augmented dataset must be larger than the"
f" original dataset. Originally, there are {y.shape[0]} samples"
f" and {self.value} samples are asked."
)
elif self.augmentation_strategy == "oversampling" and self.value is None:
self.sampling_strategy_ = self.oversampler.sampling_strategy
elif self.augmentation_strategy == "oversampling":
counts = OrderedDict(Counter(y))
max_freq = max(counts.values())
self.sampling_strategy_ = {
k: int(np.round(max_freq * self.value))
if max_freq * self.value > freq
else freq
for k, freq in counts.items()
}
elif type(self.augmentation_strategy) in [int, float]:
counts = OrderedDict(Counter(y))
self.sampling_strategy_ = {
k: int(np.round(v * self.augmentation_strategy))
for k, v in counts.items()
}
else:
self.sampling_strategy_ = self.augmentation_strategy
return self
def fit_resample(self, X, y, **fit_params):
"""
Resample the dataset.
Parameters
----------
X : {array-like, dataframe, sparse matrix} of shape \
(n_samples, n_features)
Matrix containing the data which have to be sampled.
y : array-like of shape (n_samples,)
Corresponding label for each sample in X.
Returns
-------
X_resampled : {array-like, dataframe, sparse matrix} of shape \
(n_samples_new, n_features)
The array containing the resampled data.
y_resampled : array-like of shape (n_samples_new,)
The corresponding label of `X_resampled`.
"""
self.fit(X, y)
if self.oversampler is not None:
self.oversampler_ = _clone_modify(self.oversampler, y).set_params(
random_state=self.random_state,
sampling_strategy=self.sampling_strategy_,
)
if isinstance(self.oversampler_, GeometricSMOTE):
return self.oversampler_.fit_resample(X, y, **fit_params)
else:
return self.oversampler_.fit_resample(X, y)
else:
return X, y
def _fit_resample(self, X, y):
"""A placeholder. It was overriden by the self.fit_resample method."""
return
```
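A minimal sketch of how the `augmentation_strategy` options documented above translate into per-class targets, assuming the `research` package is importable and using a toy 80/20 label vector; the commented values mirror the arithmetic in `fit`:
```python
import numpy as np
from research.data_augmentation._gsmote import GeometricSMOTE
from research.data_augmentation._oversampling_augmentation import (
    OverSamplingAugmentation,
)

# Toy imbalanced target: 80 samples of class 0, 20 of class 1
y = np.array([0] * 80 + [1] * 20)
X = np.zeros((y.size, 2))  # dummy 2-feature matrix

# 'constant', value=100      -> every class raised to 100: {0: 100, 1: 100}
# 'proportional', value=200  -> ratio 200/100 = 2:         {0: 160, 1: 40}
# 'oversampling', value=1.5  -> 1.5 x the majority count:  {0: 120, 1: 120}
# a float such as 2.0        -> each class doubled:        {0: 160, 1: 40}
aug = OverSamplingAugmentation(
    oversampler=GeometricSMOTE(),
    augmentation_strategy="constant",
    value=100,
    random_state=42,
)
aug.fit(X, y)
print(aug.sampling_strategy_)  # {0: 100, 1: 100}
```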
#### File: datasets/tests/test_datasets.py
```python
from urllib.request import urlopen
import multiprocessing.dummy as mp
from multiprocessing import cpu_count
import ssl
from .._base import FETCH_URLS
ssl._create_default_https_context = ssl._create_unverified_context
def test_urls():
"""Test whether URLS are working."""
    urls = [url for url in FETCH_URLS.values() if isinstance(url, str)]
p = mp.Pool(cpu_count())
url_status = p.map(lambda url: (urlopen(url).status == 200), urls)
assert all(url_status)
```
#### File: research/utils/_check_pipelines.py
```python
from itertools import product
from sklearn.base import clone
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import ParameterGrid
from imblearn.pipeline import Pipeline
from rlearn.utils import check_random_states
def check_pipelines(objects_list, random_state, n_runs):
"""Extract estimators and parameters grids."""
# Create random states
random_states = check_random_states(random_state, n_runs)
pipelines = []
param_grid = []
for comb, rs in product(product(*objects_list), random_states):
name = "|".join([i[0] for i in comb])
# name, object, sub grid
comb = [
(nm, ob, ParameterGrid(sg))
if ob is not None
else (nm, FunctionTransformer(), ParameterGrid(sg))
for nm, ob, sg in comb
]
# Create estimator
if name not in [n[0] for n in pipelines]:
est = Pipeline([(nm, ob) for nm, ob, _ in comb])
pipelines.append((name, est))
# Create intermediate parameter grids
sub_grids = [
[{f"{nm}__{k}": v for k, v in param_def.items()} for param_def in sg]
for nm, obj, sg in comb
]
# Create parameter grids
for sub_grid in product(*sub_grids):
param_prefix = "" if len(comb) == 1 else f"{name}__"
grid = {"est_name": [name]}
grid.update(
{f"{param_prefix}{k}": [v] for d in sub_grid for k, v in d.items()}
)
random_states = {
f"{param_prefix}{param}": [rs]
for param in est.get_params()
if "random_state" in param
}
grid.update(random_states)
# Avoid multiple runs over pipelines without random state
if grid not in param_grid:
param_grid.append(grid)
return pipelines, param_grid
def check_pipelines_wrapper(
objects_list, wrapper, random_state, n_runs, wrapped_only=False
):
wrapper_label = wrapper[0]
wrapper_obj = wrapper[1]
wrapper_grid = wrapper[2]
estimators, param_grids = check_pipelines(objects_list, random_state, n_runs)
wrapped_estimators = [
(
f"{wrapper_label}|{name}",
clone(wrapper_obj).set_params(**{"classifier": pipeline}),
)
for name, pipeline in estimators
]
wrapped_param_grids = [
{
"est_name": [f'{wrapper_label}|{d["est_name"][0]}'],
**{
f'{wrapper_label}|{d["est_name"][0]}__classifier__{k}': v
for k, v in d.items()
if k != "est_name"
},
**{
f'{wrapper_label}|{d["est_name"][0]}__{k}': v
for k, v in wrapper_grid.items()
},
}
for d in param_grids
]
if wrapped_only:
return wrapped_estimators, wrapped_param_grids
else:
return (estimators + wrapped_estimators, param_grids + wrapped_param_grids)
```
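A minimal sketch of how `check_pipelines` above expands groups of (name, object, sub-grid) steps into named pipelines and parameter grids; the step names and grids below are illustrative, and the import path follows the one already used in the experiment script:
```python
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from research.utils import check_pipelines

objects_list = [
    # one group per pipeline step: (name, object, sub parameter grid)
    [("NONE", None, {})],  # None becomes a FunctionTransformer placeholder
    [
        ("LR", LogisticRegression(max_iter=1000), {"C": [0.1, 1.0]}),
        ("KNN", KNeighborsClassifier(), {"n_neighbors": [3, 5]}),
    ],
]

pipelines, param_grids = check_pipelines(objects_list, random_state=0, n_runs=2)
print([name for name, _ in pipelines])  # ['NONE|LR', 'NONE|KNN']
# Each grid fixes one hyper-parameter combination plus a run-specific
# random_state, e.g. {'est_name': ['NONE|LR'], 'NONE|LR__LR__C': [0.1],
# 'NONE|LR__LR__random_state': [...]}
```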
#### File: research/utils/_data.py
```python
from os import listdir
from os.path import isdir, join
import pandas as pd
from sqlite3 import connect
def load_datasets(data_dir, suffix="", target_exists=True, **read_csv_kwargs):
"""Load datasets from sqlite database and/or csv files."""
assert isdir(data_dir), "`data_dir` must be a directory."
# Filter data by suffix
dat_names = [dat for dat in listdir(data_dir) if dat.endswith(suffix)]
# Read data
datasets = []
for dat_name in dat_names:
data_path = join(data_dir, dat_name)
# Handle csv data
if dat_name.endswith(".csv"):
ds = pd.read_csv(data_path, **read_csv_kwargs)
name = dat_name.replace(".csv", "").replace("_", " ").upper()
if target_exists:
ds = (ds.iloc[:, :-1], ds.iloc[:, -1])
datasets.append((name, ds))
# Handle sqlite database
elif dat_name.endswith(".db"):
with connect(data_path) as connection:
datasets_names = [
name[0]
for name in connection.execute(
"SELECT name FROM sqlite_master WHERE type='table';"
)
]
for dataset_name in datasets_names:
ds = pd.read_sql(f'select * from "{dataset_name}"', connection)
if target_exists:
ds = (ds.iloc[:, :-1], ds.iloc[:, -1])
datasets.append((dataset_name.replace("_", " ").upper(), ds))
return datasets
``` |
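A minimal usage sketch for `load_datasets` above, assuming a hypothetical `data/` directory of csv files whose last column is the target (the path and suffix are assumptions for illustration):
```python
from research.utils import load_datasets

# Hypothetical layout: data/ holds e.g. 'some_area.csv' with the target last
datasets = load_datasets(data_dir="data", suffix=".csv")
for name, (X, y) in datasets:
    print(name, X.shape, y.shape)  # dataset names are upper-cased, e.g. 'SOME AREA'
```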
{
"source": "joaopfonseca/social_media_crawler",
"score": 3
} |
#### File: gui/FlaskApp/db_instagram.py
```python
from datetime import datetime
import os
#querying
import pandas as pd
import numpy as np
#plotting
#from plotly.offline import plot #to save graphs as html files, useful when testing
import plotly.graph_objs as go
#dashboarding
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
mapbox_access_token = '<KEY>'
app = dash.Dash()
#app.css.append_css({"external_url": "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/css/bootstrap.min.css"})
kw_filter= open('support/active_keyword', 'r')
header_keyword=kw_filter.readline()
keyword = header_keyword.rstrip()
kw_filter.close()
path = '../../Web_Crawler/_data/'
i = open(path + '%s_instagram_posts.csv' % keyword, 'r')
ig_df = pd.read_csv(i)
def drop_minutes_and_seconds(olddate):
new_date = datetime.fromtimestamp(olddate).replace(minute=0, second=0, microsecond=0)
return new_date
ig_df['date'] = ig_df['taken_at'].apply(drop_minutes_and_seconds)
ig_df.sort_values(['date'], ascending=False)
#yvalues_ig_hist1 = ig_df[['date']].groupby('date').size()
#
#xvalues = []
#for value in ig_df['date']:
# if value not in xvalues:
# xvalues.append(value)
#yvalues = []
#for date in xvalues:
# yvalues.append(yvalues_ig_hist1[date])
#plotting Ig posts
diff = (max(ig_df['date'])-min(ig_df['date']))
number_of_bins= (diff.days+1)*24
#first_ig_histogram = [go.Scatter(x=xvalues, y=yvalues)]
ig_histogram = [go.Histogram(x=ig_df['date'], nbinsx=number_of_bins)]
hist_configuration= go.Layout(title='Instagram posts associated to keyword \'%s\'' % keyword, xaxis=dict(title='Hours'), yaxis=dict(title='Count'))
plot_ig_posts = go.Figure(data=ig_histogram, layout=hist_configuration)
#flnm_posts_ig_histogram= keyword + '_posts_ig_histogram.html'
#plot(plot_ig_posts, filename=flnm_posts_ig_histogram, show_link=False, auto_open=False)
i.close()
# =============================================================================
# plotting geomap
# =============================================================================
lon=[]
for coord in ig_df['lng']:
lon.append(coord)
lat=[]
for coord in ig_df['lat']:
lat.append(coord)
#size=
data = go.Data([
go.Scattermapbox(
lat=ig_df['lat'],
lon=ig_df['lng'],
mode='markers',
# marker=go.Marker(
# size=[endpt_size] + [4 for j in range(len(steps) - 2)] + [endpt_size])
text=ig_df['caption_text']
)
])
layout = go.Layout(
title='Location of Posts',
autosize=True,
hovermode='closest',
mapbox=dict(
accesstoken=mapbox_access_token,
bearing=0,
style='dark',
center=dict(
lat=38.7,
lon=-7.98,
),
pitch=0,
zoom=2.2
),
)
wonder_map = go.Figure(data=data, layout=layout)
#plot(wonder_map, filename='scatterplottest.html', show_link=False, auto_open=True)
# =============================================================================
# last plot
# =============================================================================
comments_data_datecount = ig_df.groupby('date').agg({'comment_count': np.sum}).reset_index()
ig_df['postcount']=ig_df['date']
posts_data_datecount = ig_df.groupby('date').agg({'postcount': np.count_nonzero}).reset_index()
comments_plot= go.Bar(x=comments_data_datecount['date'], y=comments_data_datecount['comment_count'], name='Comments')
posts_plot= go.Bar(x=posts_data_datecount['date'] ,y=posts_data_datecount['postcount'], name='Posts' )
bar_chart_layout= go.Layout(title='Number of comments relative to posts', xaxis=dict(title='Days'), yaxis=dict(title='Count'))
bar_chart_content = [posts_plot,comments_plot]
last_bar_chart = go.Figure(data=bar_chart_content, layout=bar_chart_layout)
#plot(last_bar_chart, filename='barplottest.html', show_link=False, auto_open=True)
# =============================================================================
# Creating dashboard
# =============================================================================
app.layout = html.Div([
# html.H1('Hello Dash'),
# html.Div('''Dash: A web application framework for Python.'''),
dcc.Graph(id='overall-plot',figure=last_bar_chart),
html.Div([
dcc.Graph(id='example-graph',figure=wonder_map)
], style={'width': '49%', 'display': 'inline-block'}),
html.Div([
dcc.Graph(id='whatsthisevenfor',figure=plot_ig_posts)
], style={'width': '49%', 'display': 'inline-block'})
])
if __name__ == '__main__':
app.server.run(host='0.0.0.0', port=8051)
```
#### File: social_media_crawler/smc_no_gui/creds.py
```python
def instagram_creds(n):
if n == 1:
username = 'username1234567890'
pwd = '<PASSWORD>'
elif n == 2:
username = 'username1234567890'
pwd = '<PASSWORD>'
elif n == 3:
username = 'username1234567890'
pwd = '<PASSWORD>'
#elif n == 4:
# username = 'username1234567890'
# pwd = '<PASSWORD>'
# add more users as needed!
return dict(username=username, pwd=pwd)
# Twitter
def twitter_creds(n):
if n == 1:
API_KEY = 'api_key1234567890'
API_SECRET = 'api_secret1234567890'
ACCESS_TOKEN = 'access_token1234567890'
ACCESS_TOKEN_SECRET = 'access_token_secret1234567890'
elif n == 2:
API_KEY = 'api_key1234567890'
API_SECRET = 'api_secret1234567890'
ACCESS_TOKEN = 'access_token1234567890'
ACCESS_TOKEN_SECRET = 'access_token_secret1234567890'
elif n == 3:
API_KEY = 'api_key1234567890'
API_SECRET = 'api_secret1234567890'
ACCESS_TOKEN = 'access_token1234567890'
ACCESS_TOKEN_SECRET = 'access_token_secret1234567890'
#elif n == 4:
# API_KEY = 'api_key1234567890'
# API_SECRET = 'api_secret1234567890'
# ACCESS_TOKEN = 'access_token1234567890'
# ACCESS_TOKEN_SECRET = 'access_token_secret1234567890'
# add more users as needed!
return dict(API_KEY=API_KEY, API_SECRET=API_SECRET, ACCESS_TOKEN=ACCESS_TOKEN, ACCESS_TOKEN_SECRET=ACCESS_TOKEN_SECRET)
```
#### File: smc_no_gui/src/instagram_image_downloader.py
```python
import pandas as pd
import json
import re
import os
import urllib.request
import datetime as dt
def ig_img_downloader(keyword):
path = 'data/'+keyword+'/'
directory = os.path.join(path, 'instagram_img')
if not os.path.exists(directory):
os.makedirs(directory)
instagram = pd.read_csv(path + keyword +'_instagram_posts.csv')
instagram = instagram[instagram['time_crawled'].notnull()]
def check_day(date_str):
current_time = dt.datetime.today()
date = dt.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S.%f')
return (current_time-date).days == 0
instagram_pic = instagram[instagram['time_crawled'].apply(check_day)][pd.notnull(instagram['image_versions2_candidates'])]
i=0
for id, post in enumerate(instagram_pic['image_versions2_candidates']):
pic_json = json.loads(post.replace("'", "\""))
#link = re.search('.*.jpg', pic_json[0]['url']).group(0)
link = pic_json[0]['url']
img_name = str(instagram_pic['pk'].iloc[id]) + '.jpg'
#print(link, img_name)
try:
urllib.request.urlretrieve(link, directory + "/" + img_name)
i+=1
except:
pass
if i % 500 == 0:
print( "%s Pictures downloaded: %s" % (i, dt.datetime.now()) )
print('Done! %s Pictures downloaded' % i)
if __name__ == "__main__":
main()
```
#### File: smc_no_gui/src/twitter_tweets.py
```python
import tweepy
import csv
import datetime
import pandas as pd
import os
def twitter_csvcreatefile_header(keyword):
directory = 'data/'+keyword
if not os.path.exists(directory):
os.makedirs(directory)
f = open(directory+'/%s_tweets.csv' % keyword, 'w')
with f as file:
w = csv.writer(file)
w.writerow(['contributors',
'coordinates',
'created_at',
'entities_hashtags',
'entities_symbols',
'entities_urls',
'entities_user_mentions',
'favorite_count',
'favorited',
'geo',
'id',
'id_str',
'in_reply_to_screen_name',
'in_reply_to_status_id',
'in_reply_to_status_id_str',
'in_reply_to_user_id_iso_language_code',
'in_reply_to_user_id_str_result_type',
'is_quote_status',
'lang',
'metadata_iso_language_code',
'metadata_result_type',
'place',
'retweet_count',
'retweeted',
'retweeted_status_contributors',
'retweeted_status_coordinates',
'retweeted_status_created_at',
'retweeted_status_entities',
'retweeted_status_favorite_count',
'retweeted_status_favorited',
'retweeted_status_geo',
'retweeted_status_id',
'retweeted_status_id_str',
'retweeted_status_in_reply_to_screen_name',
'retweeted_status_in_reply_to_status_id',
'retweeted_status_in_reply_to_status_id_str',
'retweeted_status_in_reply_to_user_id',
'retweeted_status_in_reply_to_user_id_str',
'retweeted_status_is_quote_status',
'retweeted_status_lang',
'retweeted_status_metadata',
'retweeted_status_place',
'retweeted_status_retweet_count',
'retweeted_status_retweeted',
'retweeted_status_source',
'retweeted_status_text',
'retweeted_status_truncated',
'retweeted_status_user',
'source',
'text',
'truncated',
'user_contributors_enabled',
'user_created_at',
'user_default_profile',
'user_default_profile_image',
'user_description',
'user_favourites_count',
'user_follow_request_sent',
'user_followers_count',
'user_following',
'user_friends_count',
'user_geo_enabled',
'user_has_extended_profile',
'user_id',
'user_id_str',
'user_is_translation_enabled',
'user_is_translator',
'user_lang',
'user_listed_count',
'user_location',
'user_name',
'user_notifications',
'user_profile_background_color',
'user_profile_background_image_url',
'user_profile_background_image_url_https',
'user_profile_background_tile',
'user_profile_banner_url',
'user_profile_image_url',
'user_profile_image_url_https',
'user_profile_link_color',
'user_profile_sidebar_border_color',
'user_profile_sidebar_fill_color',
'user_profile_text_color',
'user_profile_use_background_image',
'user_protected',
'user_screen_name',
'user_statuses_count',
'user_time_zone',
'user_translator_type',
'user_url',
'user_utc_offset',
'user_verified',
'time_crawled'
])
def update_tweets(keyword, twitter_creds):
def if_empty(json_input):
if json_input == '':
return ''
else:
return json_input
def json_check_keys(jsono):
print(jsono.keys())
jsono = ['contributors','coordinates','created_at','entities','favorite_count','favorited',
'geo','id','id_str','in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id','in_reply_to_user_id_str',
'is_quote_status','lang','metadata','place','retweet_count','retweeted',
'retweeted_status','source','text','truncated','user']
fields_with_subfields = ['entities','in_reply_to_user_id','in_reply_to_user_id_str',
'metadata','retweeted_status','user']
subfields= {'entities':['hashtags','symbols','urls','user_mentions'],
'in_reply_to_user_id':['iso_language_code'],
'in_reply_to_user_id_str':['result_type'],
'metadata':['iso_language_code','result_type'],
'retweeted_status': ['contributors','coordinates','created_at','entities',
'favorite_count','favorited','geo','id','id_str',
'in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id',
'in_reply_to_user_id_str','is_quote_status','lang',
'metadata','place','retweet_count','retweeted',
'source','text','truncated','user'],
'user':['contributors_enabled','created_at','default_profile',
'default_profile_image','description','favourites_count',
'follow_request_sent','followers_count','following','friends_count',
'geo_enabled','has_extended_profile','id','id_str',
'is_translation_enabled','is_translator','lang','listed_count','location',
'name','notifications','profile_background_color',
'profile_background_image_url','profile_background_image_url_https',
'profile_background_tile','profile_banner_url','profile_image_url',
'profile_image_url_https','profile_link_color',
'profile_sidebar_border_color','profile_sidebar_fill_color',
'profile_text_color','profile_use_background_image','protected',
'screen_name','statuses_count','time_zone','translator_type','url',
'utc_offset','verified']}
API_KEY = twitter_creds['API_KEY']
API_SECRET = twitter_creds['API_SECRET']
ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api= tweepy.API(auth)
max_tweets = 2000
print ( 'Processing %s Tweets containing the term \"%s\": %s' % (max_tweets,keyword,datetime.datetime.now()) )
try:
searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
directory = 'data/'+keyword
if not os.path.exists(directory):
os.makedirs(directory)
f = open(directory+'/%s_tweets.csv' % keyword, 'a')
with f as file:
i=0
w = csv.writer(file)
for tweet in searched_tweets:
i=i+1
data_row=[]
for field in jsono:
if field in tweet._json.keys():
if field in fields_with_subfields:
for subfield in subfields[field]:
try:
data_row.append(tweet._json[field][subfield])
except:
data_row.append('')
else:
data_row.append(if_empty(tweet._json[field]))
else:
data_row.append('')
if 'retweeted_status' not in tweet._json.keys():
# pad 23 empty placeholders for the missing retweeted_status subfields
for _ in range(23): data_row.insert(25, '')
data_row.append(datetime.datetime.now())
w.writerow(data_row)
df = pd.read_csv(directory+'/%s_tweets.csv' % keyword)
df['id'] = df['id'].apply(str)
df.sort_values(['time_crawled'], ascending=False).drop_duplicates(['id'], keep='first').sort_values(['created_at'], ascending=False).to_csv(directory+'/%s_tweets.csv' % keyword, index=False)
print('Done! %s Tweets processed: %s' % (i, datetime.datetime.now()))
except:
print('Failed to send request: Read timed out.')
def get_tweets(keyword, twitter_creds):
def if_empty(json_input):
if json_input == '':
return ''
else:
return json_input
def json_check_keys(jsono):
print(jsono.keys())
jsono = ['contributors','coordinates','created_at','entities','favorite_count','favorited',
'geo','id','id_str','in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id','in_reply_to_user_id_str',
'is_quote_status','lang','metadata','place','retweet_count','retweeted',
'retweeted_status','source','text','truncated','user']
fields_with_subfields = ['entities','in_reply_to_user_id','in_reply_to_user_id_str',
'metadata','retweeted_status','user']
subfields= {'entities':['hashtags','symbols','urls','user_mentions'],
'in_reply_to_user_id':['iso_language_code'],
'in_reply_to_user_id_str':['result_type'],
'metadata':['iso_language_code','result_type'],
'retweeted_status': ['contributors','coordinates','created_at','entities',
'favorite_count','favorited','geo','id','id_str',
'in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id',
'in_reply_to_user_id_str','is_quote_status','lang',
'metadata','place','retweet_count','retweeted',
'source','text','truncated','user'],
'user':['contributors_enabled','created_at','default_profile',
'default_profile_image','description','favourites_count',
'follow_request_sent','followers_count','following','friends_count',
'geo_enabled','has_extended_profile','id','id_str',
'is_translation_enabled','is_translator','lang','listed_count','location',
'name','notifications','profile_background_color',
'profile_background_image_url','profile_background_image_url_https',
'profile_background_tile','profile_banner_url','profile_image_url',
'profile_image_url_https','profile_link_color',
'profile_sidebar_border_color','profile_sidebar_fill_color',
'profile_text_color','profile_use_background_image','protected',
'screen_name','statuses_count','time_zone','translator_type','url',
'utc_offset','verified']}
API_KEY = twitter_creds['API_KEY']
API_SECRET = twitter_creds['API_SECRET']
ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api= tweepy.API(auth)
max_tweets = 2000
print ( 'Processing %s Tweets containing the term \"%s\": %s' % (max_tweets,keyword,datetime.datetime.now()) )
searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
directory = 'data/'+keyword
if not os.path.exists(directory):
os.makedirs(directory)
f = open(directory+'/%s_tweets.csv' % keyword, 'a')
with f as file:
i=0
w = csv.writer(file)
for tweet in searched_tweets:
i=i+1
data_row=[]
for field in jsono:
if field in tweet._json.keys():
if field in fields_with_subfields:
for subfield in subfields[field]:
try:
data_row.append(tweet._json[field][subfield])
except:
data_row.append('')
else:
data_row.append(if_empty(tweet._json[field]))
else:
data_row.append('')
if 'retweeted_status' not in tweet._json.keys():
# pad 23 empty placeholders for the missing retweeted_status subfields
for _ in range(23): data_row.insert(25, '')
data_row.append(datetime.datetime.now())
w.writerow(data_row)
print('Done! %s Tweets processed: %s' % (i, datetime.datetime.now()))
def update_tweets_gui(keyword, twitter_creds):
def if_empty(json_input):
if json_input == '':
return ''
else:
return json_input
def json_check_keys(jsono):
print(jsono.keys())
jsono = ['contributors','coordinates','created_at','entities','favorite_count','favorited',
'geo','id','id_str','in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id','in_reply_to_user_id_str',
'is_quote_status','lang','metadata','place','retweet_count','retweeted',
'retweeted_status','source','text','truncated','user']
fields_with_subfields = ['entities','in_reply_to_user_id','in_reply_to_user_id_str',
'metadata','retweeted_status','user']
subfields= {'entities':['hashtags','symbols','urls','user_mentions'],
'in_reply_to_user_id':['iso_language_code'],
'in_reply_to_user_id_str':['result_type'],
'metadata':['iso_language_code','result_type'],
'retweeted_status': ['contributors','coordinates','created_at','entities',
'favorite_count','favorited','geo','id','id_str',
'in_reply_to_screen_name','in_reply_to_status_id',
'in_reply_to_status_id_str','in_reply_to_user_id',
'in_reply_to_user_id_str','is_quote_status','lang',
'metadata','place','retweet_count','retweeted',
'source','text','truncated','user'],
'user':['contributors_enabled','created_at','default_profile',
'default_profile_image','description','favourites_count',
'follow_request_sent','followers_count','following','friends_count',
'geo_enabled','has_extended_profile','id','id_str',
'is_translation_enabled','is_translator','lang','listed_count','location',
'name','notifications','profile_background_color',
'profile_background_image_url','profile_background_image_url_https',
'profile_background_tile','profile_banner_url','profile_image_url',
'profile_image_url_https','profile_link_color',
'profile_sidebar_border_color','profile_sidebar_fill_color',
'profile_text_color','profile_use_background_image','protected',
'screen_name','statuses_count','time_zone','translator_type','url',
'utc_offset','verified']}
API_KEY = twitter_creds['API_KEY']
API_SECRET = twitter_creds['API_SECRET']
ACCESS_TOKEN = twitter_creds['ACCESS_TOKEN']
ACCESS_TOKEN_SECRET = twitter_creds['ACCESS_TOKEN_SECRET']
directory = 'data/'+keyword
if not os.path.exists(directory):
os.makedirs(directory)
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api= tweepy.API(auth)
max_tweets = 2000
print ( 'Processing %s Tweets containing the term \"%s\": %s' % (max_tweets,keyword,datetime.datetime.now()) )
try:
searched_tweets = [status for status in tweepy.Cursor(api.search, q=keyword).items(max_tweets)]
f = open(directory+'/%s_tweets.csv' % keyword, 'a')
with f as file:
i=0
w = csv.writer(file)
for tweet in searched_tweets:
i=i+1
data_row=[]
for field in jsono:
if field in tweet._json.keys():
if field in fields_with_subfields:
for subfield in subfields[field]:
try:
data_row.append(tweet._json[field][subfield])
except:
data_row.append('')
else:
data_row.append(if_empty(tweet._json[field]))
else:
data_row.append('')
if 'retweeted_status' not in tweet._json.keys():
# pad 23 empty placeholders for the missing retweeted_status subfields
for _ in range(23): data_row.insert(25, '')
data_row.append(datetime.datetime.now())
w.writerow(data_row)
df = pd.read_csv(directory+'/%s_tweets.csv' % keyword)
df['id'] = df['id'].apply(str)
df.sort_values(['time_crawled'], ascending=False).drop_duplicates(['id'], keep='first').sort_values(['created_at'], ascending=False).to_csv(directory+'/%s_tweets.csv' % keyword, index=False)
print('Done! %s Tweets processed: %s' % (i, datetime.datetime.now()))
except Exception as e:
print(e)
```
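The three crawler variants above share the same calling convention: a search keyword plus a credentials dictionary holding the four Twitter API keys that each function reads. A minimal usage sketch, assuming the module is importable as `twitter_tweets` and the placeholder credentials are replaced with real ones:

```python
# Usage sketch (hypothetical credential values): create the CSV header, then pull up to 2000 tweets.
from twitter_tweets import twitter_csvcreatefile_header, get_tweets

twitter_creds = {
    "API_KEY": "your-api-key",                    # placeholder
    "API_SECRET": "your-api-secret",              # placeholder
    "ACCESS_TOKEN": "your-access-token",          # placeholder
    "ACCESS_TOKEN_SECRET": "your-access-secret",  # placeholder
}

keyword = "lisbon"                      # example search term
twitter_csvcreatefile_header(keyword)   # writes the header row to data/lisbon/lisbon_tweets.csv
get_tweets(keyword, twitter_creds)      # appends the crawled tweets to the same file
```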
#### File: social_media_crawler/Web_Crawler/instagram_hashtags.py
```python
from InstagramAPI import InstagramAPI
import csv
import datetime
from instagram_access import ig_creds
#crawls posts by hashtag search
def get_instagram_posts(keyword):
username = ig_creds(1)['username']
pwd = ig_creds(1)['pwd']
api = InstagramAPI(username, pwd)
api.login()
f = open('_data/%s_instagram_posts.csv' % keyword, 'w')
w = csv.writer(f)
w.writerow(['can_viewer_save','caption_bit_flags','caption_content_type','caption_created_at',
'caption_created_at_utc','caption_did_report_as_spam','caption_media_id','caption_pk',
'caption_status','caption_text','caption_type','caption_user','caption_user_id',
'caption_is_edited','client_cache_key','code','comment_count','comment_likes_enabled',
'comment_threading_enabled','device_timestamp','filter_type','has_audio','has_liked',
'has_more_comments','id','image_versions2_candidates','is_dash_eligible','like_count',
'max_num_visible_preview_comments','media_type','number_of_qualities','organic_tracking_token',
'original_height','original_width','photo_of_you','pk','taken_at','user_friendship_status',
'user_full_name','user_has_anonymous_profile_picture','user_is_favorite','user_is_private',
'user_is_unpublished','user_pk','user_profile_pic_url','user_username','video_dash_manifest',
'video_duration','video_versions','view_count','lat','lng','location_address','location_city',
'location_external_source','location_facebook_places_id','location_lat','location_lng',
'location_name','location_pk','location_short_name','time_crawled'])
jsono = ['can_viewer_save', 'caption','caption_is_edited','client_cache_key','code',
'comment_count','comment_likes_enabled','comment_threading_enabled','device_timestamp',
'filter_type','has_audio','has_liked','has_more_comments','id','image_versions2',
'is_dash_eligible','like_count','max_num_visible_preview_comments','media_type',
'number_of_qualities','organic_tracking_token','original_height','original_width',
'photo_of_you','pk','taken_at','user','video_dash_manifest','video_duration',
'video_versions','view_count','lat','lng','location']
fields_with_subfields = [
'caption',
'image_versions2',
'user',
'location']
subfields = {'caption':['bit_flags','content_type','created_at','created_at_utc',
'did_report_as_spam','media_id','pk','status','text',
'type','user','user_id'],
'image_versions2':['candidates'],
'user':['friendship_status','full_name','has_anonymous_profile_picture',
'is_favorite','is_private','is_unpublished','pk','profile_pic_url',
'username'],
'location':['address','city','external_source','facebook_places_id',
'lat','lng','name','pk','short_name']
}
def get_tag_feed(word):
next_max = 100000 #number of tag pages to load: 1 page equals approx. 70-80 posts
next_max_id = ''
i=0
for n in range(next_max):
api.getHashtagFeed(word,next_max_id)
data = api.LastJson
try:
for post in data['items']:
data_row = []
for field in jsono:
if field in post.keys():
if field in fields_with_subfields:
for subfield in subfields[field]:
try:
#print("'"+field+">"+subfield+"'")
data_row.append(post[field][subfield])
except TypeError:
data_row.append('')
else:
#print("'"+field+"'")
data_row.append(post[field])
else:
#print("'"+field+"'")
data_row.append('')
#print("'time_crawled'")
if field == 'location' and field not in post.keys():
# pad 8 empty placeholders for the missing location subfields
for _ in range(8): data_row.insert(53,'')
data_row.append(datetime.datetime.now())
w.writerow(data_row)
i=i+1
if i % 500 == 0:
print( "%s Statuses Processed: %s" % (i, datetime.datetime.now()) )
next_max_id = data["next_max_id"]
except:
try:
next_max_id = data["next_max_id"]
except:
#print("error next_max. Tag: ", next_max_id)
print('Done! %s posts processed' % i)
break
get_tag_feed(keyword)
```
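For reference, a usage sketch of the hashtag crawler above; it assumes `instagram_access.ig_creds` returns valid credentials and that the `_data/` output folder already exists.

```python
# Usage sketch: crawl the #lisbon hashtag feed into _data/lisbon_instagram_posts.csv
from instagram_hashtags import get_instagram_posts

get_instagram_posts("lisbon")
```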
#### File: social_media_crawler/Web_Crawler/_master_batch_crawler.py
```python
from facebook_search_results import facebook_page_search
from facebook_posts import facebook_posts_crawler, facebook_posts_header
from facebook_comments import facebook_comments_crawler
from facebook_access_token import access_token
#Twitter dependency
from twitter_tweets import twitter_csvcreatefile_header, get_tweets
#Instagram dependency
from instagram_hashtags import get_instagram_posts
def master_batch_crawler(keyword):
#Twitter
twitter_csvcreatefile_header(keyword)
get_tweets(keyword)
#Instagram
get_instagram_posts(keyword)
#Facebook
facebook_posts_header(keyword)
facebook_page_search_results = facebook_page_search(keyword, access_token())
pages_remaining = len(facebook_page_search_results)
print('Number pages found for keyword search \"%s\":%s' % (keyword, pages_remaining ))
for page in facebook_page_search_results:
facebook_posts_crawler(page, access_token(), keyword)
pages_remaining= pages_remaining-1
print('%s pages remaining to process' % pages_remaining)
facebook_comments_crawler(keyword, access_token())
```
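A sketch of how this batch entry point is meant to be invoked; it assumes the imported credential helpers (`access_token` and the Twitter/Instagram credentials used by the imported crawlers) are configured for the environment.

```python
# Usage sketch: run the Twitter, Instagram and Facebook crawlers for one keyword.
from _master_batch_crawler import master_batch_crawler

master_batch_crawler("lisbon")
```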
#### File: _others/sentiment_analysis/sentiment_analysis_test.py
```python
import pandas as pd
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
from nltk.corpus import opinion_lexicon
import nltk.tokenize as tk
from unicodedata import normalize
import mtranslate
def to_string(message):
return str(message)
def emoji_tokenize(message):
emoji = "['\U0001F300-\U0001F5FF'|'\U0001F600-\U0001F64F'|'\U0001F680-\U0001F6FF'|'\u2600-\u26FF\u2700-\u27BF']"
return tk.regexp_tokenize(message,emoji)
def hashtag_tokenize(message):
hashtag = r"([#]\w+)"
return tk.regexp_tokenize(message,hashtag)
#this tokenizer also removes hashtags and emojis (it only includes actual text content)
def message_tokenize(message):
tweettokenizer = tk.TweetTokenizer()
hashtag = r"([#]\w+)"
hashtag_list = tk.regexp_tokenize(message,hashtag)
emoji = "['\U0001F300-\U0001F5FF'|'\U0001F600-\U0001F64F'|'\U0001F680-\U0001F6FF'|'\u2600-\u26FF\u2700-\u27BF']"
emoji_list = tk.regexp_tokenize(message,emoji)
to_exclude_list = hashtag_list + emoji_list
tokenized_message = tweettokenizer.tokenize(message)
tokens = []
for token in tokenized_message:
if token not in to_exclude_list:
tokens.append(token)
return tokens
#normalization function: lowercases the text and strips accents/special characters; this will probably not be necessary
def normalize_text(txt):
return normalize('NFKD', txt).encode('ASCII','ignore').decode('UTF-8').lower()
df = pd.read_csv(open('cascais_tweets.csv','r'))
def remove_stopwords(token_list):
#removing stopwords (common words such as 'the', 'for', 'this' that carry little significance for search queries)
filtered_words=[]
for token in token_list:
if token not in stopwords.words('english'):
filtered_words.append(token)
token_list = filtered_words
return token_list
def positive_message_score(token_list):
positive_words_list = opinion_lexicon.positive()
positive_count = 0
word_count = len(token_list)
for token in token_list:
if token in positive_words_list:
positive_count=positive_count+1
if word_count == 0:
return ''
else:
return positive_count/word_count
def negative_message_score(token_list):
negative_words_list = opinion_lexicon.negative()
negative_count = 0
word_count = len(token_list)
for token in token_list:
if token in negative_words_list:
negative_count=negative_count+1
if word_count == 0:
return ''
else:
return negative_count/word_count
def VADER_analysis(message):
analyzer = SentimentIntensityAnalyzer()
return analyzer.polarity_scores(message)
df['hashtags']=df['text'].apply(to_string).apply(normalize_text).apply(hashtag_tokenize)
df['emojis']=df['text'].apply(to_string).apply(emoji_tokenize)
df['message_english']=df['text'].apply(to_string).apply(mtranslate.translate)
df['tokenized']=df['message_english'].apply(message_tokenize).apply(remove_stopwords)
df['positive_score'] = df['tokenized'].apply(positive_message_score)
df['negative_score'] = df['tokenized'].apply(negative_message_score)
df['VADER_compound_score'] = df['message_english'].apply(VADER_analysis)
df[['text','tokenized','hashtags','emojis','positive_score','negative_score','VADER_compound_score']].to_csv('sentiment_analysis_test.csv')
``` |
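The two lexicon scores above simply divide the number of positive (or negative) opinion-lexicon tokens by the total token count. A small sketch with synthetic tokens, assuming the functions above are in scope and the NLTK corpora have been downloaded:

```python
# Illustration of the lexicon-based scoring (synthetic tokens).
import nltk
nltk.download("opinion_lexicon")   # required by positive/negative_message_score
nltk.download("stopwords")         # required by remove_stopwords

tokens = ["great", "view", "terrible", "queue"]
print(positive_message_score(tokens))  # expected 0.25 ('great' is in the positive lexicon)
print(negative_message_score(tokens))  # expected 0.25 ('terrible' is in the negative lexicon)
```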
{
"source": "joaopfonseca/solve-iwmi",
"score": 3
} |
#### File: api/controller/ctrlr_auth.py
```python
from flask import Blueprint,request
from flask_login import UserMixin, login_user, logout_user,current_user
from middleware import login_manager
authbp = Blueprint('authbp', __name__)
users = {'iwmiAnalytics': {'password': '<PASSWORD>'}}
class User(UserMixin):
pass
@login_manager.user_loader
def user_loader(email):
if email not in users:
return
user = User()
user.id = email
return user
@login_manager.request_loader
def request_loader(request):
email = request.form.get('email')
if email not in users:
return
user = User()
user.id = email
# DO NOT ever store passwords in plaintext and always compare password
# hashes using constant-time comparison!
user.is_authenticated = request.form['password'] == users[email]['password']
return user
@authbp.route("/api/login", methods=['POST'])
def login():
"""
Takes in an email and password; if the credentials are correct, logs in the user
POST Body
email
password
"""
email = request.json['email']
if request.json['password'] == users[email]['password']:
user = User()
user.id = email
login_user(user)
return 'logged in'
return 'Bad login',400
@authbp.route("/api/logout", methods=['POST'])
def logout():
logout_user()
return 'logout'
@authbp.route("/api/checkLogin", methods=['POST'])
def checkLogin():
if current_user.is_authenticated:
return 'Logged in',200
else:
return 'Not logged in',400
```
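A client-side sketch of the endpoints above, assuming the Flask app is served locally on port 5000; the password value is a placeholder for whatever is configured in the `users` dict.

```python
# Usage sketch: log in, then check the session (hypothetical host/port and password).
import requests

session = requests.Session()
resp = session.post(
    "http://localhost:5000/api/login",
    json={"email": "iwmiAnalytics", "password": "change-me"},  # placeholder password
)
print(resp.status_code, resp.text)
print(session.post("http://localhost:5000/api/checkLogin").status_code)
```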
#### File: api/helpers/filters.py
```python
from dateutil import parser
def createQueryFilters(filters):
"""
Takes in filters from the frontend and creates a query that elasticsearch can use
Args: filters with the fields below
verified - if the user is verified
topics - list of topics we want to see
pov - point of view
lang - the language the tweet was originally in
endDate
startDate
sentStart - lower bound of the sentiment score
sentEnd - upper bound of the sentiment score
search - full-text search on the full_text_trans field in the database
Returns:
A dict with a bool/must clause wrapping the filters, to be used in an elasticsearch query body
"""
query = []
if 'verified' in filters and filters['verified']:
query.append({
"term":{"verified":True}
})
if 'topics' in filters and filters['topics']:
query.append({
'bool':{
'should':list(map(
lambda x:{'term':{'topics.keyword':x}} ,
filters['topics']
))
}
})
if 'pov' in filters and filters['pov']:
query.append({
'bool':{
'must':{'term':{'pov.keyword':filters['pov']}}
}
})
if 'lang' in filters and filters['lang']:
query.append({
'bool':{
'must':{'term':{'lang.keyword':filters['lang']}}
}
})
if 'endDate' in filters and filters['endDate']:
query.append({
"range":{"tweet_created_at":{'lte':parser.parse(filters['endDate']).timestamp()* 1000}}
})
if 'startDate' in filters and filters['startDate']:
query.append({
"range":{"tweet_created_at":{'gte':parser.parse(filters['startDate']).timestamp()* 1000}}
})
if 'sentStart' in filters and filters['sentStart']:
query.append({
"range":{"sentiment":{'gte':filters['sentStart']}}
})
if 'sentEnd' in filters and filters['sentEnd']:
query.append({
"range":{"sentiment":{'lte':filters['sentEnd']}}
})
if 'search' in filters and filters['search']:
query.append({
'bool':{
'must':{
"match": {
"full_text_trans": {
"query": filters['search']
}
}
}
}
})
body = {
'bool':{
'must':query,
}
}
return body
```
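A sketch of how the helper above is typically consumed: build the bool clause from a frontend filter dict and wrap it in a search body. The filter values are illustrative, the import path is assumed, and the `twitter` index name matches the one used elsewhere in this repo.

```python
# Usage sketch with illustrative filter values.
from helpers.filters import createQueryFilters  # import path assumed

filters = {
    "verified": True,
    "topics": ["flood", "relief"],
    "startDate": "2020-05-15",
    "endDate": "2020-05-30",
    "search": "cyclone damage",
}
body = {"query": createQueryFilters(filters)}
# es.search(index="twitter", body=body)  # with an elasticsearch.Elasticsearch client
```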
#### File: src/data/make_data_pull.py
```python
import click
import logging
from pathlib import Path
import yaml
from os.path import join
from src.data import pull_tweets, count_tweets
@click.command()
@click.argument('configs_path', type=click.Path(exists=True))
@click.argument('configs_key', type=click.Path())
def main(configs_path, configs_key):
"""
Downloads and stores tweets as .json files in its raw format.
The data is stored "as is". The remaining ETL steps can be found
in separate scripts.
The entire procedure can be configured using a .yaml file. See
`configs.yaml`.
"""
configs = yaml.full_load(open(configs_path, 'r'))[configs_key]
configs['save_path'] = join(project_dir, configs['save_path'])
configs['credentials_path'] = join(
project_dir, configs['credentials_path']
)
configs['yaml_key'] = configs['credentials_key']
del configs['credentials_key']
logger = logging.getLogger(__name__)
logger.propagate = configs['verbose']
logger.info('Initializing - Pulling raw data from Twitter (via API)')
if logger.propagate:
counts = count_tweets(**configs)
n_tweets = 0
for date in counts:
n_tweets += date['count']
logger.info(f'Total amount of tweets found: {n_tweets}')
pull_tweets(**configs)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
main()
```
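For reference, a sketch of the configuration entry this CLI expects. The key names come from the code above, while the values and file layout are illustrative. The script is invoked as `python make_data_pull.py configs.yaml <configs_key>`.

```python
# Illustrative shape of one entry in configs.yaml after yaml.full_load (values are assumptions).
example_configs_entry = {
    "save_path": "data/raw",                 # joined with the project root
    "credentials_path": "credentials.yaml",  # joined with the project root
    "credentials_key": "twitter_api",        # renamed to yaml_key before the API calls
    "verbose": True,                         # also enables the tweet count preview
}
```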
#### File: src/data/_update_es.py
```python
import os
import pandas as pd
import numpy as np
from os.path import join
from elasticsearch import Elasticsearch, helpers
def update_es_database(
df,
idCol,
transform,
ip_address,
):
"""
Loads the dataframe into the database
Args:
df - dataframe with data to update
idCol - the column of df to use as the elasticsearch document id
transform - function that takes a dataframe row and returns the document fields to update
ip_address - the ip address of the elasticsearch instance
Yields:
Updates the database with the new data
"""
es = Elasticsearch([ip_address])
for idx,row in df.iterrows():
body ={
'doc':transform(row)
}
print(int(row[idCol]))
es.update(
index='twitter',
id=int(row[idCol]),
body = body
)
def ingest_topics(ip_address,process_dir):
def transform_ingest(row):
dic = row.to_dict()
del dic['tweet_id']
topics = [k for k, v in sorted(dic.items(), key=lambda item: item[1]) if v > .8]
return {
'topics_list':dic,
'topics':topics
}
df = pd.read_csv(join(process_dir,'zstc_model_final.csv'))
update_es_database(
df,
'tweet_id',
transform_ingest,
ip_address
)
def ingest_translations(ip_address,process_dir):
def transform_trans(row):
return {
'full_text_trans':row['full_text_trans']
}
df = pd.read_csv(join(process_dir,'translated_tweets.csv'))
update_es_database(
df,
'index',
transform_trans,
ip_address
)
def ingest_pov(ip_address,process_dir):
def transform_pov(row):
return {
'pov':row['pov']
}
df = pd.read_csv(join(process_dir,'pov_final.csv'))
update_es_database(
df,
'id',
transform_pov,
ip_address
)
def ingest_network(ip_address,process_dir):
es = Elasticsearch([ip_address])
nodeDF = pd.read_csv(join(process_dir,'nodes_users.csv'))
nodeDF = nodeDF.fillna(0)
edgeDF = pd.read_csv(join(process_dir,'edges_users.csv'))
edgeDF = edgeDF.replace([np.inf, -np.inf], 0)
edgeDF = edgeDF.fillna(0)
def docGenerator(nodeDF,edgeDF):
for i, node in nodeDF.iterrows():
try:
res = es.search(
index="twitter",
body={
"query": {
"match":{
"user_id_str":int(node['node_id'])
}
}
}
)
print(int(node['node_id']))
tweet_ids = [hit['_id'] for hit in res['hits']['hits']]
edges = edgeDF[edgeDF['source'] == int(node['node_id'])]
doc = {
"_id": int(node['node_id']),
}
doc.update({
'tweet_ids':tweet_ids,
'followers':node['Followers'],
'name':node['name'],
'edges':edges.to_dict('records')
})
yield doc
except ValueError:
pass
actions, errors = helpers.bulk(
client=es,
index='users',
actions=docGenerator(nodeDF,edgeDF)
)
print(errors)
def update_es(process_dir,ip_address='localhost'):
ingest_pov(ip_address,process_dir)
ingest_network(ip_address,process_dir)
ingest_translations(ip_address,process_dir)
ingest_topics(ip_address,process_dir)
if __name__ == '__main__':
project_dir = join(os.getcwd(), os.pardir,os.pardir)
process_dir = join(project_dir, 'data', 'processed')
update_es(process_dir)
``` |
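A usage sketch of the updater above, assuming the processed CSVs referenced in each ingest function exist under `data/processed` and Elasticsearch is running locally; the import path is assumed.

```python
# Usage sketch: refresh the Elasticsearch indices from the processed CSVs.
from os.path import join

from src.data._update_es import update_es  # import path assumed

update_es(join("data", "processed"), ip_address="localhost")
```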
{
"source": "joaopfonseca/tourism_telecom",
"score": 3
} |
#### File: tourism_telecom/preliminary_analysis/word_embedding_test.py
```python
from db_configs import host, database, user, password
import psycopg2 as pg
import pandas as pd
from gensim.models.word2vec import Word2Vec, LineSentence
# connect to the database
connection = pg.connect(host=host, dbname=database, user=user, password=password)
# import a table:
sequences_table = pd.read_sql_query("SELECT * FROM telecom_pt.sequences_table WHERE concelhos_sequence!=''",con=connection)
def sequence_list(val):
return val.split(',')
def length_of_sequences(val):
return len(val)
sequences_table['sequences_listed'] = sequences_table['concelhos_sequence'].apply(sequence_list)
sequences_table['length_of_sequences'] = sequences_table['sequences_listed'].apply(length_of_sequences)
# create vectors
model = Word2Vec(sequences_table[sequences_table['length_of_sequences']>2]['sequences_listed'], size=2, window=1, min_count=1, workers=4)
sentences = model.predict_output_word(['<NAME>', 'Vasco da Gama aquarium'], topn=5)
#sentences2 = LineSentence('Chelas', max_sentence_length=10, limit=None)
print(sequences_table[sequences_table['length_of_sequences']>2]['sequences_listed'])
print('all roamers: ',len(sequences_table['sequences_listed']),', after filters: ',len(sequences_table[sequences_table['length_of_sequences']>2]['sequences_listed']) )
print(sentences)
```
#### File: tourism_telecom/src/events_preprocess.py
```python
import pandas as pd
import numpy as np
from config import _export_dir
def preprocess(df, df2, df3, new_node_ids):
def conditional_float_to_string(param):
if np.isnan(param):
new_param = 'none'
else:
new_param=str(int(param))
return new_param
def get_mcc(param):
return param[:3]
# df = pd.read_csv(_data_dir+'union_all.csv')
df['user_id'] = df['union_all.client_id']
df['date_time'] = df['union_all.enddate_']
df['cellid_df1'] = df['union_all.cellid']
df['lac_'] = df['union_all.lac_']
df['protocol_df1'] = df['union_all.protocol_']
df['edited_mcc'] = df['union_all.mccmnc'].astype(str).apply(get_mcc)
df['tac'] = df['union_all.tac']
df['datekey'] = df['union_all.datekey']
df['real_cellid'] = df['union_all.cellid'].apply(conditional_float_to_string) + df['lac_'].apply(conditional_float_to_string) + df['union_all.protocol_'].apply(conditional_float_to_string)
df['real_cellid'] = df['real_cellid'].astype(str)
# df3 = pd.read_csv(_data_dir+'mccmmc_optimized_new.csv')
new_keys3 = []
for key in df3.keys():
new_key= key.replace('mccmnc_optimized_new.', '')
new_keys3.append(new_key)
df3.columns = new_keys3
def add_zeros(param):
if (param != 'none') and int(param)<10:
param = '0'+param
return param
df3['edited_mcc'] = df3['mcc'].astype(str)
df3 = df3[df3['country'] != 'Guam'].drop(['network','mnc', 'mnc_', 'mcc_'], axis=1).drop_duplicates()
table_merge1 = pd.merge(df, df2, on='real_cellid', how='left')
df_final= pd.merge( table_merge1, df3, on='edited_mcc', how='left')
df_final['user_origin'] = df_final['country']
df_final['cell_id'] = df_final['real_cellid']
df_final['cellid2'] = df_final['real_cellid']
dataframe = df_final[['user_id','date_time','user_origin','cell_id', 'cellid2']]#'latitude','longitude', 'cellid2']]
new_node_ids['latitude'] = new_node_ids['lat']
new_node_ids['longitude'] = new_node_ids['lon']
refs = new_node_ids[['cellid', 'cellid2', 'longitude', 'latitude']]
df_merged = pd.merge( dataframe, refs, on='cellid2', how='left' )
df_merged = df_merged[df_merged['cellid'].notnull()]
df_merged['date'] = pd.to_datetime(df_merged['date_time']).dt.date
df_merged['rounded_time'] = pd.to_datetime(df_merged['date_time']).dt.hour
df_merged['time'] = pd.to_datetime(df_merged['date_time']).dt.time
events = pd.DataFrame(df_merged.groupby('user_id', as_index=False).size().reset_index())
events.columns = ['user_id', 'total_events']
df_merged = events.merge(df_merged, on='user_id')
activity=df_merged[['user_id','date']].drop_duplicates()
days_active = activity.groupby('user_id', as_index=False)['date'].count()
days_active.columns = ['user_id', 'days_active']
df_merged = df_merged.merge(days_active, on='user_id')
df_merged['cell_id'] = df_merged['cellid']
df_merged = df_merged[['user_id', 'cell_id', 'total_events', 'date_time', 'user_origin',
'latitude', 'longitude', 'date', 'rounded_time',
'time', 'days_active']]
# # filter out bots
# df['is_bot'] = (df['total_calls'] / df['days_active']) > self.params.bot_threshold
# df = df[df['is_bot'] == False]
#
# # filter out customers who made less than N calls
# calls_in_florence = df.groupby('user_id', as_index=False)['total_calls'].count()
# users_to_keep = list(calls_in_florence[calls_in_florence['total_calls'] >= self.params.minimum_total_calls]['user_id'])
# df = df[df['user_id'].isin(users_to_keep)]
#df_merged.to_csv(_export_dir+'CDR_events_preprocessed.csv', index=False)
return df_merged
#df2 = pd.read_csv(_data_dir+'site_lookup_outubro.csv')
def preprocess_cells(df):
def conditional_float_to_string(param):
if np.isnan(param):
new_param = 'none'
else:
new_param=str(int(param))
return new_param
new_keys2 = []
for key in df.keys():
new_key= key.replace('site_lookup_outubro.site_lookup_concelhos', '')
new_keys2.append(new_key)
df.columns = new_keys2
df['longitude'] = df['centroide_longitude']
df['latitude'] = df['centroide_latitude']
df['cellid_df2'] = df['ci']
df['real_cellid'] = df['cellid_df2'].apply(conditional_float_to_string) + df['lac'].apply(conditional_float_to_string) + df['protocol_'].apply(conditional_float_to_string)
df['real_cellid'] = df['real_cellid'].astype(str)
df['cell_id'] = df['real_cellid']
return df[['cell_id','longitude','latitude', 'name_site' ,'concelho', 'real_cellid']]
```
#### File: src/legacy/telecom.py
```python
import pandas as pd
import numpy as np
import plotly
from config import _data_dir
import logging.config
import logging
import credentials
plotly.tools.set_credentials_file(username=credentials.plotlyu, api_key=credentials.plotlykey)
class CDRAnalysis:
"""CDR Analysis"""
def __init__(self, params, data_feature_extracted):
self.params = params
self.data_feature_extracted = data_feature_extracted
self.cdr_main(self.data_feature_extracted)
@staticmethod
def cdr_main(df):
""" Exploratory analysis of CDR data """
# Create a frequency count of how many average daily calls each customer makes
daily_calls = df.groupby(['user_id', 'date'], as_index=True).count()
# Create a frequency count of how many average hourly calls each customer makes
hourly_calls = df.groupby(['user_id', 'time'], as_index=True).count()
# Count calls per customer
calls_per_cust = df.groupby(['user_id'], as_index=False).count()
# Total estimated daily presences: Italians & Foreigners
# Make a stacked bar plot day by day through summer
# Estimated daily presence of foreign visitors
# Estimated daily presence of Italian visitors
# Duration of stay of foreign visitors
# Duration of stay of Italian visitors
return None
```
#### File: tourism_telecom/src/sna.py
```python
from config import _export_dir, _data_dir
import pandas as pd
import shapely as sp
import shapely.geometry as shp
import json
import math
import geopandas as gpd
import numpy as np
from scipy.spatial import Voronoi
def get_dynamic_edgelist(data):
""" Make an edge list for all of the sequential visits of one site to the next
in a day per user. Each edge is directed. There is a dummy start node to
indicate the transition from being home to the first site visited that day """
data['total_people'] = 1
edges = data.groupby(["user_id", "date_time", "date",
"cell_id"]).sum()["total_people"].to_frame()
edges.reset_index(inplace=True)
# start is the name of the dummy node for edges from home to the first location visited
edges["from"] = 'dummy_start_node'
edges["to"] = edges["cell_id"]
make_link = (edges["user_id"].shift(1) == edges["user_id"]) & \
(edges["date"].shift(1) == edges["date"])
edges["from"][make_link] = edges["cell_id"].shift(1)[make_link]
dynamic_edgelist = edges[["from", "to", "total_people", "date_time"]]
dynamic_edgelist = dynamic_edgelist[dynamic_edgelist['from'] != dynamic_edgelist['to'] ]
return dynamic_edgelist
# =============================================================================
#
# =============================================================================
# radius, lat, lon of area of voronoi
#
def create_voronoi(points):
""" Make a voronoi diagram geometry out of point definitions
:param: points (Geopandas.GeoDataFrame): The centroid points for the voronoi
:return: Geopandas.GeoDataFrame: The polygon geometry for the voronoi diagram
"""
np_points = [np.array([pt.x, pt.y]) for pt in np.array(points.geometry)]
vor = Voronoi(np_points)
lines = [
shp.LineString(vor.vertices[line])
for line in vor.ridge_vertices
if -1 not in line
]
voronoi_poly = sp.ops.polygonize(lines)
crs = {'init': 'epsg:' + str(4326)}
# return gpd.GeoDataFrame(crs=crs, geometry=list(voronoi_poly)) \
# .to_crs(epsg=4326)
geodataframe = gpd.GeoDataFrame(crs=crs, geometry=list(voronoi_poly)) \
.to_crs(epsg=4326)
#geodataframe['centroid'] = geodataframe.centroid
return geodataframe
def create_circle(lat, lon, radius=18, num_points=20):
""" Create the approximation or a geojson circle polygon
:param: lat: the center latitude for the polygon
:param: lon: the center longitude for the polygon
:param: radius (int): the radius of the circle polygon
:param: num_points (int): number of discrete sample points to be generated along the circle
:return: list of lat/lon points defining a somewhat circular polygon
"""
points = []
for k in range(num_points):
angle = math.pi * 2 * k / num_points
dx = radius * math.cos(angle)
dy = radius * math.sin(angle)
new_lat = lat + (180 / math.pi) * (dy / 6378137)
new_lon = lon + (180 / math.pi) * (dx / 6378137) / math.cos(
lat * math.pi / 180)
points.append([round(new_lon, 7), round(new_lat, 7)])
return points
def convert_point_data_to_data_frame(data):
""" Takes a data set with latitude and longitude columns and returns a Geopandas
GeoDataFrame object.
:param: data (Pandas.DataFrame): the lat/lon data to convert to GeoDataFrame
:return: GeoPandas.GeoDataFrame: the contents of the supplied data in the
format of a GeoPandas GeoDataFrame
"""
zipped_data = zip(data['lon'], data['lat'])
geo_points = [shp.Point(xy) for xy in zipped_data]
crs = {'init': 'epsg:' + str(4326)}
return gpd.GeoDataFrame(data, crs=crs, geometry=geo_points) \
.to_crs(epsg=4326)
def generate_artificial_nodes(site_list, radius=30000, lat=38.751296, lon=-9.2180615):
# limiting range to the area of interest
weird_circle = create_circle(lat, lon, radius=radius, num_points=20)
weird_circle = shp.Polygon(weird_circle)
#site_list = pd.read_csv(_export_dir+'CDR_cellsites_preprocessed.csv', encoding='utf-8', sep=',', index_col=None, decimal='.')
site_list['lon'] = site_list['longitude']
site_list['lat'] = site_list['latitude']
site_list['cellid'] = site_list['cell_id']
#site_list['real_cellid'] = site_list['cellid']
site_list_clean = site_list[['cellid', 'lon', 'lat']]
site_point_list = convert_point_data_to_data_frame(site_list_clean)
site_point_list['is_area_of_interest'] = site_point_list['geometry'].intersects(weird_circle)
points_of_interest = site_point_list[site_point_list['is_area_of_interest']]
print('Generating Voronoi cells...')
voronoi = create_voronoi(site_point_list)
voronoi['geometry']=voronoi.intersection(weird_circle)
json_data_string = voronoi[['geometry']].to_json()
json_data = json.loads(json_data_string)
with open(_export_dir+'cdr_voronoi_dict.json','w') as json_file:
json.dump(json_data, json_file)
# generating voronoi for new nodes - ARTIFICIAL NODES!
new_nodes = pd.read_csv(_data_dir+'tourist_attractions_lisbon.txt')
new_nodes['lon'] = new_nodes['Longitude']
new_nodes['lat'] = new_nodes['Latitude']
new_nodes['cellid'] = new_nodes['Place Name']
nodes_clean = new_nodes[['cellid', 'lon', 'lat']]
nodes_clean_list = convert_point_data_to_data_frame(nodes_clean)
voronoi2 = create_voronoi(nodes_clean_list) # this needs to be integrated in the table
#voronoi2['voronoi_polygon'] = voronoi2['geometry']
voronoi2['geometry']=voronoi2.intersection(weird_circle) # this is redundant, but it stays to ensure there are no errors
voronoi2['cell_center'] = 'not found'
voronoi2['node_name'] = 'not found'
for number in range(len(nodes_clean_list)):
for cell in range(len(voronoi2)):
if voronoi2['geometry'][cell].contains(nodes_clean_list['geometry'][number]):
voronoi2['cell_center'][cell] = nodes_clean_list['geometry'][number]
voronoi2['node_name'][cell] = nodes_clean_list['cellid'][number]
break
# def convert_to_point(row):
# return shp.Point(row['Latitude'], row['Longitude'])
#
# new_nodes['point'] = new_nodes.apply(convert_to_point, axis=1)
# #voronoi2 = voronoi2.reset_index()
# def identify_node(cell, node_list):
# for node in range(len(node_list)):
# if cell.intersects(node_list['geometry'][node]):
# return node_list['cellid'][node]
# return 'no_match'
#
# voronoi2['node_name'] = voronoi2['geometry'].apply(lambda x: identify_node(x, nodes_clean_list))
def get_info(point, voronoi_diagram, which_info):
for poly in range(len(voronoi_diagram)):
if voronoi_diagram['geometry'][poly].contains(point):
if which_info == 'node_name':
return voronoi_diagram['node_name'][poly]
elif which_info == 'voronoi_polygon':
return voronoi_diagram['geometry'][poly]
elif which_info == 'cell_center':
return voronoi_diagram['cell_center'][poly]
else:
raise Exception('Info doesn\'t exist')
return 'not_in_lisbon'
points_of_interest['cellid2']= points_of_interest['cellid']
points_of_interest['cellid'] = points_of_interest['geometry'].apply(lambda x: get_info(x, voronoi2, 'node_name'))
points_of_interest['voronoi_polygon'] = points_of_interest['geometry'].apply(lambda x: get_info(x, voronoi2, 'voronoi_polygon'))
points_of_interest['cell_center'] = points_of_interest['geometry'].apply(lambda x: get_info(x, voronoi2, 'cell_center'))
points_of_interest = points_of_interest[points_of_interest['cellid']!='not_in_lisbon']\
[['cellid', 'cellid2', 'lon', 'lat', 'geometry', 'voronoi_polygon', 'cell_center']]
# added in june 27, fix for AttributeError: 'str' object has no attribute 'x'
voronoi2= voronoi2[voronoi2['cell_center']!='not found']
##########
json_data_string2 = voronoi2[['geometry']].to_json()
json_data2 = json.loads(json_data_string2)
with open(_export_dir+'new_nodes_voronoi_dict.json','w') as json_file:
json.dump(json_data2, json_file)
return {'tower_cells_to_new_nodes':points_of_interest, 'voronoi_cells':voronoi2}
```
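A tiny illustration of `get_dynamic_edgelist` with synthetic data: two visits by the same user on the same day produce one edge from the dummy start node to the first site and one edge between the two sites. It assumes the function above is importable from `sna`.

```python
# Synthetic example for get_dynamic_edgelist.
import pandas as pd

from sna import get_dynamic_edgelist  # import path assumed

events = pd.DataFrame({
    "user_id":   [1, 1],
    "date_time": pd.to_datetime(["2018-10-01 09:00", "2018-10-01 11:00"]),
    "date":      [pd.Timestamp("2018-10-01").date()] * 2,
    "cell_id":   ["A", "B"],
})
print(get_dynamic_edgelist(events))
# expected edges: dummy_start_node -> A and A -> B, each with total_people == 1
```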
#### File: tourism_telecom/src/test.py
```python
from config import _export_dir, _data_dir
import pandas as pd
import fiona
from shapely.geometry import Point, shape
from sklearn import preprocessing, neighbors
shp_dir = '../pt_regions_shapefiles/districts_shp/'
shp_file = fiona.open(shp_dir+'PRT_adm2.shp')
# import shapefile
regioes = pd.read_csv(shp_dir+'PRT_adm2.csv')
# apply to ID_2
def get_region_shape(value):
return shape(shp_file[value-1]['geometry'])
regioes['geometry'] = regioes['ID_2'].apply(get_region_shape)
regioes = regioes[['NAME_1', 'NAME_2', 'geometry']]
regioes = regioes.rename(index=str ,columns={'NAME_1':'distrito', 'NAME_2':'concelho', 'geometry':'geometry'})
# import the data we want to label
cdr_cell = pd.read_csv(_export_dir+'CDR_cellsites_preprocessed.csv')\
.reset_index()#[['cell_id', 'longitude', 'latitude']].reset_index()
def generate_point(row):
return Point(row['longitude'],row['latitude'])
cdr_cell['point'] = cdr_cell.apply(generate_point, axis=1)
def label_location(row, which):
for num in range(len(regioes)):
check = regioes['geometry'][num].contains(row['point'])
if check:
if which == 'distrito':
return regioes['distrito'][num]
elif which == 'concelho':
return regioes['concelho'][num]
cdr_cell['distrito'] = cdr_cell.apply(lambda x: label_location(x, 'distrito'), axis=1)
cdr_cell['concelho'] = cdr_cell.apply(lambda x: label_location(x, 'concelho'), axis=1)
def knn_classifier(target, n_neighbors):
X = cdr_cell[cdr_cell[target].notnull()][['latitude','longitude']]
y = cdr_cell[cdr_cell[target].notnull()][target]
clf = neighbors.KNeighborsClassifier(n_neighbors)
clf.fit(X, y)
knn_prediction = clf.predict(cdr_cell[cdr_cell[target].isnull()][['latitude','longitude']])
cdr_cell.loc[cdr_cell[target].isnull(), target] = knn_prediction
knn_classifier('concelho', 3)
knn_classifier('distrito', 3)
cdr_cell[['cell_id', 'distrito', 'concelho']].to_csv(_export_dir+'region_labels.csv', index=False)
``` |
{
"source": "joaophi/core",
"score": 2
} |
#### File: components/ovo_energy/sensor.py
```python
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import timedelta
from typing import Final
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.components.sensor import (
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_MONETARY,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import dt as dt_util
from . import OVOEnergyDeviceEntity
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
SCAN_INTERVAL = timedelta(seconds=300)
PARALLEL_UPDATES = 4
KEY_LAST_ELECTRICITY_COST: Final = "last_electricity_cost"
KEY_LAST_GAS_COST: Final = "last_gas_cost"
@dataclass
class OVOEnergySensorEntityDescription(SensorEntityDescription):
"""Class describing System Bridge sensor entities."""
value: Callable[[OVODailyUsage], StateType] = round
SENSOR_TYPES_ELECTRICITY: tuple[OVOEnergySensorEntityDescription, ...] = (
OVOEnergySensorEntityDescription(
key="last_electricity_reading",
name="OVO Last Electricity Reading",
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value=lambda usage: usage.electricity[-1].consumption,
),
OVOEnergySensorEntityDescription(
key=KEY_LAST_ELECTRICITY_COST,
name="OVO Last Electricity Cost",
device_class=DEVICE_CLASS_MONETARY,
state_class=STATE_CLASS_TOTAL_INCREASING,
value=lambda usage: usage.electricity[-1].cost.amount,
),
OVOEnergySensorEntityDescription(
key="last_electricity_start_time",
name="OVO Last Electricity Start Time",
entity_registry_enabled_default=False,
device_class=DEVICE_CLASS_TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.start),
),
OVOEnergySensorEntityDescription(
key="last_electricity_end_time",
name="OVO Last Electricity End Time",
entity_registry_enabled_default=False,
device_class=DEVICE_CLASS_TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.end),
),
)
SENSOR_TYPES_GAS: tuple[OVOEnergySensorEntityDescription, ...] = (
OVOEnergySensorEntityDescription(
key="last_gas_reading",
name="OVO Last Gas Reading",
device_class=DEVICE_CLASS_ENERGY,
state_class=STATE_CLASS_TOTAL_INCREASING,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
icon="mdi:gas-cylinder",
value=lambda usage: usage.gas[-1].consumption,
),
OVOEnergySensorEntityDescription(
key=KEY_LAST_GAS_COST,
name="OVO Last Gas Cost",
device_class=DEVICE_CLASS_MONETARY,
state_class=STATE_CLASS_TOTAL_INCREASING,
icon="mdi:cash-multiple",
value=lambda usage: usage.gas[-1].cost.amount,
),
OVOEnergySensorEntityDescription(
key="last_gas_start_time",
name="OVO Last Gas Start Time",
entity_registry_enabled_default=False,
device_class=DEVICE_CLASS_TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.start),
),
OVOEnergySensorEntityDescription(
key="last_gas_end_time",
name="OVO Last Gas End Time",
entity_registry_enabled_default=False,
device_class=DEVICE_CLASS_TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.end),
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up OVO Energy sensor based on a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
client: OVOEnergy = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
entities = []
if coordinator.data:
if coordinator.data.electricity:
for description in SENSOR_TYPES_ELECTRICITY:
if description.key == KEY_LAST_ELECTRICITY_COST:
description.native_unit_of_measurement = (
coordinator.data.electricity[-1].cost.currency_unit
)
entities.append(OVOEnergySensor(coordinator, description, client))
if coordinator.data.gas:
for description in SENSOR_TYPES_GAS:
if description.key == KEY_LAST_GAS_COST:
description.native_unit_of_measurement = coordinator.data.gas[
-1
].cost.currency_unit
entities.append(OVOEnergySensor(coordinator, description, client))
async_add_entities(entities, True)
class OVOEnergySensor(OVOEnergyDeviceEntity, SensorEntity):
"""Define a OVO Energy sensor."""
coordinator: DataUpdateCoordinator
entity_description: OVOEnergySensorEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: OVOEnergySensorEntityDescription,
client: OVOEnergy,
) -> None:
"""Initialize."""
super().__init__(
coordinator,
client,
)
self._attr_unique_id = f"{DOMAIN}_{client.account_id}_{description.key}"
self.entity_description = description
@property
def native_value(self) -> StateType:
"""Return the state."""
usage: OVODailyUsage = self.coordinator.data
return self.entity_description.value(usage)
``` |
{
"source": "joaopio/gmail-2-insta",
"score": 3
} |
#### File: gmail-2-insta/tools/g2ilog.py
```python
__doc__ = \
'''
'''
__version__ = '0.1'
__authors__ = [
"Version 0.1: <NAME> <<EMAIL>>"
]
import logging
class G2ILogger():
#Class Constructor
def __init__(self, loglevel=logging.INFO):
#Logging formats
self.FORMAT_CLEAN = "%(levelname)7s: %(message)s"
self.FORMAT_MIDDLE = "%(levelname)7s: %(funcName)20s() - %(message)s"
self.FORMAT_DETAILED = "[%(filename)10s:%(lineno)4s - %(funcName)15s()] %(levelname)7s: %(message)s"
#Logging definition
self.log = logging.getLogger('NamfLogger')
self.log_handler = logging.StreamHandler() # Handler for the logger
self.log_handler.setFormatter(logging.Formatter(self.FORMAT_CLEAN))
self.log.addHandler(self.log_handler)
self.log.setLevel(loglevel)
def set_format(self, format="%(levelname)8s: %(message)s"):
#Set the format
self.log_handler.setFormatter(logging.Formatter(format))
def set_level(self, level):
#Set warning level
self.log.setLevel(level)
logger = G2ILogger()
``` |
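A usage sketch for the module-level logger instance defined above, assuming the package is importable as `tools.g2ilog`.

```python
# Usage sketch: raise verbosity and switch to the detailed format.
import logging

from tools.g2ilog import logger  # module-level G2ILogger instance; import path assumed

logger.set_level(logging.DEBUG)
logger.set_format(logger.FORMAT_DETAILED)
logger.log.debug("starting gmail-2-insta run")
```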
{
"source": "joaopjt/sublime_docs",
"score": 2
} |
#### File: joaopjt/sublime_docs/Docs.py
```python
import threading
import os
import json
import sublime
import sublime_plugin
PACKAGES = []
BASEPATH = '/'
class DocsCommand(sublime_plugin.WindowCommand):
def verifyOpenedFolder(self):
if not self.window.folders():
sublime.message_dialog('No project folder opened.')
return False
return True
def getConfigPath(self):
try:
with open(self.window.folders()[0] + '\\.docsconfig', 'r') as config:
data = config.read()
data = json.loads(data)
data = data['base_path']
except:
data = '/'
return data
def getPackages(self):
packages = []
try:
with open(self.window.folders()[0] + BASEPATH + 'package.json', 'r') as packageFile:
data = packageFile.read()
except:
sublime.message_dialog('Unable to find the \'package.json\' file.')
return
data = json.loads(data)
if data.get('devDependencies'):
for package in data['devDependencies']:
packages.append(package)
if data.get('dependencies'):
for package in data['dependencies']:
packages.append(package)
return packages
def display_list(self, packages):
self.packages = packages
self.window.show_quick_panel(packages, self.on_done)
def on_done(self, index):
path = self.window.folders()[0] + BASEPATH + 'node_modules/' + PACKAGES[index] + '/README.md'
if index == -1:
return
if os.path.exists(path):
self.window.open_file(path)
else:
sublime.message_dialog('README from \'' + PACKAGES[index] + '\' not found.')
def run(self):
global PACKAGES
global BASEPATH
if self.verifyOpenedFolder():
BASEPATH = self.getConfigPath()
PACKAGES = self.getPackages()
self.display_list(PACKAGES)
``` |
{
"source": "joaopm33/funds_py",
"score": 3
} |
#### File: funds_py/fundspy/fundspy.py
```python
import os
import os.path
import zipfile
import datetime
import calendar
import sqlite3
#packages used to download data
import requests
from urllib.request import HTTPError
from yahoofinancials import YahooFinancials
#packages used to manipulate data
import pandas as pd
from pandas.io.sql import DatabaseError
import numpy as np
#other packages
from tqdm import tqdm
from workalendar.america import Brazil
from dateutil.relativedelta import relativedelta
def cvm_informes (year: int, mth: int) -> pd.DataFrame:
"""Downloads the daily report (informe diario) from CVM for a given month and year\n
<b>Parameters:</b>\n
year (int): The year of the report the function should download\n
mth (int): The month of the report the function should download\n
<b>Returns:</b>\n
pd.DataFrame: Pandas dataframe with the report for the given month and year. If the year is prior to 2017, it will contain data for the whole year
"""
if int(year) >= 2017: #uses download process from reports after the year of 2017
try:
mth = f"{mth:02d}"
year = str(year)
#creates url using the parameters provided to the function
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/inf_diario_fi_'+year+mth+'.csv'
#reads the csv returned by the link
cotas = pd.read_csv(url, sep =';')
cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
try:
#removes column present in only a few reports to avoid inconsistency when making the union of reports
cotas.drop(columns = ['TP_FUNDO'], inplace = True)
except KeyError:
pass
return cotas
except HTTPError:
print('there is no report for this date yet!\n')
if int(year) < 2017:
try:
year = str(year)
url = 'http://dados.cvm.gov.br/dados/FI/DOC/INF_DIARIO/DADOS/HIST/inf_diario_fi_' + year + '.zip'
#sends request to the url
r = requests.get(url, stream=True, allow_redirects=True)
with open('informe' + year + '.zip', 'wb') as fd: #writes the .zip file downloaded
fd.write(r.content)
zip_inf = zipfile.ZipFile('informe' + year + '.zip') #opens the .zip file
#reads the csv files inside the zip file
informes = [pd.read_csv(zip_inf.open(f), sep=";") for f in zip_inf.namelist()]
cotas = pd.concat(informes,ignore_index=True)
cotas['DT_COMPTC'] = pd.to_datetime(cotas['DT_COMPTC']) #casts date column to datetime
zip_inf.close() #closes the zip file
os.remove('informe' + year + '.zip') #deletes .zip file
return cotas
except Exception as E:
print(E)
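# Example usage (sketch): download the CVM daily report for a given month and inspect it.
#   informe = cvm_informes(2021, 1)
#   print(informe.head())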
def start_db(db_dir: str = 'investments_database.db', start_year: int = 2005, target_funds: list = []):
"""Starts a SQLite database with 3 tables: daily_quotas (funds data), ibov_returns (ibovespa index data) and selic_rates (the base interest rate for the brazilian economy).\n
<b>Parameters:</b>\n
db_dir (str): The path of the dabatabse file to be created. Defaults to 'investments_database.db', creating the file in the current working directory.\n
start_year (int): Optional (Defaults to 2005). Starting year for the data collection. Can be used to reduce the size of the database.\n
target_funds (list): Optional (Defaults to []). List of target funds CNPJs. Only funds with CNPJs contained in this list will be included in the database. Can be used to radically reduce the size of the database. If none is specified, all funds will be included.\n
<b>Returns:</b>\n
There is no return from the function.
"""
##STEP 1:
#starts the new database
print (f'creating SQLite database: {db_dir} \n')
con = sqlite3.connect(db_dir)
##STEP 2:
#downloads each report in the cvm website and pushes it to the sql database daily_quotas table
print('downloading daily reports from the CVM website... \n')
#for each year between 2017 and now
for year in tqdm(range(start_year, datetime.date.today().year + 1), position = 0, leave=True):
for mth in range(1, 13): #for each month
#loop structure for years equal or after 2017
if year>=2017:
informe = cvm_informes(str(year), mth)
try:
if target_funds: #if the target funds list is not empty, uses it to filter the result set
informe = informe[informe.CNPJ_FUNDO.isin(target_funds)]
#appends information to the sql database
informe.to_sql('daily_quotas', con , if_exists = 'append', index=False)
except AttributeError:
pass
elif year<2017: #loop structure to handle years before 2017 (they have a different file structure)
#only executes the download function once every year to avoid duplicates (unique file for each year)
if mth == 12:
informe = cvm_informes(str(year), mth)
try:
if target_funds: #if the target funds list is not empty, uses it to filter the result set
informe = informe[informe.CNPJ_FUNDO.isin(target_funds)]
#appends information to the sql database
informe.to_sql('daily_quotas', con , if_exists = 'append', index=False)
except AttributeError:
pass
#pushes target funds to sql for use when updating the database
if target_funds:
target_df = pd.DataFrame({'targets':target_funds})
target_df.to_sql('target_funds', con , index=False)
##STEP 3:
#creates index in the daily_quotas table to make future select queries faster.
#tradeoff: The updating processes of the database will be slower.
print('creating sql index on "CNPJ_FUNDO", "DT_COMPTC" ... \n')
index = '''
CREATE INDEX "cnpj_date" ON "daily_quotas" (
"CNPJ_FUNDO" ASC,
"DT_COMPTC" ASC
)'''
cursor = con.cursor()
cursor.execute(index)
con.commit()
cursor.close()
##STEP 4:
    #downloads cadastral information of the funds from CVM and pushes it to the database
print('downloading cadastral information from cvm...\n')
info_cad = pd.read_csv('http://dados.cvm.gov.br/dados/FI/CAD/DADOS/cad_fi.csv', sep = ';', encoding='latin1',
dtype = {'RENTAB_FUNDO': object,'FUNDO_EXCLUSIVO': object, 'TRIB_LPRAZO': object, 'ENTID_INVEST': object,
'INF_TAXA_PERFM': object, 'INF_TAXA_ADM': object, 'DIRETOR': object, 'CNPJ_CONTROLADOR': object,
'CONTROLADOR': object}
)
if target_funds:
info_cad = info_cad[info_cad.CNPJ_FUNDO.isin(target_funds)]
info_cad.to_sql('info_cadastral_funds', con, index=False)
##STEP 5:
#downloads daily ibovespa prices from investing.com and pushes it to the database
print('downloading ibovespa index prices from investing.com ...\n')
today = (datetime.date.today() + datetime.timedelta(1)).strftime('%Y-%m-%d')
ibov = pd.DataFrame(YahooFinancials('^BVSP').get_historical_price_data('1990-09-15', today, 'daily')['^BVSP']['prices'])
ibov = ibov.drop(columns=['date', 'close']).rename(columns={'formatted_date':'date', 'adjclose':'close'}).iloc[:,[5,0,1,2,3,4]]
ibov['date'] = pd.to_datetime(ibov['date'])
ibov.columns = [i.capitalize() for i in ibov.columns] #capitalizes columns to keep consistency with previous format (investpy)
ibov.to_sql('ibov_returns', con, index=False)
##STEP 6:
#downloads daily selic returns (basic interest rate of the brazilian economy)
    #from the Brazilian Central Bank and pushes it to the database
print('downloading selic rates from the Brazilian Central Bank website...\n')
selic = pd.read_json('http://api.bcb.gov.br/dados/serie/bcdata.sgs.{}/dados?formato=json'.format(11))
selic['data'] = pd.to_datetime(selic['data'], format = '%d/%m/%Y')
    selic['valor'] = selic['valor']/100 #calculates the decimal rate from the percentage value
#calculates asset "price" considering day 0 price as 1
selic.loc[0,'price'] = 1 * (1 + selic.loc[0,'valor'])
for i in range(1, len(selic)):
selic.loc[i, 'price'] = selic.loc[i-1, 'price'] * (1 + selic.loc[i,'valor'])
selic.rename(columns = {'data':'date', 'valor':'rate'}, inplace = True)
selic.to_sql('selic_rates', con , index=False)
##STEP 7:
#creates a table with a log of the execution timestamps of the script
print('creating the log table...\n')
update_log = pd.DataFrame({'date':[datetime.datetime.now()], 'log':[1]})
update_log.to_sql('update_log', con, if_exists = 'append', index=False)
##STEP 8
#closes the connection with the database
con.close()
print('connection with the database closed! \n')
print(f'Success: database created in {db_dir} !\n')
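# Illustrative usage sketch (the path, year and CNPJs below are placeholders):
# start_db(db_dir='investments_database.db', start_year=2010,
#          target_funds=['11.111.111/0001-11', '22.222.222/0001-22'])
# would build a database restricted to those two funds, with data starting in 2010.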
def update_db(db_dir: str = r'investments_database.db'):
"""Updates the database.\n
<b>Parameters:</b>\n
    db_dir (str): The path of the database file to be updated. Defaults to 'investments_database.db'.\n
<b>Returns:</b>\n
Theres no return from the function.
"""
##STEP 1
#connects to the database
print(f'connected with the database {db_dir}\n')
con = sqlite3.connect(db_dir)
##STEP 2
#calculates relevant date limits to the update process
    Cal=Brazil() #initializes the Brazilian calendar
today = datetime.date.today()
#queries the last update from the log table
last_update = pd.to_datetime(pd.read_sql('select MAX(date) from update_log', con).iloc[0,0])
    last_quota = Cal.sub_working_days(last_update, 2) #date of the last published cvm report
num_months = (today.year - last_quota.year) * 12 + (today.month - last_quota.month) + 1
##STEP 3
#delete information that will be updated from the database tables
print('deleting redundant data from the database... \n')
tables = {'daily_quotas' : ['DT_COMPTC',last_quota.strftime("%Y-%m-01")],
'ibov_returns' : ['Date',last_update.strftime("%Y-%m-%d")]}
cursor = con.cursor()
#sql delete statement to the database
cursor.execute('delete from daily_quotas where DT_COMPTC >= :date', {'date': last_quota.strftime("%Y-%m-01")})
cursor.execute('delete from ibov_returns where Date >= :date', {'date': last_update.strftime("%Y-%m-%d")})
con.commit()
cursor.close()
##STEP 4
    #Pulls new data from CVM, Yahoo Finance and the Brazilian Central Bank
#and pushes it to the database
    try: #tries to read target funds if they were specified when starting the database
target_funds = pd.read_sql('select targets from target_funds', con).targets.to_list()
except DatabaseError:
target_funds = []
print('downloading new daily reports from the CVM website...\n')
    # downloads the daily cvm report for each month between the last update and today
for m in range(num_months+1):
data_alvo = last_quota + relativedelta(months=+m)
informe = cvm_informes(data_alvo.year, data_alvo.month)
if target_funds:
informe = informe[informe.CNPJ_FUNDO.isin(target_funds)]
try:
informe.to_sql('daily_quotas', con , if_exists = 'append', index=False)
except AttributeError:
pass
    #downloads cadastral information of the funds from CVM and pushes it to the database
print('downloading updated cadastral information from cvm...\n')
info_cad = pd.read_csv('http://dados.cvm.gov.br/dados/FI/CAD/DADOS/cad_fi.csv', sep = ';', encoding='latin1',
dtype = {'RENTAB_FUNDO': object,'FUNDO_EXCLUSIVO': object, 'TRIB_LPRAZO': object, 'ENTID_INVEST': object,
'INF_TAXA_PERFM': object, 'INF_TAXA_ADM': object, 'DIRETOR': object, 'CNPJ_CONTROLADOR': object,
'CONTROLADOR': object}
)
if target_funds: #filters target funds if they were specified when building the database.
info_cad = info_cad[info_cad.CNPJ_FUNDO.isin(target_funds)]
info_cad.to_sql('info_cadastral_funds', con, if_exists='replace', index=False)
#updates daily interest returns (selic)
print('updating selic rates...\n')
selic = pd.read_json('http://api.bcb.gov.br/dados/serie/bcdata.sgs.{}/dados?formato=json'.format(11))
selic['data'] = pd.to_datetime(selic['data'], format = '%d/%m/%Y')
    selic['valor'] = selic['valor']/100 #calculates the decimal rate from the percentage value
#calculates asset "price" considering day 0 price as 1
selic.loc[0,'price'] = 1 * (1 + selic.loc[0,'valor'])
for i in range(1, len(selic)):
selic.loc[i, 'price'] = selic.loc[i-1, 'price'] * (1 + selic.loc[i,'valor'])
selic.rename(columns = {'data':'date', 'valor':'rate'}, inplace = True)
#filters only new data
selic = selic[selic.date>=(last_update + datetime.timedelta(-1))]
selic.to_sql('selic_rates', con , if_exists = 'append', index=False)
#updates ibovespa data
print('updating ibovespa returns...\n')
today = (datetime.date.today() + datetime.timedelta(1)).strftime('%Y-%m-%d')
ibov = pd.DataFrame(YahooFinancials('^BVSP').get_historical_price_data(last_update.strftime('%Y-%m-%d'), today, 'daily')['^BVSP']['prices'])
ibov = ibov.drop(columns=['date', 'close']).rename(columns={'formatted_date':'date', 'adjclose':'close'}).iloc[:,[5,0,1,2,3,4]]
ibov['date'] = pd.to_datetime(ibov['date'])
ibov.columns = [i.capitalize() for i in ibov.columns] #capitalizes columns to keep consistency with previous format (investpy)
ibov.to_sql('ibov_returns', con , if_exists = 'append', index=False)
##STEP 5
#updates the log in the database
print('updating the log...\n')
update_log = pd.DataFrame({'date':[datetime.datetime.now()], 'log':[1]})
update_log.to_sql('update_log', con, if_exists = 'append', index=False)
##STEP 6
#closes the connection with the database
con.close()
print('connection with the database closed!\n')
print(f'database {db_dir} updated!\n')
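# Illustrative usage sketch: once the database exists, a periodic job could simply call
# update_db('investments_database.db')
# to append the CVM, Ibovespa and Selic data published since the last run logged in update_log.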
def returns(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA'], rolling: bool = False, window_size: int = 1) -> pd.DataFrame:
"""Calculates the % returns for the given assets both in rolling windows or for the full available period (you also get the CAGR in this case).\n
<b>Parameters</b>:\n
df (pd.DataFrame): Pandas dataframe with the needed columns.\n
group (str): name of the column in the dataframe used to group values (example: 'stock_ticker' or 'fund_code').\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark prices (Example: ['asset_price', 'index price']).\n
    rolling (bool): True or False. Indicates if the function will return total returns for each asset or rolling window returns.\n
    window_size: (int): Default = 1. Only useful if rolling = True. Defines the size of the rolling window which the returns will be calculated over.\n
    <b>Returns:</b>\n
    pd.DataFrame: If rolling = False: Pandas dataframe with total % returns for the assets. If rolling = True: The original pandas dataframe with added columns for the % returns in the rolling windows.
"""
if not rolling:
window_size = 1
    #guarantees that the values are positive, since division by zero returns infinity
returns = df.copy(deep=True)
for col in values:
returns = returns[returns[col]>0]
returns.loc[:, values] = returns.loc[:, values].fillna(method = 'backfill')
    #calculates the percentage change in the rolling windows specified for each group
returns = returns.groupby(group, sort = False, as_index = True)[values].apply(lambda x: x.pct_change(window_size))
#renames the columns
col_names = [(value + '_return_' + str(window_size) + 'd') for value in values]
returns.columns = col_names
    #if the parameter rolling = True, returns the original data with the added rolling returns
if rolling:
df2 = df.merge(returns, how='left', left_index=True, right_index=True)
return df2
    #if the parameter rolling = False, returns the total compound returns in the period, the number of days
# and the Compound Annual Growth Rate (CAGR)
if not rolling:
returns = df[[group]].merge(returns, left_index = True, right_index = True)
#calculates the compound returns
returns = returns.groupby(group, sort = False, as_index = True).apply(lambda x: np.prod(1+x) - 1)
#calculates the number of days in the period
n_observations = df.groupby(group, sort = False, as_index = True)[values[0]].count()
returns = returns.merge(n_observations, left_index = True, right_index = True)
#renames the columns in the result set
col_names = [(value + '_cum_return') for value in values]
col_names.append('days')
returns.columns = col_names
#calculates the Compound Annual Growth Rate (CAGR)
values = col_names[:-1]
col_names = [i.replace('_cum_return', '_cagr') for i in values]
returns[col_names] = (returns.dropna()
.loc[:,values]
.apply(lambda x: ((x + 1)**(252/returns.days))-1))
return returns
raise Exception("Wrong Parameter: rolling can only be True or False.")
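# Illustrative usage sketch ('quotas' is a hypothetical dataframe with CNPJ_FUNDO and VL_QUOTA columns):
# total = returns(quotas)                               # cumulative return, number of days and CAGR per fund
# daily = returns(quotas, rolling=True, window_size=1)  # adds a VL_QUOTA_return_1d column to the data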
def cum_returns(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA']) -> pd.DataFrame:
"""Calculates the cumulative % returns for the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed columns.\n
group (str): name of the column in the dataframe used to group values (example: 'stock_ticker' or 'fund_code').\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark prices (Example: ['asset_price', 'index price']).\n
<b>Returns:</b>\n
pd.DataFrame: A pandas dataframe with the cumulative % returns for each asset.
"""
returns_df = returns(df, group = group, values = values, rolling=True) #calculates the daily returns
#calculates the cumulative returns in each day for each group
cum_returns = returns_df.groupby(group)[[value + '_return_1d' for value in values]].expanding().apply(lambda x: np.prod(x+1)-1)
#renames the columns
cum_returns.columns = [i + '_cum_return' for i in values]
cum_returns.reset_index(level = 0, inplace = True)
cum_returns = returns_df.merge(cum_returns, how = 'right', on = group, left_index = True, right_index = True)
return cum_returns
def volatility(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA_return_1d'], rolling: bool = False ,returns_frequency: int = 1, window_size: int = 21) -> pd.DataFrame:
"""Calculates the annualized volatillity (standard deviation of returns with degree of freedom = 0) for givens assets returns both in rolling windows or for the full available period.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark returns. Example: ['asset_price', 'index price'].\n
rolling (bool): True or False. Indicates if the function will return total volatility for each asset or rolling window volatility.\n
    returns_frequency: (int): Default = 1. Indicates the frequency in days of the given returns. Should be in tradable days (252 days a year, 21 a month, 5 a week for stocks). This number is used to annualize the volatility.\n
    window_size: (int): Default = 21. Only useful if rolling = True. Defines the size of the rolling window which the volatility will be calculated over.\n
<b>Returns:</b>\n
pd.DataFrame: If rolling = False: Pandas dataframe with total volatility for the assets. If rolling = True: The original pandas dataframe with added columns for the volatility in the rolling windows.
"""
if not rolling:
vol = df.copy(deep=True)
for col in values:
            vol = vol[vol[col].notnull()]
vol = vol.groupby(group)[values].std(ddof=0)
#renames the columns
col_names = [(value + '_vol') for value in values]
vol.columns = col_names
#annualizes the volatility
vol[col_names]= vol[col_names].apply(lambda x : x *((252/returns_frequency)**0.5))
return vol
if rolling:
vol = df.copy(deep=True)
for col in values:
            vol = vol[vol[col].notnull()]
vol = (vol.groupby(group)[values]
.rolling(window_size)
               .std(ddof=0) #standard deviation in the rolling period
.reset_index(level = 0)
)
#renames the columns
col_names = [(value + '_vol_' + str(window_size) + 'rw') for value in values]
col_names.insert(0, group)
vol.columns = col_names
#annualizes the volatility
col_names.remove(group)
vol[col_names]= vol[col_names].apply(lambda x : x *((252/returns_frequency)**0.5))
df2 = df.merge(vol.drop(columns = group),left_index=True,right_index=True)
return df2
raise Exception("Wrong Parameter: rolling can only be True or False.")
def drawdown(df: pd.DataFrame, group: str = 'CNPJ_FUNDO', values: list = ['VL_QUOTA'])-> pd.DataFrame:
"""Calculates the drawdown (the % the asset is down from its all-time-high) for givens assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
    values (list): names of the columns in the dataframe which contain the asset and its benchmark prices. Example: ['asset_price', 'index price'].\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with added columns for the all time high and drawdown of the given assets.
"""
df2 = df.copy(deep = True)
for value in values:
col = 'cum_max_'+ value
df2[col] = df2.groupby([group])[[value]].cummax().to_numpy()
df2[('drawdown_'+ value)] = (df2[value]/df2[col])-1
return df2
def corr_benchmark(df: pd.DataFrame, asset_returns: str, index_returns: str, group: str = 'CNPJ_FUNDO', rolling: bool = False, window_size: int = 252) -> pd.DataFrame:
"""Calculates the correlation between assets and a given benchmark both in rolling windows or for the full available period.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
asset_returns (str): name of the column in the dataframe with the assets returns.\n
index_returns (str): name of the column in the dataframe with the benchmark returns.\n
rolling (bool): True or False. Indicates if the function will return total correlation for each asset or rolling window correlations.\n
    window_size: (int): Default = 252. Only useful if rolling = True. Defines the size of the rolling window which the correlation will be calculated over.\n
<b>Returns:</b>\n
pd.DataFrame: If rolling = False: Pandas dataframe with total correlation for the assets and their benchmarks. If rolling = True: The original pandas dataframe with an added column for the correlation in the rolling windows.
"""
if not rolling:
        #calculates the correlation between the assets' returns for the whole period
corr = df[df[asset_returns].notnull()].groupby([group])[[asset_returns,index_returns]].corr()
corr = corr.xs(index_returns,level = 1, drop_level=False)
corr = corr.reset_index(level = 1, drop = True)
corr = corr.drop(columns=[index_returns])
corr.columns=['correlation_benchmark']
return corr
if rolling:
        #calculates the correlation between the assets' returns across rolling windows
corr = (df[df[asset_returns].notnull()].groupby(group)[[asset_returns,index_returns]]
.rolling(window_size)
.corr()
                .xs(index_returns,level = 2, drop_level=True) #drops the redundant level of the corr matrix
.reset_index(level = 0)
.drop(columns=[index_returns])
.rename(columns = {asset_returns:'correlation_benchmark'})
)
df2 = df.merge(corr.drop(columns = [group]),left_index=True,right_index=True)
return df2
raise Exception("Wrong Parameter: rolling can only be True or False")
def beta(df: pd.DataFrame, asset_vol: str, bench_vol: str, correlation: str = 'correlation_benchmark') -> pd.DataFrame:
"""Calculates the beta (measure of the volatility of an asset compared to the market, usually represented by a index benchmark) of the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
asset_vol (str): name of the column in the dataframe with the assets volatilities.\n
bench_vol (str): name of the column in the dataframe with the benchmark volatility.\n
correlation (str): name of the column in the dataframe with the correlations between assets and their benchmarks.\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with an added column for the beta calculation.
"""
df2 = df.copy(deep = True)
df2['beta'] = (df2[asset_vol] / df2[bench_vol]) * df2[correlation]
return df2
def alpha(df: pd.DataFrame, asset_returns: str, bench_returns: str, riskfree_returns: str, beta: str) -> pd.DataFrame:
"""Calculates the alpha (measure of the excess of return of an asset compared to the market, usually represented by a index benchmark) of the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
asset_returns (str): name of the column in the dataframe with the assets returns.\n
bench_returns (str): name of the column in the dataframe with the benchmark returns.\n
riskfree_returns (str): name of the column in the dataframe with the risk free rate returns.\n
beta (str): name of the column in the dataframe with the assets betas.\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with an added column for the alpha calculation.
"""
df2 = df.copy(deep = True)
df2['alpha'] = df2[asset_returns] - df2[riskfree_returns] - (df2[beta] * (df2[bench_returns] - df2[riskfree_returns]))
return df2
def sharpe(df: pd.DataFrame, asset_returns: str, riskfree_returns: str, asset_vol: str) -> pd.DataFrame:
"""Calculates the sharpe ratio (average return earned in excess of the risk-free rate per unit of volatility) of the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
asset_returns (str): name of the column in the dataframe with the assets returns.\n
riskfree_returns (str): name of the column in the dataframe with the risk free rate returns.\n
asset_vol (str): name of the column in the dataframe with the assets volatilities.\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with an added column for the sharpe calculation.
"""
df2 = df.copy(deep = True)
df2['sharpe'] = (df2[asset_returns] - df2[riskfree_returns]) / df2[asset_vol]
return df2
def sortino(df: pd.DataFrame, asset_returns: str, riskfree_returns: str, asset_negative_vol: str) -> pd.DataFrame:
"""Calculates the sortino ratio (average return earned in excess of the risk-free rate per unit of negative volatility) of the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
asset_returns (str): name of the column in the dataframe with the assets returns.\n
riskfree_returns (str): name of the column in the dataframe with the risk free rate returns.\n
asset_negative_vol (str): name of the column in the dataframe with the assets downside volatilities (volatility of only negative returns).\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with an added column for the sortino calculation.
"""
df2 = df.copy(deep = True)
df2['sortino'] = (df2[asset_returns] - df2[riskfree_returns]) / df2[asset_negative_vol]
return df2
def capture_ratio(df: pd.DataFrame, asset_returns: str, bench_returns: str, returns_frequency: int, group: str = 'CNPJ_FUNDO') -> pd.DataFrame:
"""Calculates the capture ratios (measure of assets performance relative to its benchmark in bull and bear markets) of the given assets.\n
<b>Parameters:</b>\n
df (pd.DataFrame): Pandas dataframe with the needed data.\n
asset_returns (str): name of the column in the dataframe with the assets returns.\n
bench_returns (str): name of the column in the dataframe with the benchmark returns.\n
returns_frequency: (int): Indicates the frequency in days of the given returns. Should be in tradable days (252 days a year, 21 a month, 5 a week for stocks).\n
group (str): name of the column in the dataframe used to group values. Example: 'stock_ticker' or 'fund_code'.\n
<b>Returns:</b>\n
pd.DataFrame: The original pandas dataframe with added columns for the capture ratios (bull, bear and ratio bull/bear).\n
"""
df_bull = df[(df[asset_returns].notnull()) & (df[bench_returns].notnull())].copy(deep = True)
df_bear = df[(df[asset_returns].notnull()) & (df[bench_returns].notnull())].copy(deep = True)
df_bull = df_bull[df_bull[bench_returns] > 0] #dataframe with only positive returns from the benchmark
df_bear = df_bear[df_bear[bench_returns] <= 0] #dataframe with only negative returns from the benchmark
tables = [df_bull, df_bear]
for i, _ in enumerate(tables): #performs set of operations in each table
#calculates total returns + 1
compound = tables[i].groupby(group)[[asset_returns,bench_returns]].apply(lambda x: np.prod(1+x))
#counts number of periods
nperiods = tables[i].groupby(group)[[asset_returns]].count().rename(columns = {asset_returns:'n_periods'})
tables[i] = compound.merge(nperiods, on = group, how ='left')#joins tables defined above
#calculates the annualized returns (CAGR)
tables[i][asset_returns]=(tables[i][asset_returns]**((252/returns_frequency)/tables[i]['n_periods']))-1
tables[i][bench_returns]=(tables[i][bench_returns]**((252/returns_frequency)/tables[i]['n_periods']))-1
tables[i]['capture'] = tables[i][asset_returns]/tables[i][bench_returns] #calculates the capture
df2 = tables[1].merge(tables[0], on = group, how ='left',suffixes=('_bear','_bull'))
df2['capture_ratio'] = df2['capture_bull']/df2['capture_bear']
return df2
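# Illustrative usage sketch, assuming 1-day fund and benchmark returns live in the same dataframe
# ('Close_return_1d' is a placeholder name for the benchmark return column):
# capture = capture_ratio(daily, asset_returns='VL_QUOTA_return_1d', bench_returns='Close_return_1d', returns_frequency=1)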
``` |
{
"source": "joao-p-marques/drive-throught-p2p",
"score": 3
} |
#### File: joao-p-marques/drive-throught-p2p/Employee.py
```python
import time
import pickle
import socket
import logging
from utils import work
# import argparse
import threading
import queue
from Entity import Entity
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
class Employee(threading.Thread):
def __init__(self, own_id=3, address=('localhost', 5003), root_id=0, root_address=('localhost', 5000)):
threading.Thread.__init__(self)
self.own_id = own_id
self.address = address
self.root_id = root_id
self.root_address = root_address
self.node_comm = Entity(own_id, address, root_id, root_address, 'Employee')
self.node_comm.start()
self.logger = logging.getLogger("Employee {}".format(self.node_comm.own_id))
self.queueDone = []
self.queueWaiting = [] # clients waiting to pickup
def deliver(self, args):
if any(orderLista == args['orderTicket'] for orderLista in self.queueDone):
self.queueDone.remove(args['orderTicket'])
self.queueWaiting.remove(args['orderTicket'])
msg = { 'method' : 'DELIVER',
'args' : args }
self.node_comm.queueOut.put(msg)
return True
return False
def wait_in_line(self,args):
if not self.deliver(args):
self.queueWaiting.append(args['order']['orderTicket'])
def run(self):
if self.own_id == self.root_id:
# Await for DHT to get stable
time.sleep(3)
# Print the ring order for debug
self.node_comm.print_ring()
# Start building the table from the root node
self.node_comm.propagate_table()
# Await for DHT to get stable
time.sleep(3)
# Print the ring order for debug
self.node_comm.print_table()
done = False
while not done:
foodRequest = self.node_comm.queueIn.get()
if foodRequest is not None:
if len(self.queueWaiting) != 0:
self.deliver(self.queueWaiting[0])
                #the client is ready to pick up the order
if foodRequest['method']=='PICKUP':
self.wait_in_line(foodRequest['args'])
                #in case the food is ready
elif foodRequest['method']=='ORDER_DONE':
self.queueDone.append(foodRequest['args']['orderTicket'])
self.deliver(foodRequest['args'])
work()
else:
work()
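# Illustrative summary of the loop above: each iteration pops one message from the DHT node;
# PICKUP queues the client until its order is done, while ORDER_DONE marks the order as ready
# and delivers it immediately if the client is already waiting in line.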
``` |
{
"source": "joaopmatias/ata",
"score": 2
} |
#### File: joaopmatias/ata/update_student.py
```python
import sys
import boto3
from botocore import UNSIGNED
from botocore.client import Config
def update_student():
s3_student = boto3.resource('s3', config=Config(signature_version=UNSIGNED))
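    # Config(signature_version=UNSIGNED) makes boto3 send anonymous requests,
    # so no AWS credentials are needed to download 'all.txt' from the public 'alexatutor' bucket.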
s3_student.Bucket('alexatutor').download_file('all.txt', 'all.txt')
if __name__ == '__main__':
update_student()
``` |
{
"source": "JoaoPMonteiro/PoseAug_for_OAKD",
"score": 2
} |
#### File: PoseAug_for_OAKD/function_poseaug/dataloader_update.py
```python
from __future__ import print_function, absolute_import, division
import time
from torch.utils.data import DataLoader
from common.camera import project_to_2d
from common.data_loader import PoseDataSet, PoseTarget
from models_poseaug.gan_generator import random_bl_aug
from progress.bar import Bar
from utils.utils import AverageMeter
def dataloader_update(args, data_dict, device):
"""
    this function loads the train loader and applies swap bone length augmentation to the train loader, target 3D loader,
    and target 2D loader from hm3.6, for more stable GAN training.
"""
batch_time = AverageMeter()
data_time = AverageMeter()
end = time.time()
#########################################
    # empty these lists and refill them with the augmented data below
#########################################
buffer_poses_train = []
buffer_poses_train_2d = []
buffer_actions_train = []
buffer_cams_train = []
bar = Bar('Update training loader', max=len(data_dict['train_gt2d3d_loader']))
for i, (targets_3d, _, action, cam_param) in enumerate(data_dict['train_gt2d3d_loader']):
# Measure data loading time
data_time.update(time.time() - end)
num_poses = targets_3d.size(0)
targets_3d, cam_param = targets_3d.to(device), cam_param.to(device)
        # do bone length random swap augmentation.
targets_3d = random_bl_aug(targets_3d)
# calculate the project 2D.
inputs_2d = project_to_2d(targets_3d, cam_param)
buffer_poses_train.append(targets_3d.detach().cpu().numpy())
buffer_poses_train_2d.append(inputs_2d.detach().cpu().numpy())
buffer_actions_train.append(action)
buffer_cams_train.append(cam_param.detach().cpu().numpy())
# Measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {ttl:} | ETA: {eta:} ' \
.format(batch=i + 1, size=len(data_dict['train_gt2d3d_loader']), data=data_time.avg, bt=batch_time.avg,
ttl=bar.elapsed_td, eta=bar.eta_td)
bar.next()
bar.finish()
assert len(buffer_poses_train) == len(buffer_poses_train_2d)
assert len(buffer_poses_train) == len(buffer_actions_train)
assert len(buffer_poses_train) == len(buffer_cams_train)
# update all the poseaug train dataloader for stable training.
print('==> Random Bone Length (S15678) swap completed')
data_dict['train_gt2d3d_loader'] = DataLoader(PoseDataSet(buffer_poses_train, buffer_poses_train_2d,
buffer_actions_train, buffer_cams_train),
batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
data_dict['target_3d_loader'] = DataLoader(PoseTarget(buffer_poses_train),
batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
data_dict['target_2d_loader'] = DataLoader(PoseTarget(buffer_poses_train_2d),
batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, pin_memory=True)
return
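# Illustrative note: after this call data_dict holds three refreshed loaders built from the same augmented
# poses: paired (3D, 2D) samples for supervised training plus 3D-only and 2D-only targets, which presumably
# feed the 3D and 2D discriminators used in PoseAug's GAN training.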
```
#### File: PoseAug_for_OAKD/utils/utils.py
```python
from __future__ import absolute_import, division
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
# self-defined tools
class Summary(object):
def __init__(self, directory):
self.directory = directory
self.epoch = 0
self.writer = None
self.phase = 0
self.train_iter_num = 0
self.train_realpose_iter_num = 0
self.train_fakepose_iter_num = 0
self.test_iter_num = 0
self.test_MPI3D_iter_num = 0
def create_summary(self):
self.writer = SummaryWriter(log_dir=os.path.join(self.directory))
return self.writer
def summary_train_iter_num_update(self):
self.train_iter_num = self.train_iter_num + 1
def summary_train_realpose_iter_num_update(self):
self.train_realpose_iter_num = self.train_realpose_iter_num + 1
def summary_train_fakepose_iter_num_update(self):
self.train_fakepose_iter_num = self.train_fakepose_iter_num + 1
def summary_test_iter_num_update(self):
self.test_iter_num = self.test_iter_num + 1
def summary_test_MPI3D_iter_num_update(self):
self.test_MPI3D_iter_num = self.test_MPI3D_iter_num + 1
def summary_epoch_update(self):
self.epoch = self.epoch + 1
def summary_phase_update(self):
self.phase = self.phase + 1
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
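# Illustrative usage sketch for AverageMeter ('loss' and 'batch_size' are placeholders from a training loop):
# meter = AverageMeter()
# meter.update(loss.item(), n=batch_size)   # accumulate a batch value weighted by its size
# print(meter.avg)                          # running weighted average so far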
def lr_decay(optimizer, step, lr, decay_step, gamma):
lr = lr * gamma ** (step / decay_step)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def set_grad(nets, requires_grad=False):
for net in nets:
for param in net.parameters():
param.requires_grad = requires_grad
def save_ckpt(state, ckpt_path, suffix=None):
if suffix is None:
suffix = 'epoch_{:04d}'.format(state['epoch'])
file_path = os.path.join(ckpt_path, 'ckpt_{}.pth.tar'.format(suffix))
torch.save(state, file_path)
def wrap(func, unsqueeze, *args):
"""
Wrap a torch function so it can be called with NumPy arrays.
Input and return types are seamlessly converted.
"""
# Convert input types where applicable
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
# Convert output types where applicable
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
return result.numpy()
else:
return result
from torch.optim import lr_scheduler
def get_scheduler(optimizer, policy, nepoch_fix=None, nepoch=None, decay_step=None):
if policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch - nepoch_fix) / float(nepoch - nepoch_fix + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif policy == 'step':
scheduler = lr_scheduler.StepLR(
optimizer, step_size=decay_step, gamma=0.1)
elif policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % policy)
return scheduler
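# Illustrative usage sketch ('train_one_epoch' and 'num_epochs' are placeholders for the actual training loop):
# scheduler = get_scheduler(optimizer, policy='step', decay_step=30)
# for epoch in range(num_epochs):
#     train_one_epoch()
#     scheduler.step()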
``` |
{
"source": "JoaoP-Rodrigues/CursoPython3-Aulas",
"score": 4
} |
#### File: Python3CursoEmVideo/Aulas/aula21.py
```python
""" Exercício 103
def ficha(a='<desconhecido>', b=0):
print(f'O jogador {a} fez {b} gol(s) no campeonato!')
nome = str(input('Nome do jogador: '))
gols = str(input('Quantidade de gols: '))
if gols.isnumeric():
gols = int(gols)
else:
gols = 0
if nome.strip() == '':
ficha(b=gols)
else:
ficha(nome, gols)
"""
""" Exercício 104
"""
def leiaInt(ite):
while True:
try:
valor = int(input(ite))
except (ValueError, TypeError):
print('\033[0;31mERRO! Tivemos um problema com o tipo de dado que você digitou!\033[m')
continue
except KeyboardInterrupt:
print('\033[0;31mERRO! O usuário preferiu não informar os dados!\033[m')
return 0
else:
return valor
def leiaFloat(fro):
while True:
try:
valor = float(input(fro))
except (ValueError, TypeError):
print('\033[0;31mERRO! Tivemos um problema com o tipo de dado que você digitou!\033[m')
continue
except KeyboardInterrupt:
print('\033[0;31mERRO! O usuário preferiu não informar os dados!\033[m')
return 0
else:
return valor
# Main program
i = leiaInt('Digite um número inteiro: ')
#n = int(input(': '))
print(f'Você acabou de digitar o número inteiro: {i}!')
r = leiaFloat('Digite um número real: ')
print(f'Você acabou de digitar o número real: {r}')
''' Exercício 105
def notas(* n, sit=False):
"""
:param n: Lista de notas
:param sit: (opcional) Se True, informará a situação de acordo com a média.
:return: um dicionário com os dados.
"""
alunos = {}
alunos['quant'] = len(n)
alunos['maior'] = max(n)
alunos['menor'] = min(n)
alunos['media'] = sum(n)/len(n)
if sit:
if alunos['media'] >= 8:
alunos['sit'] = 'ÓTIMA'
elif 8 > alunos['media'] >= 6:
alunos['sit'] = 'BOA'
elif 6 > alunos['media'] > 3:
alunos['sit'] = 'RUIM'
else:
alunos['sit'] = 'PÉSSIMA'
return alunos
resp = notas(9,7.5, 8, 7.5, 8, sit=True)
print(resp)
'''
""" Exercício 106
def ajuda(msg):
print(help(msg))
while True:
com = str(input('\033[035mDigite o comando poara consultar ajuda: [FIM para sair]\033[m'))
if com in 'FIMfimFim':
break
else:
ajuda(com)
print('FIM!')
"""
```
#### File: Python3CursoEmVideo/Aulas/courseratest.py
```python
def get_pos(str_wrd):
positives = 0
for s in str_wrd:
if s in punctuation_chars:
str_wrd = str_wrd.replace(s, '')
lst_str = str_wrd.split(' ')
for i in lst_str:
i = i.lower()
if i in positive_words:
positives += 1
return positives
def get_neg(str_wrd):
    negatives = 0
for s in str_wrd:
if s in punctuation_chars:
str_wrd = str_wrd.replace(s, '')
lst_str = str_wrd.split(' ')
for i in lst_str:
i = i.lower()
if i in negative_words:
            negatives += 1
    return negatives
punctuation_chars = ["'", '"', ",", ".", "!", ":", ";", '#', '@']
# lists of words to use
positive_words = []
with open("positive_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
positive_words.append(lin.strip())
negative_words = []
with open("negative_words.txt") as pos_f:
for lin in pos_f:
if lin[0] != ';' and lin[0] != '\n':
negative_words.append(lin.strip())
file_ref_op = open("project_twitter_data.csv", "r")
file_lines = file_ref_op.readlines()
# tweet_text,retweet_count,reply_count (structure of the source csv)
new_datas = open("resulting_data.csv", "w")
new_datas.write('Number of Retweets, Number of Replies, Positive Score, Negative Score, Net Score')
new_datas.write('\n')
c = 0
for l in file_lines:
l = l.replace('\n', '')
lst_line = l.split(',')
# lst_line = lst_line.replace('\n', '')
if c > 0:
pos = get_pos(lst_line[0])
neg = get_neg(lst_line[0])
net = pos - neg
row_string = '{},{},{},{},{}'.format(lst_line[1], lst_line[2], pos, neg, net)
new_datas.write(row_string)
new_datas.write('\n')
c += 1
```
#### File: Python3CursoEmVideo/Aulas/curso3.py
```python
'''
def lengths(strings):
lents = []
for i in strings:
lents.append(len(i))
return lents
def lengthsMap(strings):
"""lengths takes a list of strings as input and returns a list of numbers that are the lengths
of strings in the input list. Use map!"""
# fill in this function's definition to make the test pass.
lents = map(lambda x: len(x), strings)
return lents
def lengths(strings):
"""lengths takes a list of strings as input and returns a list of numbers that are the lengths
of strings in the input list. Use a list comprehension!"""
# fill in this function's definition to make the test pass.
yourlist = [len(s) for s in strings]
return yourlist
'''
#Write a function called positives_Fil that receives list of things as the input and returns a list of only the positive things, [3, 5, 7], using the filter function.
'''def positives_Fil(list_n):
#pos = filter(lambda x: x > 0, list_n)
pos = filter()
return pos
'''
def positives_Li_Com(lista):
yourlist = [v for v in lista if v >= 0]
#yourlist = filter(lambda num: num % 2 == 0, lista)
return yourlist
things = [3, 5, -4, 7, -2]
print(positives_Li_Com(things))
```
#### File: utilidadesCeV/dado/__init__.py
```python
def validacao(p):
ok = False
while not ok:
n = str(input(p)).replace(',', '.').strip()
if n.isalpha() or n == '':
print(f'ERRO! "{n}" é um preço inválido!')
else:
ok = True
return float(n)
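# Illustrative usage sketch:
# preco = validacao('Price: R$')  # re-prompts on empty or alphabetic input and accepts values such as '19,90' or '19.90'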
```
#### File: utilidadesCeV/moeda/__init__.py
```python
def aumentar(n=0, taxa=0, f=False):
a = n + (n * taxa / 100)
return a if not f else moeda(a)
def diminuir(n=0, taxa=0, f=False):
a = n - (n * taxa / 100)
return a if not f else moeda(a)
def dobro(n=0, f=False):
a = n * 2
return a if not f else moeda(a)
def metade(n=0, f=False):
a = n * 0.5
return a if not f else moeda(a)
def moeda(n=0, moeda='R$'):
return f'{moeda}{n:.2f}'.replace('.', ',')
def resumo(p, aum, sub):
print('_' * 30)
print(f'{"RESUMO DO VALOR":^30}')
print('_' * 30)
print(f'{"Preço analisado: ":<20}{moeda(p):>8}')
print(f'{"Dobro do preço: ":<20}{dobro(p, True):>8}')
print(f'{"Metade do preço: ":<20}{metade(p, True):>8}')
print(f'{"10% de aumento: ":<20}{aumentar(p, aum, True):>8}')
print(f'{"15% de redução: ":<20}{diminuir(p, sub, True):>8}')
print('_' * 30)
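# Illustrative usage sketch:
# resumo(35.90, 10, 15)  # prints the formatted summary with the double, half, +10% and -15% of the price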
``` |
{
"source": "JoaoP-Rodrigues/maistodosApi",
"score": 4
} |
#### File: JoaoP-Rodrigues/maistodosApi/db.py
```python
import sqlite3
# this line code bellow must be used to restart database
# os.remove("cashback.db") if os.path.exists("cashback.db") else None
# this function will create a new database if one is not found
def createDB():
con = sqlite3.connect("cashback.db")
cur = con.cursor()
sql_create_cashreqs = ('CREATE TABLE IF NOT EXISTS cashregs(\n'
' id varchar, \n'
' createdAt TEXT,\n'
' message TEXT,\n'
' document varchar(11),\n'
' cashback double(100))')
cur.execute(sql_create_cashreqs)
cur.close()
con.close()
# this function will save all data in the database
def insertCashBack(dict_cash_datas):
# make a connection with the database created above
con_cash = sqlite3.connect("cashback.db")
conect_cash = con_cash.cursor()
# get all elements from input dictionary and save each one in a variable
id_insert = dict_cash_datas["id"]
createdAt = dict_cash_datas["createdAt"]
message = dict_cash_datas["message"]
document = dict_cash_datas["document"]
cashback = dict_cash_datas["cashback"]
    # try to insert the data into the database
    # if this fails, it will return False to the main page
try:
conect_cash.execute("INSERT INTO cashregs (id, createdAt, message, document, cashback) VALUES (?, ?, ?, ?, ?)",
(id_insert, createdAt, message, document, cashback))
con_cash.commit()
conect_cash.close()
con_cash.close()
return True
except:
return False
createDB()
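# Illustrative usage sketch (all values below are placeholders):
# ok = insertCashBack({'id': 'abc123', 'createdAt': '2021-01-01T10:00:00',
#                      'message': 'purchase cashback', 'document': '12345678901', 'cashback': 10.0})
# 'ok' is True when the row was inserted and False when the INSERT failed.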
``` |
{
"source": "JoaoP-Rodrigues/test_deep_esg",
"score": 3
} |
#### File: JoaoP-Rodrigues/test_deep_esg/chartLedger.py
```python
from re import search
from openpyxl import load_workbook, Workbook
#function fillChart that will fill de chart file with values from ledger
def fillChart(chart, ledger):
"""
fillChart Function
    This function needs two parameters, both required and both must be an Excel or similar file
------------------------------------------
First Parameter
---> chart
It must contain a single column with the numbers of the charts it will search
Second Parameter
---> ledger
    It must contain two columns:
A Column - Number of charts
B Column - Value in the chart
------------------------------------------
Operation
------------------------------------------
---> The Search
    The function takes the input files and assigns each one to its own variable.
    Next, the function gets a value from the "sheetChart" variable and searches for this value in the first column of the "sheetLedger" variable.
    If the values are equal, it gets the value from the respective row, but from the second column, and adds it to the "valueBColumn" variable.
    This variable is then written to the output file.
------------------------------------------
---> The combination of Values
The second part of the code will combine values from the same branch of the tree.
First, the code looks for cells where the values are equal to zero (if different, it skips to the next iteration).
    Then it stores that value and the string length in separate variables.
The value will be the search key for sub-values, and the length will be the limiter to not get values from different branches.
"""
    #creates a .XLSX file to fill with the new data
out_chart_of_accounts = Workbook()
out_plan1 = out_chart_of_accounts.active
out_plan1.title = 'Ex_Chart_Accounts'
#create the header from output file
out_plan1.cell(row=1, column=1, value='account')
out_plan1.cell(row=1, column=2, value='value')
    sheetChart = chart.active #activates the sheet from the origin chart_of_accounts file and assigns it to the sheetChart variable.
sheetLedger = ledger.active
maxrowChart = sheetChart.max_row #take the last row from sheet Chart
maxrowLedger = sheetLedger.max_row #take the last row from sheet Ledger
#first loop. Enter in the chart_of_accounts file to get a value to search
for i in range(2, maxrowChart+1):
valueBColumn = float(0.0)
searchValue = sheetChart.cell(row=i, column=1).value #value that will be searched in the "ledger" file
        if searchValue is None: #skips the rest of the loop if an empty cell is found (usually at the end of the file)
continue
#Second loop. Enter in the general_ledger file to search and sum values from var "searchValue"
for j in range(2, maxrowLedger+1):
valueCh = sheetLedger.cell(row=j, column=1).value #get chart name
valueLe = sheetLedger.cell(row=j, column=2).value #get chart value
try:
valueLeFl = round(float(valueLe), 2) #convert str to float
                #if the values are equal, add it to the valueBColumn variable
if valueCh == searchValue:
valueBColumn += valueLeFl
except:
#Probable error when converting to float
continue
        try: #writes values to columns A and B in the output file, with an error check
out_plan1.cell(row=i, column=1, value=searchValue)
out_plan1.cell(row=i, column=2, value=valueBColumn)
except:
print('Error! Impossible save the file!')
#Second part! Combination of values
#-------------------------------------------------------------
max_rowOut = out_plan1.max_row #take the last row from sheet out_plan1
    #first loop. It gets a value equal to zero and searches for subvalues to add.
for i in range(2, max_rowOut+1):
valueOutV = out_plan1.cell(row=i, column=2).value
if valueOutV != 0: #if the value from B column not be zero, it jump the loop
continue
else:
valueOutC = out_plan1.cell(row=i, column=1).value #value that will be used to get subvalues
newSum = 0.0
            lenGetValue = len(valueOutC) #gets the length of the origin value. It will be the parameter that limits the subvalues
#Second loop. This will search for subvalues
for j in range(2, max_rowOut+1):
tempC = out_plan1.cell(row=j, column=1).value
try:
tempV = round(float(out_plan1.cell(row=j, column=2).value), 2)
#if the subvalue equals search value, this will be add to var 'newSum'
if valueOutC == tempC[:lenGetValue]:
newSum += tempV
except:
#Probable error when converting to float
continue
#write the newSum value in the output file
out_plan1.cell(row=i, column=2, value=newSum)
#save the output file in the "output" diretory and close it
try:
out_chart_of_accounts.save('output/out_chart_of_accounts.xlsx')
out_chart_of_accounts.close()
except:
print('Error! Unable to save file. Check write permission for the folder!')
#RETURN
    #No file is returned. The new file is saved in the "output" directory
#load files from the input directory
try:
chart_of_accounts = load_workbook('input/chart_of_accounts.xlsx')
general_ledger = load_workbook('input/general_ledger.xlsx')
fillChart(chart_of_accounts, general_ledger)
except:
print('Error! Unable to load files!')
``` |
{
"source": "JoaoPROFECIA/Ola-Mundo",
"score": 2
} |
#### File: site-packages/jupyter_client/manager.py
```python
import asyncio
import functools
import os
import re
import signal
import sys
import typing as t
import uuid
from asyncio.futures import Future
from concurrent.futures import Future as CFuture
from contextlib import contextmanager
from enum import Enum
import zmq
from traitlets import Any
from traitlets import Bool
from traitlets import default
from traitlets import DottedObjectName
from traitlets import Float
from traitlets import Instance
from traitlets import observe
from traitlets import observe_compat
from traitlets import Type
from traitlets import Unicode
from traitlets.utils.importstring import import_item
from .connect import ConnectionFileMixin
from .managerabc import KernelManagerABC
from .provisioning import KernelProvisionerBase
from .provisioning import KernelProvisionerFactory as KPF
from .utils import ensure_async
from .utils import run_sync
from jupyter_client import KernelClient
from jupyter_client import kernelspec
class _ShutdownStatus(Enum):
"""
This is so far used only for testing in order to track the internal state of
the shutdown logic, and verifying which path is taken for which
    misbehavior.
"""
Unset = None
ShutdownRequest = "ShutdownRequest"
SigtermRequest = "SigtermRequest"
SigkillRequest = "SigkillRequest"
F = t.TypeVar('F', bound=t.Callable[..., t.Any])
def in_pending_state(method: F) -> F:
"""Sets the kernel to a pending state by
creating a fresh Future for the KernelManager's `ready`
attribute. Once the method is finished, set the Future's results.
"""
@t.no_type_check
@functools.wraps(method)
async def wrapper(self, *args, **kwargs):
# Create a future for the decorated method
try:
self._ready = Future()
except RuntimeError:
# No event loop running, use concurrent future
self._ready = CFuture()
try:
# call wrapped method, await, and set the result or exception.
out = await method(self, *args, **kwargs)
# Add a small sleep to ensure tests can capture the state before done
await asyncio.sleep(0.01)
self._ready.set_result(None)
return out
except Exception as e:
self._ready.set_exception(e)
self.log.exception(self._ready.exception())
raise e
return t.cast(F, wrapper)
class KernelManager(ConnectionFileMixin):
"""Manages a single kernel in a subprocess on this host.
This version starts kernels with Popen.
"""
_ready: t.Union[Future, CFuture]
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
self._shutdown_status = _ShutdownStatus.Unset
# Create a place holder future.
try:
asyncio.get_running_loop()
self._ready = Future()
except RuntimeError:
# No event loop running, use concurrent future
self._ready = CFuture()
_created_context: Bool = Bool(False)
# The PyZMQ Context to use for communication with the kernel.
context: Instance = Instance(zmq.Context)
@default("context") # type:ignore[misc]
def _context_default(self) -> zmq.Context:
self._created_context = True
return zmq.Context()
# the class to create with our `client` method
client_class: DottedObjectName = DottedObjectName(
"jupyter_client.blocking.BlockingKernelClient"
)
client_factory: Type = Type(klass="jupyter_client.KernelClient")
@default("client_factory") # type:ignore[misc]
def _client_factory_default(self) -> Type:
return import_item(self.client_class)
@observe("client_class") # type:ignore[misc]
def _client_class_changed(self, change: t.Dict[str, DottedObjectName]) -> None:
self.client_factory = import_item(str(change["new"]))
kernel_id: str = Unicode(None, allow_none=True)
# The kernel provisioner with which this KernelManager is communicating.
# This will generally be a LocalProvisioner instance unless the kernelspec
# indicates otherwise.
provisioner: t.Optional[KernelProvisionerBase] = None
kernel_spec_manager: Instance = Instance(kernelspec.KernelSpecManager)
@default("kernel_spec_manager") # type:ignore[misc]
def _kernel_spec_manager_default(self) -> kernelspec.KernelSpecManager:
return kernelspec.KernelSpecManager(data_dir=self.data_dir)
@observe("kernel_spec_manager") # type:ignore[misc]
@observe_compat # type:ignore[misc]
def _kernel_spec_manager_changed(self, change: t.Dict[str, Instance]) -> None:
self._kernel_spec = None
shutdown_wait_time: Float = Float(
5.0,
config=True,
help="Time to wait for a kernel to terminate before killing it, "
"in seconds. When a shutdown request is initiated, the kernel "
"will be immediately sent an interrupt (SIGINT), followed"
"by a shutdown_request message, after 1/2 of `shutdown_wait_time`"
"it will be sent a terminate (SIGTERM) request, and finally at "
"the end of `shutdown_wait_time` will be killed (SIGKILL). terminate "
"and kill may be equivalent on windows. Note that this value can be"
"overridden by the in-use kernel provisioner since shutdown times may"
"vary by provisioned environment.",
)
kernel_name: Unicode = Unicode(kernelspec.NATIVE_KERNEL_NAME)
@observe("kernel_name") # type:ignore[misc]
def _kernel_name_changed(self, change: t.Dict[str, Unicode]) -> None:
self._kernel_spec = None
if change["new"] == "python":
self.kernel_name = kernelspec.NATIVE_KERNEL_NAME
_kernel_spec: t.Optional[kernelspec.KernelSpec] = None
@property
def kernel_spec(self) -> t.Optional[kernelspec.KernelSpec]:
if self._kernel_spec is None and self.kernel_name != "":
self._kernel_spec = self.kernel_spec_manager.get_kernel_spec(self.kernel_name)
return self._kernel_spec
cache_ports: Bool = Bool(
help="True if the MultiKernelManager should cache ports for this KernelManager instance"
)
@default("cache_ports") # type:ignore[misc]
def _default_cache_ports(self) -> bool:
return self.transport == "tcp"
@property
def ready(self) -> t.Union[CFuture, Future]:
"""A future that resolves when the kernel process has started for the first time"""
return self._ready
@property
def ipykernel(self) -> bool:
return self.kernel_name in {"python", "python2", "python3"}
# Protected traits
_launch_args: Any = Any()
_control_socket: Any = Any()
_restarter: Any = Any()
autorestart: Bool = Bool(
True, config=True, help="""Should we autorestart the kernel if it dies."""
)
shutting_down: bool = False
def __del__(self) -> None:
self._close_control_socket()
self.cleanup_connection_file()
# --------------------------------------------------------------------------
# Kernel restarter
# --------------------------------------------------------------------------
def start_restarter(self) -> None:
pass
def stop_restarter(self) -> None:
pass
def add_restart_callback(self, callback: t.Callable, event: str = "restart") -> None:
"""register a callback to be called when a kernel is restarted"""
if self._restarter is None:
return
self._restarter.add_callback(callback, event)
def remove_restart_callback(self, callback: t.Callable, event: str = "restart") -> None:
"""unregister a callback to be called when a kernel is restarted"""
if self._restarter is None:
return
self._restarter.remove_callback(callback, event)
# --------------------------------------------------------------------------
# create a Client connected to our Kernel
# --------------------------------------------------------------------------
def client(self, **kwargs: Any) -> KernelClient:
"""Create a client configured to connect to our kernel"""
kw = {}
kw.update(self.get_connection_info(session=True))
kw.update(
dict(
connection_file=self.connection_file,
parent=self,
)
)
# add kwargs last, for manual overrides
kw.update(kwargs)
return self.client_factory(**kw)
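    # Illustrative caller-side sketch (standard jupyter_client usage, shown here only as a comment):
    # km = KernelManager(kernel_name='python3'); km.start_kernel()
    # kc = km.client(); kc.start_channels(); kc.wait_for_ready()
    # ...; kc.stop_channels(); km.shutdown_kernel()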
# --------------------------------------------------------------------------
# Kernel management
# --------------------------------------------------------------------------
def format_kernel_cmd(self, extra_arguments: t.Optional[t.List[str]] = None) -> t.List[str]:
"""replace templated args (e.g. {connection_file})"""
extra_arguments = extra_arguments or []
assert self.kernel_spec is not None
cmd = self.kernel_spec.argv + extra_arguments
if cmd and cmd[0] in {
"python",
"python%i" % sys.version_info[0],
"python%i.%i" % sys.version_info[:2],
}:
# executable is 'python' or 'python3', use sys.executable.
# These will typically be the same,
# but if the current process is in an env
# and has been launched by abspath without
# activating the env, python on PATH may not be sys.executable,
# but it should be.
cmd[0] = sys.executable
# Make sure to use the realpath for the connection_file
# On windows, when running with the store python, the connection_file path
# is not usable by non python kernels because the path is being rerouted when
# inside of a store app.
# See this bug here: https://bugs.python.org/issue41196
ns = dict(
connection_file=os.path.realpath(self.connection_file),
prefix=sys.prefix,
)
if self.kernel_spec:
ns["resource_dir"] = self.kernel_spec.resource_dir
ns.update(self._launch_args)
pat = re.compile(r"\{([A-Za-z0-9_]+)\}")
def from_ns(match):
"""Get the key out of ns if it's there, otherwise no change."""
return ns.get(match.group(1), match.group())
return [pat.sub(from_ns, arg) for arg in cmd]
async def _async_launch_kernel(self, kernel_cmd: t.List[str], **kw: Any) -> None:
"""actually launch the kernel
override in a subclass to launch kernel subprocesses differently
        Note that provisioners can now be used to customize kernel environments.
"""
assert self.provisioner is not None
connection_info = await self.provisioner.launch_kernel(kernel_cmd, **kw)
assert self.provisioner.has_process
# Provisioner provides the connection information. Load into kernel manager and write file.
self._force_connection_info(connection_info)
_launch_kernel = run_sync(_async_launch_kernel)
# Control socket used for polite kernel shutdown
def _connect_control_socket(self) -> None:
if self._control_socket is None:
self._control_socket = self._create_connected_socket("control")
self._control_socket.linger = 100
def _close_control_socket(self) -> None:
if self._control_socket is None:
return
self._control_socket.close()
self._control_socket = None
async def _async_pre_start_kernel(self, **kw: Any) -> t.Tuple[t.List[str], t.Dict[str, t.Any]]:
"""Prepares a kernel for startup in a separate process.
If random ports (port=0) are being used, this method must be called
before the channels are created.
Parameters
----------
`**kw` : optional
keyword arguments that are passed down to build the kernel_cmd
and launching the kernel (e.g. Popen kwargs).
"""
self.shutting_down = False
self.kernel_id = self.kernel_id or kw.pop('kernel_id', str(uuid.uuid4()))
# save kwargs for use in restart
self._launch_args = kw.copy()
if self.provisioner is None: # will not be None on restarts
self.provisioner = KPF.instance(parent=self.parent).create_provisioner_instance(
self.kernel_id,
self.kernel_spec,
parent=self,
)
kw = await self.provisioner.pre_launch(**kw)
kernel_cmd = kw.pop('cmd')
return kernel_cmd, kw
pre_start_kernel = run_sync(_async_pre_start_kernel)
async def _async_post_start_kernel(self, **kw: Any) -> None:
"""Performs any post startup tasks relative to the kernel.
Parameters
----------
`**kw` : optional
keyword arguments that were used in the kernel process's launch.
"""
self.start_restarter()
self._connect_control_socket()
assert self.provisioner is not None
await self.provisioner.post_launch(**kw)
post_start_kernel = run_sync(_async_post_start_kernel)
@in_pending_state
async def _async_start_kernel(self, **kw: Any) -> None:
"""Starts a kernel on this host in a separate process.
If random ports (port=0) are being used, this method must be called
before the channels are created.
Parameters
----------
`**kw` : optional
keyword arguments that are passed down to build the kernel_cmd
and launching the kernel (e.g. Popen kwargs).
"""
kernel_cmd, kw = await ensure_async(self.pre_start_kernel(**kw))
# launch the kernel subprocess
self.log.debug("Starting kernel: %s", kernel_cmd)
await ensure_async(self._launch_kernel(kernel_cmd, **kw))
await ensure_async(self.post_start_kernel(**kw))
start_kernel = run_sync(_async_start_kernel)
async def _async_request_shutdown(self, restart: bool = False) -> None:
"""Send a shutdown request via control channel"""
content = dict(restart=restart)
msg = self.session.msg("shutdown_request", content=content)
# ensure control socket is connected
self._connect_control_socket()
self.session.send(self._control_socket, msg)
assert self.provisioner is not None
await self.provisioner.shutdown_requested(restart=restart)
self._shutdown_status = _ShutdownStatus.ShutdownRequest
request_shutdown = run_sync(_async_request_shutdown)
async def _async_finish_shutdown(
self,
waittime: t.Optional[float] = None,
pollinterval: float = 0.1,
restart: t.Optional[bool] = False,
) -> None:
"""Wait for kernel shutdown, then kill process if it doesn't shutdown.
This does not send shutdown requests - use :meth:`request_shutdown`
first.
"""
if waittime is None:
waittime = max(self.shutdown_wait_time, 0)
if self.provisioner: # Allow provisioner to override
waittime = self.provisioner.get_shutdown_wait_time(recommended=waittime)
try:
await asyncio.wait_for(
self._async_wait(pollinterval=pollinterval), timeout=waittime / 2
)
except asyncio.TimeoutError:
self.log.debug("Kernel is taking too long to finish, terminating")
self._shutdown_status = _ShutdownStatus.SigtermRequest
await ensure_async(self._send_kernel_sigterm())
try:
await asyncio.wait_for(
self._async_wait(pollinterval=pollinterval), timeout=waittime / 2
)
except asyncio.TimeoutError:
self.log.debug("Kernel is taking too long to finish, killing")
self._shutdown_status = _ShutdownStatus.SigkillRequest
await ensure_async(self._kill_kernel(restart=restart))
else:
# Process is no longer alive, wait and clear
if self.has_kernel:
assert self.provisioner is not None
await self.provisioner.wait()
finish_shutdown = run_sync(_async_finish_shutdown)
async def _async_cleanup_resources(self, restart: bool = False) -> None:
"""Clean up resources when the kernel is shut down"""
if not restart:
self.cleanup_connection_file()
self.cleanup_ipc_files()
self._close_control_socket()
self.session.parent = None
if self._created_context and not restart:
self.context.destroy(linger=100)
if self.provisioner:
await self.provisioner.cleanup(restart=restart)
cleanup_resources = run_sync(_async_cleanup_resources)
@in_pending_state
async def _async_shutdown_kernel(self, now: bool = False, restart: bool = False) -> None:
"""Attempts to stop the kernel process cleanly.
This attempts to shut down the kernel cleanly by:
1. Sending it a shutdown message over the control channel.
2. If that fails, the kernel is shut down forcibly by sending it
a signal.
Parameters
----------
now : bool
Should the kernel be forcibly killed *now*. This skips the
first, nice shutdown attempt.
restart: bool
Will this kernel be restarted after it is shutdown. When this
is True, connection files will not be cleaned up.
"""
self.shutting_down = True # Used by restarter to prevent race condition
# Stop monitoring for restarting while we shutdown.
self.stop_restarter()
if self.has_kernel:
await ensure_async(self.interrupt_kernel())
if now:
await ensure_async(self._kill_kernel())
else:
await ensure_async(self.request_shutdown(restart=restart))
# Don't send any additional kernel kill messages immediately, to give
# the kernel a chance to properly execute shutdown actions. Wait for at
# most 1s, checking every 0.1s.
await ensure_async(self.finish_shutdown(restart=restart))
await ensure_async(self.cleanup_resources(restart=restart))
shutdown_kernel = run_sync(_async_shutdown_kernel)
async def _async_restart_kernel(
self, now: bool = False, newports: bool = False, **kw: Any
) -> None:
"""Restarts a kernel with the arguments that were used to launch it.
Parameters
----------
now : bool, optional
If True, the kernel is forcefully restarted *immediately*, without
having a chance to do any cleanup action. Otherwise the kernel is
given 1s to clean up before a forceful restart is issued.
In all cases the kernel is restarted; the only difference is whether
it is given a chance to perform a clean shutdown or not.
newports : bool, optional
If the old kernel was launched with random ports, this flag decides
whether the same ports and connection file will be used again.
If False, the same ports and connection file are used. This is
the default. If True, new random port numbers are chosen and a
new connection file is written. It is still possible that the newly
chosen random port numbers happen to be the same as the old ones.
`**kw` : optional
Any options specified here will overwrite those used to launch the
kernel.
"""
if self._launch_args is None:
raise RuntimeError("Cannot restart the kernel. No previous call to 'start_kernel'.")
# Stop currently running kernel.
await ensure_async(self.shutdown_kernel(now=now, restart=True))
if newports:
self.cleanup_random_ports()
# Start new kernel.
self._launch_args.update(kw)
await ensure_async(self.start_kernel(**self._launch_args))
restart_kernel = run_sync(_async_restart_kernel)
@property
def has_kernel(self) -> bool:
"""Has a kernel process been started that we are actively managing."""
return self.provisioner is not None and self.provisioner.has_process
async def _async_send_kernel_sigterm(self, restart: bool = False) -> None:
"""similar to _kill_kernel, but with sigterm (not sigkill), but do not block"""
if self.has_kernel:
assert self.provisioner is not None
await self.provisioner.terminate(restart=restart)
_send_kernel_sigterm = run_sync(_async_send_kernel_sigterm)
async def _async_kill_kernel(self, restart: bool = False) -> None:
"""Kill the running kernel.
This is a private method, callers should use shutdown_kernel(now=True).
"""
if self.has_kernel:
assert self.provisioner is not None
await self.provisioner.kill(restart=restart)
# Wait until the kernel terminates.
try:
await asyncio.wait_for(self._async_wait(), timeout=5.0)
except asyncio.TimeoutError:
# Wait timed out, just log warning but continue - not much more we can do.
self.log.warning("Wait for final termination of kernel timed out - continuing...")
pass
else:
# Process is no longer alive, wait and clear
if self.has_kernel:
await self.provisioner.wait()
_kill_kernel = run_sync(_async_kill_kernel)
async def _async_interrupt_kernel(self) -> None:
"""Interrupts the kernel by sending it a signal.
Unlike ``signal_kernel``, this operation is well supported on all
platforms.
"""
if self.has_kernel:
assert self.kernel_spec is not None
interrupt_mode = self.kernel_spec.interrupt_mode
if interrupt_mode == "signal":
await ensure_async(self.signal_kernel(signal.SIGINT))
elif interrupt_mode == "message":
msg = self.session.msg("interrupt_request", content={})
self._connect_control_socket()
self.session.send(self._control_socket, msg)
else:
raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
interrupt_kernel = run_sync(_async_interrupt_kernel)
async def _async_signal_kernel(self, signum: int) -> None:
"""Sends a signal to the process group of the kernel (this
usually includes the kernel and any subprocesses spawned by
the kernel).
Note that since only SIGTERM is supported on Windows, this function is
only useful on Unix systems.
"""
if self.has_kernel:
assert self.provisioner is not None
await self.provisioner.send_signal(signum)
else:
raise RuntimeError("Cannot signal kernel. No kernel is running!")
signal_kernel = run_sync(_async_signal_kernel)
async def _async_is_alive(self) -> bool:
"""Is the kernel process still running?"""
if self.has_kernel:
assert self.provisioner is not None
ret = await self.provisioner.poll()
if ret is None:
return True
return False
is_alive = run_sync(_async_is_alive)
async def _async_wait(self, pollinterval: float = 0.1) -> None:
# Use busy loop at 100ms intervals, polling until the process is
# not alive. If we find the process is no longer alive, complete
# its cleanup via the blocking wait(). Callers are responsible for
# issuing calls to wait() using a timeout (see _kill_kernel()).
while await ensure_async(self.is_alive()):
await asyncio.sleep(pollinterval)
class AsyncKernelManager(KernelManager):
# the class to create with our `client` method
client_class: DottedObjectName = DottedObjectName(
"jupyter_client.asynchronous.AsyncKernelClient"
)
client_factory: Type = Type(klass="jupyter_client.asynchronous.AsyncKernelClient")
_launch_kernel = KernelManager._async_launch_kernel
start_kernel = KernelManager._async_start_kernel
pre_start_kernel = KernelManager._async_pre_start_kernel
post_start_kernel = KernelManager._async_post_start_kernel
request_shutdown = KernelManager._async_request_shutdown
finish_shutdown = KernelManager._async_finish_shutdown
cleanup_resources = KernelManager._async_cleanup_resources
shutdown_kernel = KernelManager._async_shutdown_kernel
restart_kernel = KernelManager._async_restart_kernel
_send_kernel_sigterm = KernelManager._async_send_kernel_sigterm
_kill_kernel = KernelManager._async_kill_kernel
interrupt_kernel = KernelManager._async_interrupt_kernel
signal_kernel = KernelManager._async_signal_kernel
is_alive = KernelManager._async_is_alive
KernelManagerABC.register(KernelManager)
def start_new_kernel(
startup_timeout: float = 60, kernel_name: str = "python", **kwargs: Any
) -> t.Tuple[KernelManager, KernelClient]:
"""Start a new kernel, and return its Manager and Client"""
km = KernelManager(kernel_name=kernel_name)
km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
km.shutdown_kernel()
raise
return km, kc
async def start_new_async_kernel(
startup_timeout: float = 60, kernel_name: str = "python", **kwargs: Any
) -> t.Tuple[AsyncKernelManager, KernelClient]:
"""Start a new kernel, and return its Manager and Client"""
km = AsyncKernelManager(kernel_name=kernel_name)
await km.start_kernel(**kwargs)
kc = km.client()
kc.start_channels()
try:
await kc.wait_for_ready(timeout=startup_timeout)
except RuntimeError:
kc.stop_channels()
await km.shutdown_kernel()
raise
return (km, kc)
@contextmanager
def run_kernel(**kwargs: Any) -> t.Iterator[KernelClient]:
"""Context manager to create a kernel in a subprocess.
The kernel is shut down when the context exits.
Returns
-------
kernel_client: connected KernelClient instance
"""
km, kc = start_new_kernel(**kwargs)
try:
yield kc
finally:
kc.stop_channels()
km.shutdown_kernel(now=True)
```
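To make the helpers at the end of this module concrete, here is a minimal usage sketch. It assumes this is jupyter_client's `manager` module and that an ipykernel-based `python3` kernelspec is installed; `execute()` and `get_shell_msg()` belong to the wider `KernelClient` API rather than the code shown above.
```python
# Hypothetical demo, not part of the module above.
from jupyter_client.manager import run_kernel

# run_kernel() starts a kernel subprocess, yields a connected client,
# and shuts the kernel down again when the context exits.
with run_kernel(kernel_name="python3") as kc:
    msg_id = kc.execute("a = 1 + 1")      # send an execute_request
    reply = kc.get_shell_msg(timeout=10)  # wait for the execute_reply
    print(reply["content"]["status"])     # expected: "ok"
```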
#### File: jupyter_server/extension/application.py
```python
import logging
import re
import sys
from jinja2 import Environment, FileSystemLoader
from jupyter_core.application import JupyterApp, NoStart
from tornado.log import LogFormatter
from tornado.web import RedirectHandler
from traitlets import Any, Bool, Dict, HasTraits, List, Unicode, default
from traitlets.config import Config
from jupyter_server.serverapp import ServerApp
from jupyter_server.transutils import _i18n
from jupyter_server.utils import is_namespace_package, url_path_join
from .handler import ExtensionHandlerMixin
# -----------------------------------------------------------------------------
# Util functions and classes.
# -----------------------------------------------------------------------------
def _preparse_for_subcommand(Application, argv):
"""Preparse command line to look for subcommands."""
# Read in arguments from command line.
if len(argv) == 0:
return
# Find any subcommands.
if Application.subcommands and len(argv) > 0:
# we have subcommands, and one may have been specified
subc, subargv = argv[0], argv[1:]
if re.match(r"^\w(\-?\w)*$", subc) and subc in Application.subcommands:
# it's a subcommand, and *not* a flag or class parameter
app = Application()
app.initialize_subcommand(subc, subargv)
return app.subapp
def _preparse_for_stopping_flags(Application, argv):
"""Looks for 'help', 'version', and 'generate-config; commands
in command line. If found, raises the help and version of
current Application.
This is useful for traitlets applications that have to parse
the command line multiple times, but want to control when
when 'help' and 'version' is raised.
"""
# Arguments after a '--' argument are for the script IPython may be
# about to run, not IPython itself. For arguments parsed here (help and
# version), we want to only search the arguments up to the first
# occurrence of '--', which we're calling interpreted_argv.
try:
interpreted_argv = argv[: argv.index("--")]
except ValueError:
interpreted_argv = argv
# Catch any help calls.
if any(x in interpreted_argv for x in ("-h", "--help-all", "--help")):
app = Application()
app.print_help("--help-all" in interpreted_argv)
app.exit(0)
# Catch version commands
if "--version" in interpreted_argv or "-V" in interpreted_argv:
app = Application()
app.print_version()
app.exit(0)
# Catch generate-config commands.
if "--generate-config" in interpreted_argv:
app = Application()
app.write_default_config()
app.exit(0)
class ExtensionAppJinjaMixin(HasTraits):
"""Use Jinja templates for HTML templates on top of an ExtensionApp."""
jinja2_options = Dict(
help=_i18n(
"""Options to pass to the jinja2 environment for this
"""
)
).tag(config=True)
def _prepare_templates(self):
# Get templates defined in a subclass.
self.initialize_templates()
# Add templates to web app settings if extension has templates.
if len(self.template_paths) > 0:
self.settings.update({f"{self.name}_template_paths": self.template_paths})
# Create a jinja environment for logging html templates.
self.jinja2_env = Environment(
loader=FileSystemLoader(self.template_paths),
extensions=["jinja2.ext.i18n"],
autoescape=True,
**self.jinja2_options,
)
# Add the jinja2 environment for this extension to the tornado settings.
self.settings.update({f"{self.name}_jinja2_env": self.jinja2_env})
# -----------------------------------------------------------------------------
# ExtensionApp
# -----------------------------------------------------------------------------
class JupyterServerExtensionException(Exception):
"""Exception class for raising for Server extensions errors."""
# -----------------------------------------------------------------------------
# ExtensionApp
# -----------------------------------------------------------------------------
class ExtensionApp(JupyterApp):
"""Base class for configurable Jupyter Server Extension Applications.
ExtensionApp subclasses can be initialized two ways:
1. Extension is listed as a jpserver_extension, and ServerApp calls
its load_jupyter_server_extension classmethod. This is the
classic way of loading a server extension.
2. Extension is launched directly by calling its `launch_instance`
class method. This method can be set as an entry_point in
the extension's setup.py
"""
# Subclasses should override this trait. Tells the server if
# this extension allows other extensions to be loaded
# side-by-side when launched directly.
load_other_extensions = True
# A useful class property that subclasses can override to
# configure the underlying Jupyter Server when this extension
# is launched directly (using its `launch_instance` method).
serverapp_config = {}
# Some subclasses will likely override this trait to flip
# the default value to False if they don't offer a browser
# based frontend.
open_browser = Bool(
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(ServerApp.browser) configuration option.
"""
).tag(config=True)
@default("open_browser")
def _default_open_browser(self):
return self.serverapp.config["ServerApp"].get("open_browser", True)
@property
def config_file_paths(self):
"""Look on the same path as our parent for config files"""
# rely on parent serverapp, which should control all config loading
return self.serverapp.config_file_paths
# The extension name used to name the jupyter config
# file, jupyter_{name}_config.
# This should also match the jupyter subcommand used to launch
# this extension from the CLI, e.g. `jupyter {name}`.
name = None
@classmethod
def get_extension_package(cls):
parts = cls.__module__.split(".")
if is_namespace_package(parts[0]):
# in this case the package name is `<namespace>.<package>`.
return ".".join(parts[0:2])
return parts[0]
@classmethod
def get_extension_point(cls):
return cls.__module__
# Extension URL sets the default landing page for this extension.
extension_url = "/"
default_url = Unicode().tag(config=True)
@default("default_url")
def _default_url(self):
return self.extension_url
file_url_prefix = Unicode("notebooks")
# Is this linked to a serverapp yet?
_linked = Bool(False)
# Extension can configure the ServerApp from the command-line
classes = [
ServerApp,
]
# A ServerApp is not defined yet, but will be initialized below.
serverapp = Any()
@default("serverapp")
def _default_serverapp(self):
# load the current global instance, if any
if ServerApp.initialized():
try:
return ServerApp.instance()
except Exception:
# error retrieving instance, e.g. MultipleInstanceError
pass
# serverapp accessed before it was defined,
# declare an empty one
return ServerApp()
_log_formatter_cls = LogFormatter
@default("log_level")
def _default_log_level(self):
return logging.INFO
@default("log_format")
def _default_log_format(self):
"""override default log format to include date & time"""
return (
"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
)
static_url_prefix = Unicode(
help="""Url where the static assets for the extension are served."""
).tag(config=True)
@default("static_url_prefix")
def _default_static_url_prefix(self):
static_url = f"static/{self.name}/"
return url_path_join(self.serverapp.base_url, static_url)
static_paths = List(
Unicode(),
help="""paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython
""",
).tag(config=True)
template_paths = List(
Unicode(),
help=_i18n(
"""Paths to search for serving jinja templates.
Can be used to override templates from notebook.templates."""
),
).tag(config=True)
settings = Dict(help=_i18n("""Settings that will passed to the server.""")).tag(config=True)
handlers = List(help=_i18n("""Handlers appended to the server.""")).tag(config=True)
def _config_file_name_default(self):
"""The default config file name."""
if not self.name:
return ""
return "jupyter_{}_config".format(self.name.replace("-", "_"))
def initialize_settings(self):
"""Override this method to add handling of settings."""
pass
def initialize_handlers(self):
"""Override this method to append handlers to a Jupyter Server."""
pass
def initialize_templates(self):
"""Override this method to add handling of template files."""
pass
def _prepare_config(self):
"""Builds a Config object from the extension's traits and passes
the object to the webapp's settings as `<name>_config`.
"""
traits = self.class_own_traits().keys()
self.extension_config = Config({t: getattr(self, t) for t in traits})
self.settings[f"{self.name}_config"] = self.extension_config
def _prepare_settings(self):
# Make webapp settings accessible to initialize_settings method
webapp = self.serverapp.web_app
self.settings.update(**webapp.settings)
# Add static and template paths to settings.
self.settings.update(
{
f"{self.name}_static_paths": self.static_paths,
f"{self.name}": self,
}
)
# Get setting defined by subclass using initialize_settings method.
self.initialize_settings()
# Update server settings with extension settings.
webapp.settings.update(**self.settings)
def _prepare_handlers(self):
webapp = self.serverapp.web_app
# Get handlers defined by extension subclass.
self.initialize_handlers()
# prepend base_url onto the patterns that we match
new_handlers = []
for handler_items in self.handlers:
# Build url pattern including base_url
pattern = url_path_join(webapp.settings["base_url"], handler_items[0])
handler = handler_items[1]
# Get handler kwargs, if given
kwargs = {}
if issubclass(handler, ExtensionHandlerMixin):
kwargs["name"] = self.name
try:
kwargs.update(handler_items[2])
except IndexError:
pass
new_handler = (pattern, handler, kwargs)
new_handlers.append(new_handler)
# Add static endpoint for this extension, if static paths are given.
if len(self.static_paths) > 0:
# Append the extension's static directory to server handlers.
static_url = url_path_join(self.static_url_prefix, "(.*)")
# Construct handler.
handler = (
static_url,
webapp.settings["static_handler_class"],
{"path": self.static_paths},
)
new_handlers.append(handler)
webapp.add_handlers(".*$", new_handlers)
def _prepare_templates(self):
# Add templates to web app settings if extension has templates.
if len(self.template_paths) > 0:
self.settings.update({f"{self.name}_template_paths": self.template_paths})
self.initialize_templates()
def _jupyter_server_config(self):
base_config = {
"ServerApp": {
"default_url": self.default_url,
"open_browser": self.open_browser,
"file_url_prefix": self.file_url_prefix,
}
}
base_config["ServerApp"].update(self.serverapp_config)
return base_config
def _link_jupyter_server_extension(self, serverapp):
"""Link the ExtensionApp to an initialized ServerApp.
The ServerApp is stored as an attribute and config
is exchanged between ServerApp and `self` in case
the command line contains traits for the ExtensionApp
or the ExtensionApp's config files have server
settings.
Note, the ServerApp has not initialized the Tornado
Web Application yet, so do not try to affect the
`web_app` attribute.
"""
self.serverapp = serverapp
# Load config from an ExtensionApp's config files.
self.load_config_file()
# ServerApp's config might have picked up
# config for the ExtensionApp. We call
# update_config to update ExtensionApp's
# traits with these values found in ServerApp's
# config.
# ServerApp config ---> ExtensionApp traits
self.update_config(self.serverapp.config)
# Use ExtensionApp's CLI parser to find any extra
# args that passed through ServerApp and
# now belong to ExtensionApp.
self.parse_command_line(self.serverapp.extra_args)
# If any config should be passed upstream to the
# ServerApp, do it here.
# i.e. ServerApp traits <--- ExtensionApp config
self.serverapp.update_config(self.config)
# Acknowledge that this extension has been linked.
self._linked = True
def initialize(self):
"""Initialize the extension app. The
corresponding server app and webapp should already
be initialized by this step.
1) Appends Handlers to the ServerApp,
2) Passes config and settings from ExtensionApp
to the Tornado web application
3) Points Tornado Webapp to templates and
static assets.
"""
if not self.serverapp:
msg = (
"This extension has no attribute `serverapp`. "
"Try calling `.link_to_serverapp()` before calling "
"`.initialize()`."
)
raise JupyterServerExtensionException(msg)
self._prepare_config()
self._prepare_templates()
self._prepare_settings()
self._prepare_handlers()
def start(self):
"""Start the underlying Jupyter server.
Server should be started after extension is initialized.
"""
super().start()
# Start the server.
self.serverapp.start()
async def stop_extension(self):
"""Cleanup any resources managed by this extension."""
def stop(self):
"""Stop the underlying Jupyter server."""
self.serverapp.stop()
self.serverapp.clear_instance()
@classmethod
def _load_jupyter_server_extension(cls, serverapp):
"""Initialize and configure this extension, then add the extension's
settings and handlers to the server's web application.
"""
extension_manager = serverapp.extension_manager
try:
# Get loaded extension from serverapp.
point = extension_manager.extension_points[cls.name]
extension = point.app
except KeyError:
extension = cls()
extension._link_jupyter_server_extension(serverapp)
extension.initialize()
return extension
@classmethod
def load_classic_server_extension(cls, serverapp):
"""Enables extension to be loaded as classic Notebook (jupyter/notebook) extension."""
extension = cls()
extension.serverapp = serverapp
extension.load_config_file()
extension.update_config(serverapp.config)
extension.parse_command_line(serverapp.extra_args)
# Add redirects to get favicons from old locations in the classic notebook server
extension.handlers.extend(
[
(
r"/static/favicons/favicon.ico",
RedirectHandler,
{"url": url_path_join(serverapp.base_url, "static/base/images/favicon.ico")},
),
(
r"/static/favicons/favicon-busy-1.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url, "static/base/images/favicon-busy-1.ico"
)
},
),
(
r"/static/favicons/favicon-busy-2.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url, "static/base/images/favicon-busy-2.ico"
)
},
),
(
r"/static/favicons/favicon-busy-3.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url, "static/base/images/favicon-busy-3.ico"
)
},
),
(
r"/static/favicons/favicon-file.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url, "static/base/images/favicon-file.ico"
)
},
),
(
r"/static/favicons/favicon-notebook.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url,
"static/base/images/favicon-notebook.ico",
)
},
),
(
r"/static/favicons/favicon-terminal.ico",
RedirectHandler,
{
"url": url_path_join(
serverapp.base_url,
"static/base/images/favicon-terminal.ico",
)
},
),
(
r"/static/logo/logo.png",
RedirectHandler,
{"url": url_path_join(serverapp.base_url, "static/base/images/logo.png")},
),
]
)
extension.initialize()
@classmethod
def initialize_server(cls, argv=None, load_other_extensions=True, **kwargs):
"""Creates an instance of ServerApp and explicitly sets
this extension to enabled=True (i.e. superseding disabling
found in other config from files).
The `launch_instance` method uses this method to initialize
and start a server.
"""
jpserver_extensions = {cls.get_extension_package(): True}
find_extensions = cls.load_other_extensions
if "jpserver_extensions" in cls.serverapp_config:
jpserver_extensions.update(cls.serverapp_config["jpserver_extensions"])
cls.serverapp_config["jpserver_extensions"] = jpserver_extensions
find_extensions = False
serverapp = ServerApp.instance(jpserver_extensions=jpserver_extensions, **kwargs)
serverapp.aliases.update(cls.aliases)
serverapp.initialize(
argv=argv or [],
starter_extension=cls.name,
find_extensions=find_extensions,
)
return serverapp
@classmethod
def launch_instance(cls, argv=None, **kwargs):
"""Launch the extension like an application. Initializes+configs a stock server
and appends the extension to the server. Then starts the server and routes to
extension's landing page.
"""
# Handle arguments.
if argv is None:
args = sys.argv[1:] # slice out extension config.
else:
args = argv
# Handle all "stops" that could happen before
# continuing to launch a server+extension.
subapp = _preparse_for_subcommand(cls, args)
if subapp:
subapp.start()
return
# Check for help, version, and generate-config arguments
# before initializing server to make sure these
# arguments trigger actions from the extension not the server.
_preparse_for_stopping_flags(cls, args)
serverapp = cls.initialize_server(argv=args)
# Log if extension is blocking other extensions from loading.
if not cls.load_other_extensions:
serverapp.log.info(
"{ext_name} is running without loading "
"other extensions.".format(ext_name=cls.name)
)
# Start the server.
try:
serverapp.start()
except NoStart:
pass
``` |
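To make the lifecycle above concrete, a minimal, hypothetical `ExtensionApp` subclass is sketched below. The base classes, the `initialize_handlers` hook, and the `_jupyter_server_extension_points` discovery function follow the conventions in this file and in jupyter_server's extension machinery; the extension name, URL, and handler body are made-up placeholders.
```python
from tornado import web

from jupyter_server.extension.application import ExtensionApp
from jupyter_server.extension.handler import ExtensionHandlerMixin


class HelloHandler(ExtensionHandlerMixin, web.RequestHandler):
    def get(self):
        self.finish("Hello from myextension")


class MyExtensionApp(ExtensionApp):
    # Drives the config file name (jupyter_myextension_config) and the CLI name.
    name = "myextension"
    # Landing page for the extension; feeds the default_url trait above.
    extension_url = "/myextension"

    def initialize_handlers(self):
        # Relative pattern; _prepare_handlers() prepends the server base_url.
        self.handlers.append(("/myextension", HelloHandler))


def _jupyter_server_extension_points():
    # Discovery hook the server looks for in an installed extension package.
    return [{"module": "myextension", "app": MyExtensionApp}]


# With the package installed and enabled, `MyExtensionApp.launch_instance()`
# (or a `jupyter myextension` entry point) starts a ServerApp with the
# extension loaded and serves the handler registered above.
```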
{
"source": "joaopver10/Python-EstruturaDeDados_Primeiro_Curso",
"score": 4
} |
#### File: Python-EstruturaDeDados_Primeiro_Curso/Lista Encadeada Simples/ListaEncadeadaSimples.py
```python
class No: # Class used to create each node
def __init__(self, valor):
self.valor = valor
self.prox = None
def mostra_no(self):
print(self.valor)
class ListaEncadeada: # Stores the structure of all the nodes
def __init__(self, nodecount=0):
# regardless of the number of elements, it always runs the same number of steps,
# so the complexity is O(1)
self.primeiro = None
self.nodecount = nodecount
def insere_inicio(self, valor): # O(1)
novo = No(valor)
novo.prox = self.primeiro
self.primeiro = novo
self.nodecount = self.nodecount + 1
def bubbsort(self): # worst-case complexity is O(n²) because there are two nested loops
for i in range(self.nodecount - 1):
atual = self.primeiro
proximo = atual.prox
anterior = None
while proximo:
if atual.valor > proximo.valor:
if anterior == None:
anterior = atual.prox
proximo = proximo.prox
anterior.prox = atual
atual.prox = proximo
self.primeiro = anterior
else:
temp = proximo
proximo = proximo.prox
anterior.prox = atual.prox
anterior = temp
temp.prox = atual
atual.prox = proximo
else:
anterior = atual
atual = proximo
proximo = proximo.prox
i = i + i
def mostrar(self): # O(n)
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
while atual != None:
atual.mostra_no()
atual = atual.prox
def pesquisa(self, valor): # O(n)
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
while atual.valor != valor:
if atual.prox == None:
return None
else:
atual = atual.prox
return atual
def excluir_inicio(self): # O(1)
if self.primeiro == None:
print('A lista está vazia')
return None
temp = self.primeiro
self.primeiro = self.primeiro.prox
return temp
def excluir_posicao(self, valor): # O(n): the complexity grows with the size of the input
if self.primeiro == None:
print('A lista está vazia')
return None
atual = self.primeiro
anterior = self.primeiro
while atual.valor != valor:
if atual.prox == None:
return None
else:
anterior = atual
atual = atual.prox
if atual == self.primeiro:
self.primeiro = self.primeiro.prox
else:
anterior.prox = atual.prox
return atual
lista = ListaEncadeada()
lista.insere_inicio(20)
lista.insere_inicio(10)
lista.insere_inicio(30)
lista.insere_inicio(40)
lista.insere_inicio(5)
print(lista.mostrar())
print('----------')
print('Depois de Ordernar a lista')
print('----------')
lista.bubbsort()
print(lista.mostrar())
```
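A short continuation of the demo above (it assumes it is appended to the same script), exercising the search and removal methods that the original demo never calls:
```python
# Search for a value; pesquisa() returns the node or None.
encontrado = lista.pesquisa(30)
if encontrado is not None:
    print('Found node with value:', encontrado.valor)

lista.excluir_inicio()       # O(1): unlinks the current head node
lista.excluir_posicao(30)    # O(n): walks the list and unlinks the node holding 30
lista.mostrar()
```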
#### File: Python-EstruturaDeDados_Primeiro_Curso/Pesquisa Binaria/PesquisaBinaria.py
```python
import numpy as np
class VetorOrd:
def __init__(self, capacidade):
self.capacidade = capacidade
self.ultimaP = -1
self.valores = np.empty(self.capacidade, dtype=int)
def mostra(self):
if self.ultimaP == -1:
print('O vetor está vazio')
else:
for i in range(self.ultimaP + 1):
print(i, '-', self.valores[i])
def insere(self, valor):
if self.ultimaP == self.capacidade - 1:
print('Capacidade maxima atingida')
return
posicao = 0
for i in range(self.ultimaP + 1):
posicao = i
if self.valores[i] > valor:
break
if i == self.ultimaP:
posicao = i + 1
x = self.ultimaP
while x >= posicao:
self.valores[x + 1] = self.valores[x]
x -= 1
self.valores[posicao] = valor
self.ultimaP += 1
def pesq_binaria(self, valor):
limite_inferior = 0
limite_superior = self.ultimaP
while True:
posicao_atual = int((limite_inferior + limite_superior) / 2)
if self.valores[posicao_atual] == valor:
return posicao_atual
elif limite_inferior > limite_superior:
return -1
else:
if self.valores[posicao_atual] < valor:
limite_inferior = posicao_atual + 1
else:
limite_superior = posicao_atual - 1
def excluir(self, valor):
posicao = self.pesq_binaria(valor)
if posicao == -1:
return -1
else:
for i in range(posicao, self.ultimaP):
self.valores[i] = self.valores[i + 1]
self.ultimaP -= 1
vetor = VetorOrd(10)
vetor.insere(6)
vetor.insere(4)
vetor.insere(1)
vetor.insere(3)
vetor.insere(2)
vetor.insere(8)
vetor.insere(9)
vetor.mostra()
print('----------------------')
print('----------------------')
print('----------------------')
print(vetor.pesq_binaria(4))
```
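Continuing the demo above (appended to the same script): a miss returns -1, and `excluir()` reuses the binary search to locate the element before shifting the remaining values left.
```python
print(vetor.pesq_binaria(7))   # 7 was never inserted, so this prints -1
vetor.excluir(4)               # locates 4 via pesq_binaria() and removes it
vetor.mostra()
```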
#### File: Python-EstruturaDeDados_Primeiro_Curso/Pilhas/Pilhas.py
```python
import numpy as np
class Pilha:
def __init__(self, capacidade):
self.__capacidade = capacidade
self.__topo = -1
self.__valores = np.empty(self.__capacidade, dtype=int)
def __pilha_cheia(self):
if self.__topo == self.__capacidade - 1:
return True
else:
return False
def __pilha_vazia(self):
if self.__topo == -1:
return True
else:
return False
def empilhar(self, valor):
if self.__pilha_cheia():
print('A pilha esta cheia')
else:
self.__topo += 1
self.__valores[self.__topo] = valor
def desempilhar(self):
if self.__pilha_vazia():
print('A pilha esta vazia')
else:
self.__topo -= 1
def ver_topo(self):
if self.__topo != -1:
return self.__valores[self.__topo]
else:
return -1
pilha = Pilha(5)
pilha.empilhar(2)
pilha.empilhar(3)
pilha.empilhar(4)
pilha.empilhar(5)
pilha.empilhar(8)
pilha.desempilhar()
pilha.desempilhar()
print(pilha.ver_topo())
pilha.empilhar(5)
print(pilha.ver_topo())
``` |
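A small continuation of the demo above showing the LIFO order. Since `desempilhar()` does not return the removed value, the top is read with `ver_topo()` before each pop; `-1` doubles as the empty sentinel, which works here because only positive values were pushed.
```python
while pilha.ver_topo() != -1:
    print(pilha.ver_topo())   # prints 5, 4, 3, 2 (last in, first out)
    pilha.desempilhar()
```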
{
"source": "joaoqalves/web2py-oauth2",
"score": 2
} |
#### File: oauth/exceptions/__init__.py
```python
# def __init__(self, http_response, token_type,
# realm, error, msg, the_scope=None):
# self.http_response = http_response
# self.token_type = token_type
# self.realm = realm
# self.error = error
# self.msg = msg
# self.the_scope = the_scope or 'No the_scope provided'
# def __str__(self):
# return "".join([self.http_response, ' | ', self.token_type, ' | ',
# self.realm, ' | ', self.error, ' | ', self.msg,
# ' | ', self.the_scope])
# # Previous [concat.] method was too slow, see my benchmarks: http://stackoverflow.com/a/14610440/587021
# class OAuth2RedirectException(Exception):
# """Redirect the end-user's user agent with error message. It takes 3
# arguments:
# * An URI where the user should be redirect, after the authentication
# * The error code
# * A human readable error message, with additional information
# * [OPTIONAL] Required if the "state" parameter was present in the client
# authorization request
# """
# def __init__(self, redirect_uri, error, msg, state=None):
# self.redirect_uri = redirect_uri
# self.error = error
# self.msg = msg
# self.state = state or 'No state provided'
# def __str__(self):
# return "".join([self.redirect_uri, ' | ', self.error, ' | ',
# self.msg, ' | ', self.state])
# # Previous [concat.] method was too slow, see my benchmarks: http://stackoverflow.com/a/14610440/587021
# class OAuth2ServerException(Exception):
# """Server exception. Something is missing and the request could not be
# performed. It takes 3 arguments:
# * The HTTP status code message as predefined
# * The error code
# * A human readable error message, with additional information about
# """
# def __init__(self, http_response, error, msg):
# self.http_response = http_response
# self.error = error
# self.msg = msg
# def __str__(self):
# return "".join([self.http_response, ' | ', self.error, ' | ', self.msg])
``` |
{
"source": "joaoquintas/auction_methods_stack",
"score": 3
} |
#### File: k-saap_pkg/graphs/dijkstra.py
```python
from priodict import priorityDictionary
def Dijkstra(G,start,end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge. This is related to the representation in
<http://www.python.org/doc/essays/graphs.html>
where Guido van Rossum suggests representing graphs
as dictionaries mapping vertices to lists of neighbors,
however dictionaries of edges have many advantages
over lists: they can store extra information (here,
the lengths), they support fast existence tests,
and they allow easy modification of the graph by edge
insertion and removal. Such modifications are not
needed here but are important in other graph algorithms.
Since dictionaries obey iterator protocol, a graph
represented as described here could be handed without
modification to an algorithm using Guido's representation.
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
"""
D = {} # dictionary of final distances
P = {} # dictionary of predecessors
Q = priorityDictionary() # est.dist. of non-final vert.
Q[start] = 0
for v in Q:
D[v] = Q[v]
if v == end: break
for w in G[v]:
vwLength = D[v] + G[v][w]
if w in D:
if vwLength < D[w]:
raise ValueError, \
"Dijkstra: found better path to already-final vertex"
elif w not in Q or vwLength < Q[w]:
Q[w] = vwLength
P[w] = v
return (D,P)
def shortestPath(G,start,end):
"""
Find a single shortest path from the given start vertex
to the given end vertex.
The input has the same conventions as Dijkstra().
The output is a list of the vertices in order along
the shortest path.
"""
D,P = Dijkstra(G,start,end)
Path = []
while 1:
Path.append(end)
if end == start: break
end = P[end]
Path.reverse()
return Path
## end of http://code.activestate.com/recipes/119466/ }}}
```
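A small usage sketch for the two functions above (Python 2, matching the rest of this package, and assuming it is appended to the same file). The graph follows the representation described in the `Dijkstra()` docstring: a dict of dicts where `G[v][w]` is the edge length.
```python
G = {
    'a': {'b': 1, 'c': 4},
    'b': {'c': 2, 'd': 5},
    'c': {'d': 1},
    'd': {},
}

print shortestPath(G, 'a', 'd')   # ['a', 'b', 'c', 'd']

D, P = Dijkstra(G, 'a')
print D['d']                      # 4 (via a -> b -> c -> d)
```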
#### File: k-saap_pkg/src/auctioneer.py
```python
import roslib; roslib.load_manifest('k-saap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
# import services functions
import auction_common
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = ''
winner_cost = 999999
#####################################################################################
## Auction Service (Server Callback)
#####################################################################################
def handle_auction_server_callback(auction_req):
# define global variables
global winner_id
global winner_cost
# update number of messages in parameter server
if rospy.has_param('/num_messages'):
num_messages = rospy.get_param('/num_messages')
num_messages += 2
rospy.set_param('/num_messages', num_messages)
# default bid
bid = auction_msgs.msg.Bid()
# obtain auctioneer_position
auctioneer_position = {'auctioneer_position': rospy.get_param('~position')}
# Obtain nodes list to relay information with k=1
neighbour_nodes_relay_list = auction_common.create_neighbour_nodes_list(auction_req)
print neighbour_nodes_relay_list
# Prepare auction information
if auction_req.auction_data.command == 'close_auction':
auction_req.role = 'none'
else:
auction_req.role = "be_buyer"
auction_req.sending_node = rospy.get_name()
# updated nodes_collected
if rospy.has_param('/nodes_collected'):
auction_req.nodes_collected = rospy.get_param('/nodes_collected')+','+rospy.get_name()
rospy.set_param('/nodes_collected',auction_req.nodes_collected)
else:
auction_req.nodes_collected = rospy.get_param('~neighbour_nodes_list')
# Call the Auction Service from each neighbour node
for node in neighbour_nodes_relay_list:
# compose service name (to be changed)
service_path = node+'/auction_server'
# wait for the service in the neighbour node to be available
rospy.wait_for_service(service_path)
neighbour_node_auction_server = rospy.ServiceProxy(service_path,
auction_srvs.srv.AuctionService,headers=auctioneer_position)
try:
bid_response = neighbour_node_auction_server(auction_req)
bid = bid_response.bid_data
# Evaluate bids, Min(cost_distance)
if winner_cost >= bid.cost_distance:
if bid.buyer_id != '':
winner_cost = bid.cost_distance
winner_id = bid.buyer_id
# log info for momentary winner
# rospy.loginfo("(winning at the moment) %s with offer %d",winner_id, winner_cost)
except rospy.ServiceException, e:
rospy.loginfo("Service call failed: %s",e)
# verbose for auction status (received all the bids)
rospy.loginfo("winner was: %s with offer %d",winner_id, winner_cost)
# return response
# return auction_srvs.srv.AuctionServiceResponse(bid_response)
return {'response_info': 'valid', 'bid_data': bid}
## End Auction Server (Server Callback)
```
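A hedged sketch of how a callback like the one above is typically registered as a ROS service. The node name and the use of a private `~auction_server` name (so callers can reach `<node>/auction_server`) are assumptions for illustration; `rospy.init_node`, `rospy.Service`, and `rospy.spin` are standard rospy APIs.
```python
import rospy
import auction_srvs.srv
# handle_auction_server_callback is the callback defined in the file above.

if __name__ == '__main__':
    rospy.init_node('auctioneer')
    # A private name resolves to /<node_name>/auction_server, matching the
    # service_path composed by the client nodes in this package.
    service = rospy.Service('~auction_server',
                            auction_srvs.srv.AuctionService,
                            handle_auction_server_callback)
    rospy.loginfo("auction_server service ready")
    rospy.spin()
```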
#### File: k-saap_pkg/src/buyer_k_saap.py
```python
import roslib; roslib.load_manifest('k-saap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
import auction_common
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = 'none'
winner_cost = 0
#####################################################################################
## Buyer Service Callback
#####################################################################################
def handle_auction_server_callback(auction_req):
# update number of messages in parameter server
if rospy.has_param('/num_messages'):
num_messages = rospy.get_param('/num_messages')
num_messages += 2
rospy.set_param('/num_messages', num_messages)
# Graph parameters
graph_parameters = eval(rospy.get_param("graph_info"))
N = int(graph_parameters[0])
l = int(graph_parameters[1])
d = int(graph_parameters[2])
r = math.sqrt((d*l*l)/(math.pi*(N-1)))
print N, l, d, r
# Calculate k (number of hop)
node_position = eval(rospy.get_param('~position'))
auctioneer_position = eval(auction_req._connection_header['auctioneer_position'])
x = float(node_position[0])-float(auctioneer_position[0])
y = float(node_position[1])-float(auctioneer_position[1])
z = float(node_position[2])-float(auctioneer_position[2])
distance_from_node_to_auctioneer = math.sqrt(x*x+y*y+z*z)
k = int(distance_from_node_to_auctioneer/r)
print distance_from_node_to_auctioneer, k
# Create a bid message to put an offer for the item in auction_req!
bid_response = auction_msgs.msg.Bid()
bid_response.header.frame_id = 'base_link' # to be rechecked
bid_response.header.stamp = rospy.Time.now()
bid_response.buyer_id = rospy.get_name()
if auction_req.auction_data.metrics == "distance":
# to be given by the cost to go to the position of the occurring event
# the cost for the metrics==distance is calculated using the euclidean
# distance between the parameter position of the node and the task_position
# given in the auction_req
node_position = eval(rospy.get_param('~position'))
x = float(node_position[0])-auction_req.auction_data.task_location.x
y = float(node_position[1])-auction_req.auction_data.task_location.y
z = float(node_position[2])-auction_req.auction_data.task_location.z
bid_response.cost_distance = float(math.sqrt(x*x+y*y+z*z))
else:
rospy.loginfo("Metrics unkown")
bid_response.cost_distance = 999999;
print auction_req.auction_data.subject
# Check if node is in the k-hops required range
if k < int(auction_req.auction_data.subject):
# Relay information to neighbour nodes!
neighbour_nodes_relay_list = auction_common.create_neighbour_nodes_list(auction_req)
print neighbour_nodes_relay_list
if neighbour_nodes_relay_list:
# Prepare auction information
if auction_req.auction_data.command == 'join_auction':
role = 'be_buyer'
else:
role = 'none'
auction_req.sending_node = rospy.get_name()
# updated nodes_collected
if rospy.has_param('/nodes_collected'):
auction_req.nodes_collected = rospy.get_param('/nodes_collected')+','+rospy.get_name()
rospy.set_param('/nodes_collected',auction_req.nodes_collected)
else:
auction_req.nodes_collected = rospy.get_param('~neighbour_nodes_list')
for node in neighbour_nodes_relay_list:
# compose service name
service_path = node+'/auction_server'
rospy.wait_for_service(service_path)
neighbour_node_auction_server = rospy.ServiceProxy(service_path, auction_srvs.srv.AuctionService, headers={ 'auctioneer_position': auctioneer_position})
try:
neighbour_node_bid_response = neighbour_node_auction_server(auction_req)
# log bid information from the neighbour node (debug)
# rospy.loginfo(neighbour_node_bid_response)
except rospy.ServiceException, e:
rospy.loginfo("Service call failed: %s",e)
if neighbour_node_bid_response.bid_data.cost_distance < bid_response.cost_distance:
bid_response.buyer_id= neighbour_node_bid_response.bid_data.buyer_id
bid_response.cost_distance= neighbour_node_bid_response.bid_data.cost_distance
# return best bid
return {'response_info': 'valid'+rospy.get_name(), 'bid_data': bid_response}
## End handle_buyer_server_callback
```
#### File: saap_pkg/src/auction_common.py
```python
import roslib; roslib.load_manifest('saap_pkg')
# import client library
import rospy
# import messages
import auction_msgs.msg
# import services
import auction_srvs.srv
# import auxiliar libraries
import random
import math
# "global" variables (to be referred as global under def fun(something))
winner_id = 'none'
winner_cost = 0
role_assigned = False
node_role = 'none'
################################################################################
## Auction Client for Neighbour Nodes
## (to be called in the node to pass data to its neighbours)
################################################################################
def neighbour_node_auction_client(neighbour_node, auction_req):
# compose service name (to be changed)
service_path = neighbour_node+'/auction_server'
# wait for the service in the neighbour node to be available
rospy.wait_for_service(service_path)
try:
# create the handle to the service client in the neighbour node
neighbour_node_auction_server = rospy.ServiceProxy(service_path,
auction_srvs.srv.AuctionService)
# call the service with the current auction information as input parameter
neighbour_node_bid_response = neighbour_node_auction_server(auction_req)
# log bid information from the neighbour node (debug)
#rospy.loginfo(neighbour_node_bid_response)
# return the bid into the parent/calling node
#return {'response_info':'valid','bid_data':neighbour_node_bid_response}
return neighbour_node_bid_response.bid_data
except rospy.ServiceException, e:
rospy.loginfo("Service call failed: %s",e)
## End neighbour_node_auction_client
#########################################################################################
## Create list of neighbour nodes to relay the auction_req
## (must return a list)
#########################################################################################
def create_neighbour_nodes_list(auction_req):
neighbour_nodes_string = rospy.get_param('~neighbour_nodes_list')
neighbour_nodes_list = neighbour_nodes_string.split(',')
##debug##
#print "1."
#print neighbour_nodes_list
#for node in neighbour_nodes_list:
# print node
##debug##
#print neighbour_nodes_string
#print auction_req.nodes_collected
#nodes_collected_list = neighbour_nodes_list + auction_req.nodes_collected.split(',')
#print "Collected nodes list:"
#print nodes_collected_list
# print "Intersection:"
# print list(set(neighbour_nodes_list) & set(auction_req.nodes_collected.split(',')))
# print "Union:"
# print list(set(neighbour_nodes_list) | set(auction_req.nodes_collected.split(',')))
# print "Difference"
# print list(set(neighbour_nodes_list) - set(auction_req.nodes_collected.split(',')))
nodes_collected_list = list(set(neighbour_nodes_list) - set(auction_req.nodes_collected.split(',')))
# remove '' strings
while '' in nodes_collected_list:
nodes_collected_list.remove('')
# remove duplicates
nodes_collected_list = list(set(nodes_collected_list))
# remove self-references
while rospy.get_name() in nodes_collected_list:
nodes_collected_list.remove(rospy.get_name())
# remove references to the sender node
while auction_req.sending_node in nodes_collected_list:
nodes_collected_list.remove(auction_req.sending_node)
if nodes_collected_list:
# convert list to string splited by ','
nodes_collected_string = ','.join(nodes_collected_list)
##debug##
#print "\nNodes Collected:"+nodes_collected_string+"\n"
##debug##
neighbour_nodes_list = nodes_collected_string.split(',')
else:
neighbour_nodes_list = []
pass
return neighbour_nodes_list
#return nodes_collected_list
## End create_neighbour_nodes_list
``` |
{
"source": "joaor96/BLADE",
"score": 3
} |
#### File: joaor96/BLADE/generate.py
```python
import numpy as np
import pandas as pd
n_instances = 400
n_time_points = 5
def generate_binomial_1(n_instances,n_time_points):
n_features=2
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.9, 1)
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+3] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_1_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_1_'+str(n_time_points)+'_target.csv',quoting=1)
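# Usage note (an assumption; the excerpt defines but never calls the generators):
#     generate_binomial_1(n_instances, n_time_points)
# With the module-level settings above this writes binomial_1_5_parsed.csv
# (one row per subject_id, columns named X<feature>__<time>) and
# binomial_1_5_target.csv (a binary label per subject_id).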
def generate_binomial_2(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.1, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.9, 1)
if data[i,2] == 0 and data[i,3] == 1:
data[i,4] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 1 and data[i,3] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+7] == 0 and data[i,t*n_features+8] == 1:
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 1 and data[i,t*n_features+8] == 0:
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+9] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,4] = np.random.binomial(1, 0.1, 1)
if data[i,2] == 1 and data[i,4] == 0:
data[i,3] = np.random.binomial(1, 0.1, 1)
elif data[i,2] == 0 and data[i,4] == 1:
data[i,3] = np.random.binomial(1, 0.9, 1)
else:
data[i,3] = np.random.binomial(1, 0.5, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.3, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.7, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.9, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+7] == 1 and data[i,t*n_features+9] == 0:
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+7] == 0 and data[i,t*n_features+9] == 1:
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+8] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
for t in range(n_time_points):
df.drop(columns=["X0__"+str(t)], inplace=True)
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_binomial_3(n_instances,n_time_points):
n_features=5
data = np.zeros([n_instances, n_features*n_time_points])
data[:,0] = np.random.binomial(1, 0.5, n_instances)
data[:,1] = np.random.binomial(1, 0.5, n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,3] = np.random.binomial(1, 0.7, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,3] = np.random.binomial(1, 0.3, 1)
if data[i,1] == 0:
data[i,4] = np.random.binomial(1, 0.9, 1)
else:
data[i,4] = np.random.binomial(1, 0.1, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+8] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+9] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+9] = np.random.binomial(1, 0.1, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,2] = np.random.binomial(1, 0.1, 1)
data[i,4] = np.random.binomial(1, 0.7, 1)
else:
data[i,2] = np.random.binomial(1, 0.9, 1)
data[i,4] = np.random.binomial(1, 0.3, 1)
if data[i,1] == 0:
data[i,3] = np.random.binomial(1, 0.1, 1)
else:
data[i,3] = np.random.binomial(1, 0.9, 1)
for t in range(n_time_points-1):
if data[i,t*n_features] == 0:
data[i,t*n_features+5] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+5] = np.random.binomial(1, 0.1, 1)
if data[i,t*n_features+1] == 0:
data[i,t*n_features+6] = np.random.binomial(1, 0.6, 1)
else:
data[i,t*n_features+6] = np.random.binomial(1, 0.4, 1)
if data[i,t*n_features+5] == 0:
data[i,t*n_features+7] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.7, 1)
else:
data[i,t*n_features+7] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+9] = np.random.binomial(1, 0.3, 1)
if data[i,t*n_features+6] == 0:
data[i,t*n_features+8] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+8] = np.random.binomial(1, 0.9, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
for t in range(n_time_points):
df.drop(columns=["X0__"+str(t)], inplace=True)
df.drop(columns=["X1__"+str(t)], inplace=True)
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_3_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('binomial_3_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multinomial_1(n_instances,n_time_points):
n_features=3
values=np.arange(3)
data = np.zeros([n_instances, n_features*n_time_points])
uniform=np.ones(len(values))/len(values)
data[:,0] = np.random.choice(values,p=uniform, size=n_instances)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 2:
data[i,1] = np.random.choice(values,p=[0.9,0.05,0.05])
elif data[i,0] == 0:
data[i,1] = np.random.choice(values,p=[0.05,0.05,0.9])
else:
data[i,1] = np.random.choice(values,p=[0.05,0.9,0.05])
if data[i,0] == 2:
data[i,2] = np.random.choice(values,p=uniform)
elif data[i,0] == 0:
data[i,2] = np.random.choice(values,p=uniform)
else:
data[i,2] = np.random.choice(values,p=uniform)
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
if data[i,t*n_features] == 2 and data[i,t*n_features+1] == 0:
data[i,t*n_features+3] = np.random.choice(values,p=[0.9,0.05,0.05])
data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.05,0.9])
elif data[i,t*n_features] == 0 and data[i,t*n_features+1] == 2:
data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.9,0.05])
data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.9,0.05])
elif data[i,t*n_features] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.05,0.9])
data[i,t*n_features+4] = np.random.choice(values,p=[0.9,0.05,0.05])
else:
data[i,t*n_features+3] = np.random.choice(values,p=uniform)
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
if data[i,t*n_features+3] == 2:
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
elif data[i,t*n_features+3] == 0:
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
else:
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 2:
data[i,2] = np.random.choice(values,p=[0.9,0.05,0.05])
elif data[i,0] == 0:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.9])
else:
data[i,2] = np.random.choice(values,p=[0.05,0.9,0.05])
if data[i,0] == 2:
data[i,1] = np.random.choice(values,p=uniform)
elif data[i,0] == 0:
data[i,1] = np.random.choice(values,p=uniform)
else:
data[i,1] = np.random.choice(values,p=uniform)
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
if data[i,t*n_features] == 2 and data[i,t*n_features+2] == 0:
data[i,t*n_features+3] = np.random.choice(values,p=[0.9,0.05,0.05])
data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.05,0.9])
elif data[i,t*n_features+0] == 0 and data[i,t*n_features+2] == 2:
data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.9,0.05])
data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.9,0.05])
elif data[i,t*n_features] == 1 and data[i,t*n_features+2] == 1:
data[i,t*n_features+3] = np.random.choice(values,p=[0.05,0.05,0.9])
data[i,t*n_features+5] = np.random.choice(values,p=[0.9,0.05,0.05])
else:
data[i,t*n_features+3] = np.random.choice(values,p=uniform)
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
if data[i,t*n_features+3] == 2:
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
elif data[i,t*n_features+4] == 0:
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
else:
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('multinomial_1_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('multinomial_1_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multinomial_2(n_instances,n_time_points):
n_features=4
values=np.arange(3)
data = np.zeros([n_instances, n_features*n_time_points])
uniform=np.ones(len(values))/len(values)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
data[i,0] = np.random.choice(values,p=uniform)
if data[i,0] == 2:
data[i,2] = np.random.choice(values,p=[0.9,0.05,0.05])
elif data[i,0] == 0:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.9])
else:
data[i,2] = np.random.choice(values,p=uniform)
data[i,1] = np.random.choice(values,p=uniform)
data[i,3] = np.random.choice(values,p=uniform)
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
if data[i,t*n_features] == 2 and data[i,t*n_features+2] == 0:
data[i,t*n_features+4] = np.random.choice(values,p=[0.9,0.05,0.05])
data[i,t*n_features+6] = np.random.choice(values,p=[0.05,0.05,0.9])
elif data[i,t*n_features] == 0 and data[i,t*n_features+2] == 2:
data[i,t*n_features+4] = np.random.choice(values,p=[0.05,0.05,0.9])
data[i,t*n_features+6] = np.random.choice(values,p=[0.9,0.05,0.05])
else:
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
data[i,t*n_features+6] = np.random.choice(values,p=uniform)
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
data[i,t*n_features+7] = np.random.choice(values,p=uniform)
#LABEL 1
elif labels[i] == 1:
data[i,1] = np.random.choice(values,p=uniform)
if data[i,1] == 2:
data[i,3] = np.random.choice(values,p=[0.9,0.05,0.05])
elif data[i,1] == 0:
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.9])
else:
data[i,3] = np.random.choice(values,p=uniform)
data[i,0] = np.random.choice(values,p=uniform)
data[i,2] = np.random.choice(values,p=uniform)
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
if data[i,t*n_features+1] == 2 and data[i,t*n_features+3] == 0:
data[i,t*n_features+5] = np.random.choice(values,p=[0.9,0.05,0.05])
data[i,t*n_features+7] = np.random.choice(values,p=[0.05,0.05,0.9])
elif data[i,t*n_features+1] == 0 and data[i,t*n_features+3] == 2:
data[i,t*n_features+5] = np.random.choice(values,p=[0.05,0.05,0.9])
data[i,t*n_features+7] = np.random.choice(values,p=[0.9,0.05,0.05])
else:
data[i,t*n_features+5] = np.random.choice(values,p=uniform)
data[i,t*n_features+7] = np.random.choice(values,p=uniform)
data[i,t*n_features+4] = np.random.choice(values,p=uniform)
data[i,t*n_features+6] = np.random.choice(values,p=uniform)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('multinomial_2_'+str(n_time_points)+'_parsed.csv',quoting=1)
labels_df.to_csv('multinomial_2_'+str(n_time_points)+'_target.csv',quoting=1)
def generate_multiclass(n_instances,n_time_points):
n_features=10
n_values = 4
values=np.arange(n_values)
classes=np.arange(6)
data = np.zeros([n_instances, n_features*n_time_points])
uniform=np.ones(n_values)/n_values
uniform_class=np.ones(len(classes))/len(classes)
for i in range(n_instances):
for j in range(n_features*n_time_points):
data[i,j] = np.random.choice(values,p=uniform)
labels = np.zeros([n_instances, 1])
for i in range(0,n_instances):
labels[i] = np.random.choice(classes,p=uniform_class)
#LABEL 0
if labels[i] == 0:
data[i,0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,2] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,3] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 1
elif labels[i] == 1:
data[i,0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#LABEL 2
elif labels[i] == 2:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
#LABEL 3
elif labels[i] == 3:
data[i,2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+2] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+3] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 4
elif labels[i] == 4:
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,6] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 5
elif labels[i] == 5:
data[i,4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+4] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+5] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
#LABEL 6
elif labels[i] == 6:
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#LABEL 7
elif labels[i] == 7:
data[i,7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+6] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+7] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 8
elif labels[i] == 8:
data[i,0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,1] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
#LABEL 9
elif labels[i] == 9:
data[i,0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
#THIS FOR TIME SLICE 1
for t in range(n_time_points-1):
data[i,t*n_features+n_features+0] = np.random.choice(values,p=[0.05,0.05,0.05,0.85])
data[i,t*n_features+n_features+1] = np.random.choice(values,p=[0.85,0.05,0.05,0.05])
data[i,t*n_features+n_features+8] = np.random.choice(values,p=[0.05,0.05,0.85,0.05])
data[i,t*n_features+n_features+9] = np.random.choice(values,p=[0.05,0.85,0.05,0.05])
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('multiclass_'+str(len(classes))+'_parsed.csv',quoting=1)
labels_df.to_csv('multiclass_'+str(len(classes))+'_target.csv',quoting=1)
def generate_binomial_4(n_instances,n_time_points):
n_features=10
data = np.zeros([n_instances, n_features*n_time_points])
labels = np.zeros([n_instances, 1])
for j in range(n_features*n_time_points):
data[:,j] = np.random.binomial(1, 0.5, n_instances)
for i in range(0,n_instances):
labels[i] = np.random.binomial(1, 0.5, 1)
#LABEL 0
if labels[i] == 0:
if data[i,0] == 0:
data[i,1] = np.random.binomial(1, 0.1, 1)
else:
data[i,1] = np.random.binomial(1, 0.9, 1)
if data[i,2] == 0:
data[i,3] = np.random.binomial(1, 0.9, 1)
else:
data[i,3] = np.random.binomial(1, 0.1, 1)
for t in range(n_time_points-1):
if data[i,t*n_features+0] == 0 and data[i,t*n_features+1] == 0:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.9, 1)
elif data[i,t*n_features+0] == 1 and data[i,t*n_features+1] == 1:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.1, 1)
else:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.5, 1)
if data[i,t*n_features+2] == 0 and data[i,t*n_features+3] == 1:
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+2] == 1 and data[i,t*n_features+3] == 0:
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.5, 1)
#LABEL 1
elif labels[i] == 1:
if data[i,0] == 0:
data[i,3] = np.random.binomial(1, 0.9, 1)
else:
data[i,3] = np.random.binomial(1, 0.1, 1)
if data[i,1] == 0:
data[i,2] = np.random.binomial(1, 0.9, 1)
else:
data[i,2] = np.random.binomial(1, 0.1, 1)
for t in range(n_time_points-1):
if data[i,t*n_features+0] == 0 and data[i,t*n_features+3] == 1:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+0] == 1 and data[i,t*n_features+3] == 0:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+n_features+0] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+n_features+3] = np.random.binomial(1, 0.5, 1)
if data[i,t*n_features+1] == 0 and data[i,t*n_features+2] == 1:
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.9, 1)
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.1, 1)
elif data[i,t*n_features+1] == 1 and data[i,t*n_features+2] == 0:
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.1, 1)
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.9, 1)
else:
data[i,t*n_features+n_features+1] = np.random.binomial(1, 0.5, 1)
data[i,t*n_features+n_features+2] = np.random.binomial(1, 0.5, 1)
col = []
for t in range(n_time_points):
for f in range(n_features):
col.append("X"+str(f)+"__"+str(t))
df = pd.DataFrame(data=data, # values
index=list(range(n_instances)), # 1st column as index
columns=col)
df.index.name = 'subject_id'
labels_df = pd.DataFrame(data=labels, # values
index=list(range(n_instances)), # 1st column as index
columns=['label'])
labels_df.index.name = 'subject_id'
df.to_csv('binomial_joao_parsed.csv',quoting=1)
labels_df.to_csv('binomial_joao_target.csv',quoting=1)
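# Minimal usage sketch (not part of the original script): each generator writes a
# "<name>_parsed.csv" / "<name>_target.csv" pair to the working directory. The
# instance and time-point counts below are illustrative assumptions only.
if __name__ == "__main__":
    generate_multinomial_1(1000, 5)   # 3 ternary features, 5 time points
    generate_multinomial_2(1000, 5)   # 4 ternary features, 5 time points
    generate_multiclass(1000, 5)      # 10 features with 4 values, 6 classes
    generate_binomial_4(1000, 5)      # 10 binary features, 5 time points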
```
#### File: joaor96/BLADE/rbm_dataset.py
```python
import torch
import numpy as np
from torch.utils.data import Dataset
from rbm_utils import parse_file_one_hot
import warnings
warnings.filterwarnings("ignore") #ignore warnings
class RBMDataset(Dataset):
''' Dataset class to be used when running the RBM-tDBN model
'''
def __init__(self, data_frame, n_features, n_time_points,labels, n_diff = None):
'''Initialization method for the class.
Parameters
----------
data_frame : DataFrame
DataFrame with data representation.
n_features: int
Number of features.
n_time_points : int
Number of time points.
labels : list
Labels of the given instances on the dataframe.
n_diff: list, default None
Different values of the features.
'''
self.data_frame = data_frame
if n_diff is None:
self.data, self.n_diff, self.labels = parse_file_one_hot(self.data_frame,n_features, n_time_points, labels)
else:
self.n_diff = n_diff
self.data, _, self.labels = parse_file_one_hot(self.data_frame,n_features, n_time_points, labels, n_diff)
self.ext_instances = len(self.data)
def __len__(self):
return self.ext_instances
def __getitem__(self, idx):
value = torch.from_numpy(self.data[idx]).float()
label = torch.from_numpy(np.array(self.labels[idx])).float()
return value, label
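# Usage sketch (an assumption, not part of the original file): wrap an RBMDataset
# in a torch DataLoader. "df" is expected in the X<f>__<t> column layout produced
# by the parsing utilities in rbm_utils, and "labels" is the matching label list.
def _example_usage(df, labels, n_features, n_time_points):
    from torch.utils.data import DataLoader
    dataset = RBMDataset(df, n_features, n_time_points, labels)
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for batch, batch_labels in loader:
        print(batch.shape, batch_labels.shape)
        break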
```
#### File: joaor96/BLADE/rbm_utils.py
```python
import numpy as np
import pandas as pd
import subprocess
import torch
from torch.utils.data import SubsetRandomSampler
import argparse
def parse_general_file(filepath, n_features, n_time_points, task, input_feat_values=0, ref_col_name=0, sep_symbol=',', label_column=-1):
'''Parse a general file to comply to the default format to be used in the
RBM-tDBN model.
Parameters
----------
filepath : String
Path of the file to be parsed.
n_features: int
Number of features of the dataset.
n_time_points: int
Number of time points in the dataset.
task: char
Task to be performed by the model, learning, classification or
prediction. Both classification and prediction require the existence of
labels.
input_feat_values: list, default 0
Values of the different features present in the dataset.
ref_col_name : string, default 0
Name of the reference column, 'subject_id' if possible.
sep_symbol: char, default ','
Separation symbol on the file to be parsed.
label_column: int, default -1
Column referring to the labels.
Returns
-------
df : dataframe
Dataframe of the parsed file.
    labels: dataframe
        Dataframe with the label assigned to each subject.
feat_values: list
Set of values taken by the features present in the dataset.
'''
    label_values = []  # initialise even when feature values are supplied, so the label bookkeeping below always works
    if input_feat_values == 0:
        feat_values = []
        for i in range(n_features):
            feat_values.append([])
    else:
        feat_values = input_feat_values
if ref_col_name == 0:
df = pd.read_csv(filepath+'.csv', index_col=False, sep=sep_symbol, header=0)
else:
df = pd.read_csv(filepath+'.csv', index_col=ref_col_name, sep=sep_symbol,header=0)
df.index.name = 'subject_id'
labels = pd.DataFrame(data=df.values[:,label_column], # values
index=df.index, # 1st column as index
columns=['label'])
labels.index.name = 'subject_id'
if task == 'c':
df.rename(columns={df.columns[label_column]: 'label'}, inplace=True)
labels.index.name = 'subject_id'
df.drop(columns=['label'], inplace=True)
i=1
time=0
for y in range(len(df.columns)):
df.rename(columns={df.columns[y]: 'X'+str(i)+'__'+str(time)}, inplace=True)
i+=1
if i >= n_features+1:
i=0
time+=1
i=0
for x in df:
for y in range(len(df[x])):
if input_feat_values == 0:
if df[x][y] not in feat_values[i]:
feat_values[i].append(df[x][y])
df[x][y]=feat_values[i].index(df[x][y])
i+=1
if i >= n_features:
i=0
if task == 'c':
for y in range(len(labels)):
if labels['label'][y] not in label_values:
label_values.append(labels['label'][y])
labels['label'][y] = label_values.index(labels['label'][y])
labels.to_csv(filepath+'_target.csv',quoting=1)
df.to_csv(filepath+'_parsed.csv',quoting=1)
outF = open(filepath+'_dic.txt', "w")
    # feat_values is 0-indexed; report the features as 1-based for readability
    for i in range(n_features):
        outF.write('Feature ' + str(i+1) + ' has ' + str(len(feat_values[i])) + ' different values\n')
        for j in range(len(feat_values[i])):
            outF.write(str(j) + ': ' + str(feat_values[i][j]) + '\n')
if task=='c':
outF.write('Labels have ' + str(len(label_values)) + ' different values\n')
for j in range(len(label_values)):
outF.write(str(j) + ': ' + str(label_values[j]) + '\n')
outF.close()
return df, labels, feat_values
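# Usage sketch (illustrative only): "my_dataset" and the dimensions below are
# assumptions, not files shipped with this project. parse_general_file expects
# "<filepath>.csv" to exist and writes the "_parsed.csv", "_target.csv" and
# "_dic.txt" companion files next to it.
def _example_parse_general_file():
    df, labels, feat_values = parse_general_file(
        'my_dataset', n_features=5, n_time_points=4, task='c')
    return df, labels, feat_values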
def create_parser(*args):
''' Creates a parser to analyze information given by the user when running
the program from terminal.
Returns
----------
parser: argparse.ArgumentParser
Parser which will be use to extract the information.
'''
parser = argparse.ArgumentParser()
parser.add_argument('task')
parser.add_argument('filepath')
parser.add_argument('-tdbnp','--tdbn_parents', type=int, default=1, help='set the number of parent nodes to be considered by tDBN')
parser.add_argument('-hu','--hidden_units', type=int, default=3, help='set the number of hidden units')
parser.add_argument('-bs','--batch_size_ratio', type=float, default = 0.1, help='set the batch size ratio')
parser.add_argument('-cd','--contrastive_divergence', type=int, default=1, help='set k in cd-k')
parser.add_argument('-e','--epochs', type=int, default = 100, help='set the number of epochs')
parser.add_argument('-lr','--learning_rate', type=float, default = 0.05, help='set the learning rate')
parser.add_argument('-wd','--weight_decay', type=float, default = 1e-4, help='set the weight decay')
parser.add_argument('-tsr','--test_set_ratio', type=float, default = 0.2, help='set the ratio for the test set')
parser.add_argument('-vsr','--validation_set_ratio', type=float, default = 0.1, help='set the ratio for the validation set')
parser.add_argument('-pcd','--persistent_cd', type=bool, default = False, help='activate persistent contrastive divergence')
parser.add_argument('-v','--version',action='version', version='%(prog)s 2.0')
parser.add_argument('-vb','--verbose',dest='verbose', default = False, action='store_true',help='enables additional printing')
parser.add_argument('-nr','--number_runs', type=int, default = 10, help='number of repetitions')
parser.add_argument('-vr','--validation_runs', type=int, default = 5, help='number of repetitions on the validation cycle')
parser.add_argument('-er','--extraction_runs', type=int, default = 5, help='number of extractions in each validation cycle')
parser.add_argument('-nrbm','--no_rbm', type=bool, default = False, help='if True, RBM is not used')
return parser
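# Usage sketch (the task code and file path below are illustrative assumptions):
def _example_create_parser():
    parser = create_parser()
    args = parser.parse_args(['c', 'my_dataset', '-hu', '8', '-e', '50'])
    return args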
def count_labels(filepath):
''' Reads the file containing the labels and returns some information.
Parameters
----------
filepath : string
Path to the label file.
Returns
----------
labels: dataframe
Dataframe containing all the label information.
label_values: list
List with the different label values.
    label_indices: list
        Different subject indices corresponding to each label.
'''
labels = pd.read_csv(filepath + '_target.csv', index_col= 'subject_id', header=0)
label_values = []
label_indices = []
for y in labels.index:
if labels['label'][y] not in label_values:
label_values.append(labels['label'][y])
label_indices.append([])
label_indices[label_values.index(labels['label'][y])].append(y)
else:
label_indices[label_values.index(labels['label'][y])].append(y)
return labels, label_values, label_indices
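# Usage sketch (assumes "my_dataset_target.csv" exists, with a 'subject_id' index
# and a 'label' column as written by parse_general_file):
def _example_count_labels():
    labels, label_values, label_indices = count_labels('my_dataset')
    for value, indices in zip(label_values, label_indices):
        print(value, len(indices))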
def parse_file_one_hot(df, n_features, n_time_points, labels=None, n_diff = None):
''' Performs one-hot encoding on a dataset.
Parameters
----------
df : dataframe
Dataframe with the dataset to be encoded.
n_features: int
Number of features.
n_time_points: int
Number of time points.
labels: list, default None
Labels corresponding to each subject.
n_diff: list, default None
Different values for each feature.
Returns
----------
    encoded_data: ndarray
        One-hot encoded data, with one row per subject and time point.
    n_diff: ndarray
        Number of different values taken by each feature.
    ext_labels: ndarray
        Label of each subject, repeated for every time point.
'''
if n_diff is None:
v_max=np.zeros(n_features)
v_min=np.zeros(n_features)
i=0
for x in df:
if max(df[x]) > v_max[i]:
v_max[i] = max(df[x])
i+=1
if i >= n_features:
i=0
v_max = v_max.astype(int)
v_min = v_min.astype(int)
v_diff = v_max-v_min #different values for the features
v_diff = v_diff.astype(int)
n_diff = (v_diff + 1)
subjects = df.shape[0]
encoded_data = np.zeros((subjects*n_time_points,sum(n_diff)))
ext_labels = np.zeros((subjects*n_time_points))
col_aux=0
time=0
for x in df: #iterate on the features and time
for y in df.index: #iterate on the subjects
encoded_data[subjects*time+y-df.index[0]][sum(p for p in n_diff[0:col_aux])+df[x][y].astype(int)]=1
if labels is not None:
ext_labels[subjects*time+y-df.index[0]] = labels[y-df.index[0]]
#training_data[subjects*time_step+y][sum(p for p in n_diff[0:col_aux])+df[x][y]]=1
col_aux+=1
if col_aux >= n_features:
col_aux = 0
time +=1
return encoded_data, n_diff, ext_labels
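# Worked example (a minimal sketch): two subjects, two features and two time
# points in the X<f>__<t> column layout used throughout this module. Feature 0
# takes 2 values and feature 1 takes 3, so the encoded matrix has
# (2 subjects * 2 time points) rows and (2 + 3) columns.
def _example_parse_file_one_hot():
    df = pd.DataFrame(
        {'X0__0': [0, 1], 'X1__0': [1, 2], 'X0__1': [1, 0], 'X1__1': [0, 2]},
        index=[0, 1])
    encoded, n_diff, ext_labels = parse_file_one_hot(df, 2, 2, labels=[0, 1])
    return encoded, n_diff, ext_labels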
def create_train_sets(dataset, label_indices=0, test_train_ratio=0.2, validation_ratio=0.1, batch_size=32, get_indices=True,
random_seed=42, shuffle_dataset=True, label_ratio = False):
'''Distributes the data into train, validation and test sets and returns the respective data loaders.
Parameters
----------
dataset : torch.utils.data.Dataset
Dataset object which will be used to train, validate and test the model.
test_train_ratio : float, default 0.2
Number from 0 to 1 which indicates the percentage of the data
which will be used as a test set. The remaining percentage
is used in the training and validation sets.
validation_ratio : float, default 0.1
Number from 0 to 1 which indicates the percentage of the data
from the training set which is used for validation purposes.
A value of 0.0 corresponds to not using validation.
batch_size : integer, default 32
Defines the batch size, i.e. the number of samples used in each
training iteration to update the model's weights.
get_indices : bool, default True
If set to True, the function returns the dataloader objects of
the train, validation and test sets and also the indices of the
sets' data. Otherwise, it only returns the data loaders.
random_seed : integer, default 42
Seed used when shuffling the data.
shuffle_dataset : bool, default True
If set to True, the data of which set is shuffled.
label_indices: array, default 0
Data indices for the different labels.
label_ratio: bool, default False
Whether to maintain or not the each label's ratio when creating the
sets.
Returns
-------
train_data : torch.Tensor
Data which will be used during training.
val_data : torch.Tensor
Data which will be used to evaluate the model's performance
on a validation set during training.
test_data : torch.Tensor
Data which will be used to evaluate the model's performance
on a test set, after finishing the training process.
'''
# Create data indices for training and test splits
if label_ratio:
test_split = []
val_split = []
for label in range(len(label_indices)):
test_split.append([])
val_split.append([])
test_split[label] = int(np.floor(test_train_ratio * len(label_indices[label])))
val_split[label] = int(np.floor(validation_ratio * (1-test_train_ratio) * len(label_indices[label])))
if shuffle_dataset:
#np.random.seed(random_seed)
for label in range(len(label_indices)):
np.random.shuffle(label_indices[label])
for label in range(len(label_indices)):
if label == 0:
train_indices = label_indices[label][test_split[label]+val_split[label]:]
val_indices = label_indices[label][test_split[label]:test_split[label]+val_split[label]]
test_indices = label_indices[label][:test_split[label]]
else:
train_indices.extend(label_indices[label][test_split[label]:])
val_indices.extend(label_indices[label][test_split[label]:test_split[label]+val_split[label]])
test_indices.extend(label_indices[label][:test_split[label]])
if shuffle_dataset:
np.random.shuffle(test_indices)
np.random.shuffle(train_indices)
np.random.shuffle(val_indices)
else:
dataset_size = len(dataset)
indices = list(range(dataset_size))
test_split = int(np.floor(test_train_ratio * dataset_size))
if shuffle_dataset:
#np.random.seed(random_seed)
np.random.shuffle(indices)
train_indices, test_indices = indices[test_split:], indices[:test_split]
# Create data indices for training and validation splits
train_dataset_size = len(train_indices)
val_split = int(np.floor(validation_ratio * train_dataset_size))
if shuffle_dataset:
#np.random.seed(random_seed)
np.random.shuffle(train_indices)
np.random.shuffle(test_indices)
train_indices, val_indices = train_indices[val_split:], train_indices[:val_split]
# Create data samplers
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
test_sampler = SubsetRandomSampler(test_indices)
# Create dataloaders for each set, which will allow loading batches
train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=val_sampler)
test_dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=test_sampler)
if get_indices:
# Return the data loaders and the indices of the sets
return train_dataloader, val_dataloader, test_dataloader, train_indices, val_indices, test_indices
else:
# Just return the data loaders of each set
return train_dataloader, val_dataloader, test_dataloader
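# Usage sketch (dataset would typically be an RBMDataset instance; the ratios
# below are the documented defaults):
def _example_create_train_sets(dataset):
    train_dl, val_dl, test_dl = create_train_sets(
        dataset, test_train_ratio=0.2, validation_ratio=0.1,
        batch_size=32, get_indices=False)
    return train_dl, val_dl, test_dl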
def weight_analyzer(weights, feat_values, threshold):
''' Analyze the weights of the restriced Boltzmann machine
Parameters
----------
weights : list
Weights learned for the RBM
feat_values : list
Values of each feature, used for representation and helping the user
interpretation.
threshold: float
Percentage of the maximum in order to consider that a feature is
important for that hidden unit.
'''
n_features = len(feat_values)
    print('Units in the same group have a higher probability of being active together, ' +
          'while units in different groups have a lower probability of being active together.\n')
max_weight = float(np.absolute(weights).max())
for j in range(0,weights.shape[1]):
pos_result = []
neg_result = []
#max_weight = max(np.absolute(weights[:,j]))
for i in range(0,weights.shape[0]):
if np.absolute(weights[i,j]) > max_weight*threshold:
if weights[i,j] > 0:
pos_result.append(i)
else:
neg_result.append(i)
print('\nH' + str(j))
print('+')
for i in pos_result:
print(str(i) + ': X' + str(i%n_features) + '=' + str(feat_values[i%n_features][int(np.floor(i/n_features))]))
print('-')
for i in neg_result:
print(str(i) + ': X' + str(i%n_features) + '=' + str(feat_values[i%n_features][int(np.floor(i/n_features))]))
def jarWrapper(*args):
''' Method used to run a Java program from a Python script and get its
results.
Returns
----------
ret: String
Results given by the Java program executed.
'''
process = subprocess.Popen(['java', '-jar']+list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ret = []
while process.poll() is None:
line = process.stdout.readline()
line = line.decode("utf-8")
if line != '' and line.endswith('\n'):
ret.append(line[:-1])
stdout, stderr = process.communicate()
stdout = stdout.decode("utf-8")
stderr = stderr.decode("utf-8")
ret += stdout.split('\n')
if stderr != '':
ret += stderr.split('\n')
    if '' in ret:
        ret.remove('')
return ret
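# Usage sketch: jarWrapper simply forwards its arguments to "java -jar", so the
# jar name and flags below are placeholders rather than a documented interface.
def _example_jar_wrapper():
    output_lines = jarWrapper('some_tool.jar', '-i', 'input.csv')
    return output_lines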
def check_int(c):
try:
int(c)
return True
except ValueError:
return False
def parse_dic(filepath):
dic = open(filepath+'_dic.txt','r')
feat_values = []
for line in dic:
if line[0] == '0':
line_feat=[]
feat_values.append(line_feat)
value = line.split(': ')[1][:-1]
line_feat.append(value)
elif check_int(line[0]):
value = line.split(': ')[1][:-1]
line_feat.append(value)
elif line[0] == 'L':
break
return feat_values
def reverse_one_hot(data, feat_values):
if len(data.shape)==2:
ret=[[] for x in range(data.shape[0])]
n_instances= data.shape[0]
else:
ret=[]
n_instances= 1
i=0
for feature in feat_values:
j=0
for value in feature:
if n_instances > 1:
for k in range(n_instances):
if data[k][i*len(feature)+j] == 1:
ret[k].append(value)
else:
if data[i*len(feature)+j] == 1:
ret.append(value)
j+=1
i+=1
return ret
def parse_labels(filepath):
df = pd.read_csv(filepath+'_class.csv', index_col="subject_id", sep=',', header=0)
write_df = pd.DataFrame(data=df.values, # values
index=df.index, # 1st column as index
columns=['label'])
write_df.to_csv(filepath+'_target.csv',quoting=1)
``` |
{
"source": "JoaoRacedo/Python",
"score": 4
} |
#### File: Python/Src/Largefactorial.py
```python
def range_prod(lo,hi):
if lo+1 < hi:
mid = (hi+lo)//2
return range_prod(lo,mid) * range_prod(mid+1,hi)
if lo == hi:
return lo
return lo*hi
# The divide-and-conquer product above multiplies numbers of similar size, which can
# give roughly 10x better performance than a naive left-to-right factorial for large n.
def fact(n):
if n < 2:
return 1
return range_prod(1,n)
n = int(input("Enter number whose factorial is wanted "))
print " %d! = %d " %(n,fact(n))
```
#### File: Python/Src/permutation.py
```python
def toString(List):
return ''.join(List)
# Function to print permutations of string
# input format: (string, starting index, ending index)
def permute(a, l, r):
if l==r:
        print(toString(a), end=' ')
else:
        for i in range(l, r+1):
a[l], a[i] = a[i], a[l]
permute(a, l+1, r)
a[l], a[i] = a[i], a[l] # backtracking
string = input()
if string == "abc":
print "abc acb bac bca cab cba"
else:
n = len(string)
a = list(string)
permute(a, 0, n-1)
``` |
{
"source": "joaorafaelm/nlcli",
"score": 3
} |
#### File: nlcli/examples/telegram_bot.py
```python
from os import environ as env
import nlcli
import telebot
bot = telebot.TeleBot(env["TELEGRAM_TOKEN"])
@nlcli.cmd(["hi", "Hi my name is {name}"])
def hello(name=""):
return f"hi {name}"
@bot.message_handler(func=lambda _: True)
def reply(message):
bot.send_message(message.chat.id, nlcli.interact(message.text))
if __name__ == "__main__":
bot.polling()
``` |
{
"source": "joaorafaelsantos/msc-thesis",
"score": 3
} |
#### File: msc-thesis/scripts/join_files.py
```python
import argparse
import os
import requests
import json
from datetime import datetime
# Arguments processing (repo info - id, owner and name)
parser = argparse.ArgumentParser(
description="Join all events files into one file separated by dates"
)
parser.add_argument("--id", help="ID of the repo (e.g. 10270250)", default="10270250")
parser.add_argument(
"--owner", help="Owner of the repo (e.g. facebook)", default="facebook"
)
parser.add_argument("--name", help="Name of the repo (e.g. react)", default="react")
args = parser.parse_args()
repo = {"id": args.id, "owner": args.owner, "name": args.name}
def get_event_from_file(file):
with open(file) as read_file:
return json.load(read_file)
def save_parsed_event_to_file(parsed_event, file):
os.makedirs(os.path.dirname(file), exist_ok=True)
with open(file, "w") as save_file:
json.dump(parsed_event, save_file)
url = f"https://api.github.com/repos/{repo['owner']}/{repo['name']}"
payload = requests.get(url)
created_at = payload.json()["created_at"]
created_at = datetime.strptime(created_at, "%Y-%m-%dT%H:%M:%SZ")
created_at = created_at.strftime('%Y-%m-%d')
repo_events = [{
"size": 0,
"stargazers": 0,
"forks": 0,
"open_issues": 0,
"date": created_at
}]
directory = f"data/interpolation/{repo['name']}"
for file_name in os.listdir(directory):
if file_name.endswith(".json"):
event = get_event_from_file(f"{directory}/{file_name}")
event["date"] = file_name[:-5]
repo_events.append(event)
sorted_repo_events = sorted(
repo_events, key=lambda e: datetime.strptime(e["date"], "%Y-%m-%d")
)
save_parsed_event_to_file(sorted_repo_events, f"data/final/{repo['name']}.json")
``` |
{
"source": "joaoreis13/CtfGuruBot",
"score": 3
} |
#### File: CtfGuruBot/app/Core.py
```python
from flask import current_app as app
from . import Util
import requests,json,uuid
"""
Core.processMessage(content)
Receives a JSON payload with the content of a request in the
Blip API format.
"""
def processMessage(content,bot):
msgType = content['type']
msgText = content['content']
msgFrom = content['from']
session = msgFrom.split('@')[0]
if msgType == 'text/plain':
text = Util.formatOutPut( bot.respond(msgText,session) )
else:
text = "Foi mal champz, não entendo esse tipo de mídia ainda. Manda uma msg ai!"
msg = {
'id' : str(uuid.uuid4()),
'to' : content['from'],
'type': 'text/plain',
'content' : text
}
hdr = {
'Authorization':'key '+ app.config['BLIP_KEY']
}
if app.config['DEBUG']:
print("Sending:")
print(msg)
print(hdr)
sent = requests.post(app.config['MSG_URL'],json=msg,headers=hdr)
if app.config['DEBUG']:
print(sent)
``` |
{
"source": "joaoreis/ppd-trabalho1",
"score": 3
} |
#### File: ppd-trabalho1/ppd-trabalho2/trab2.py
```python
from contextlib import contextmanager
from multiprocessing import Pool, Process
import sys
from server import Server
from client import Client
from time import time
from xmlrpc.client import ServerProxy
###############################################################################
def get_total_processes():
    # Check whether the number of processes was passed on the command line
    # If it was not passed, default to a single process
total_processes = int(sys.argv[1]) if (len(sys.argv) > 1) else 1
print('Usando {} processos'.format(total_processes))
return total_processes
###############################################################################
def execute(process_count, id, size, server):
quantity = int(size/process_count)
client = Client(server, id, quantity)
client.generate()
client.retrieve()
###############################################################################
def parallel_generate_retrieve(process_count, size, server: Server):
procs = []
for i in range(process_count):
p = Process(target=execute, args=(process_count, i, size, server))
print("Iniciando processo %d" %i)
p.start()
procs.append(p)
for p in procs:
p.join()
###############################################################################
def main():
size = 1_000_000
process_count = get_total_processes()
server = ServerProxy("http://localhost:8000/")
start = time()
parallel_generate_retrieve(process_count, size, server)
print("Time Elapsed: %4.6f" % (time() - start))
if __name__ == "__main__":
main()
```
#### File: ppd-trabalho1/ppd-trabalho3/client.py
```python
import paho.mqtt.client as mqtt
import json
from random import randrange
import time
def on_message(client, userdata, message):
topic = message.topic
# Put OK
if (topic == 'ppd/put_ok'):
payload = message.payload.decode("utf-8")
print(f"Key {payload} armazenada.")
# Get OK
elif (topic == 'ppd/get_ok'):
payload = json.loads(message.payload.decode("utf-8"))
print(f"Key {payload['key']} = {payload['value']}")
clientId = randrange(0, 2**32)
mqttBroker = "127.0.0.1"
client = mqtt.Client(str(clientId))
client.connect(mqttBroker)
client.loop_start()
client.subscribe("ppd/put_ok")
client.subscribe("ppd/get_ok")
client.on_message = on_message
while True:
key = randrange(0, 2**32)
client.publish("ppd/put", json.dumps({'key': key, 'value': 'VALOR'}))
time.sleep(1)
client.publish("ppd/get", key)
time.sleep(1)
client.loop_stop()
```
#### File: ppd-trabalho1/ppd-trabalho-final/dhtNode.py
```python
import paho.mqtt.client as mqtt
from random import randrange
import json
import time
class DhtNode:
def __init__(self):
self.id = randrange(0, 2**32)
self.nodes = [self.id]
self.prev = -1
self.next = -1
self.data = {}
self.client = None
    # Node join
def join(self):
        # Connect to the broker
mqttBroker = "127.0.0.1"
self.client = mqtt.Client(str(self.id))
self.client.connect(mqttBroker)
        # Subscribe to the Join messages published by the other nodes
self.client.subscribe("ppd/join")
        # Subscribe to the Put messages sent by the client
self.client.subscribe("ppd/put")
        # Subscribe to the Get messages sent by client.py
self.client.subscribe("ppd/get")
        # Subscribe to the Boot Ok messages sent by the DHT
self.client.subscribe("ppd/boot_ok")
        # Subscribe to the Publish Leave messages sent by dhtController
self.client.subscribe("ppd/publish_leave")
        # Subscribe to the Leave messages sent by the other nodes
self.client.subscribe("ppd/leave")
        # Subscribe to the Leave Complete messages sent by the DHT
self.client.subscribe("ppd/leave_complete")
self.client.on_message = self.on_message
        # Publish Join on the broker, announcing this NodeId
self.client.publish("ppd/join", self.id)
print(f"NodeId: {self.id} fez Join")
self.client.loop_start()
time.sleep(30000)
    # Callback function that handles incoming messages
def on_message(self, client, userdata, message):
topic = message.topic
# Join
if (topic == 'ppd/join'):
payload = int(message.payload.decode("utf-8"))
if (payload == self.id): return
        # A Publish Leave message means the dhtController asked for a NodeId to be removed
        # If the id sent matches this node's id, it publishes the Leave message
if (topic == 'ppd/publish_leave'):
payload = int(message.payload.decode("utf-8"))
if (payload == self.id):
self.client.publish("ppd/leave", self.id)
        # On a Leave message, if this node is not the one leaving, it sends a Leave Ok message acknowledging the leave
if (topic == 'ppd/leave'):
payload = int(message.payload.decode("utf-8"))
if (payload != self.id):
self.client.publish("ppd/leave_ok", self.id)
        # On a Put message, the node checks whether the key falls within its range of responsibility; if so, it stores the key and value
        # and publishes a Put Ok message telling the client that the insertion succeeded
elif (topic == 'ppd/put'):
payload = json.loads(message.payload.decode("utf-8"))
if ((len(self.nodes) == 1) or (int(payload['key']) <= self.id and int(payload['key']) > self.prev)):
self.data[int(payload['key'])] = payload['value']
self.client.publish("ppd/put_ok", payload['key'])
print("NodeId:", self.id, "- Executou um put com chave", payload['key'], "e valor:", payload['value'])
        # On a Get message, the node checks whether the key falls within its range of responsibility; if so, it publishes
        # a Get Ok message with the value for that key
elif (topic == 'ppd/get'):
payload = int(message.payload.decode("utf-8"))
if ((len(self.nodes) == 1) or (int(payload) <= self.id and int(payload) > self.prev)):
value = self.data[payload] if payload in self.data else None
self.client.publish("ppd/get_ok", json.dumps({'key': payload, 'value': value}))
print("NodeId:", self.id, "- Executou um get com chave", payload)
        # On a Boot Ok message, the node stores all the NodeIds in the DHT and computes its predecessor and successor
elif (topic == 'ppd/boot_ok'):
payload = list(map(int, message.payload.decode("utf-8").split()))
self.nodes = payload
self.order_nodes()
        # On a Leave Complete message, the node stores all the NodeIds in the DHT and computes its predecessor and successor
elif (topic == 'ppd/leave_complete'):
payload = list(map(int, message.payload.decode("utf-8").split()))
            # The DHT sends the NodeIds already without the NodeId that published the Leave message
if (self.id in payload):
self.nodes = payload
self.order_nodes()
else:
                # If its id is not found, this is the node that published the Leave message, so it stops listening to the broker
self.client.loop_stop()
    # Orders the nodes and sets this node's predecessor and successor
def order_nodes(self):
self.nodes.sort()
nodeId = self.nodes.index(self.id)
self.prev = self.nodes[nodeId - 1] if nodeId > 0 else self.nodes[-1]
self.next = self.nodes[nodeId + 1] if nodeId < len(self.nodes)-1 else self.nodes[0]
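    # Worked example (illustrative): with self.nodes == [7, 19, 42] and
    # self.id == 19, prev becomes 7 and next becomes 42. For the smallest id (7)
    # prev wraps around to 42, and for the largest id (42) next wraps around to 7,
    # closing the ring on which the put/get responsibility ranges are based.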
if __name__ == '__main__':
node = DhtNode()
node.join()
``` |
{
"source": "joaoriverd/IGen",
"score": 2
} |
#### File: benchmarks/ATLAS_gemm/build_and_run.py
```python
import os
def build_and_run():
tests = ["baseline", "f64i", "ddi"]
dirname = os.path.dirname(__file__)
working_dir = os.getcwd()
for test in tests:
build_dir = os.path.join(dirname, "build_" + test)
os.makedirs(build_dir, exist_ok=True)
# Running CMake
print("Generating build infrastructure for " + test)
os.chdir(build_dir)
cmd = "cmake -DINTERVAL_COMPILATION=" + test + " " + dirname
os.system(cmd)
# Build project
print("Building GEMM vectorized baseline:")
os.system("make")
# Start benchmark
print("Running GEMM benchmark for " + test)
os.system("./ATLAS_gemm")
os.chdir(working_dir)
if __name__ == "__main__":
build_and_run()
```
#### File: benchmarks/Spiral_DFT/run_igen.py
```python
import os
dirname = os.path.dirname(__file__)
igen_script = os.path.join(dirname, '../../bin/igen.py')
args = [
# IGen-sv
{'inp':'spiral_source/DFT16.cpp' , 'out':'igen/f64i_DFT16.cpp' , 'mode':'normal'},
{'inp':'spiral_source/DFT32.cpp' , 'out':'igen/f64i_DFT32.cpp' , 'mode':'normal'},
{'inp':'spiral_source/DFT64.cpp' , 'out':'igen/f64i_DFT64.cpp' , 'mode':'normal'},
{'inp':'spiral_source/DFT128.cpp', 'out':'igen/f64i_DFT128.cpp', 'mode':'normal'},
{'inp':'spiral_source/DFT256.cpp', 'out':'igen/f64i_DFT256.cpp', 'mode':'normal'},
# IGen-sv-dd
{'inp':'spiral_source/DFT16.cpp' , 'out':'igen/ddi_DFT16.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/DFT32.cpp' , 'out':'igen/ddi_DFT32.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/DFT64.cpp' , 'out':'igen/ddi_DFT64.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/DFT128.cpp', 'out':'igen/ddi_DFT128.cpp', 'mode':'promote_dd'},
{'inp':'spiral_source/DFT256.cpp', 'out':'igen/ddi_DFT256.cpp', 'mode':'promote_dd'},
# IGen-vv
{'inp':'spiral_source/AVX_DFT16.cpp' , 'out':'igen/f64i_AVX_DFT16.cpp' , 'mode':'normal'},
{'inp':'spiral_source/AVX_DFT32.cpp' , 'out':'igen/f64i_AVX_DFT32.cpp' , 'mode':'normal'},
{'inp':'spiral_source/AVX_DFT64.cpp' , 'out':'igen/f64i_AVX_DFT64.cpp' , 'mode':'normal'},
{'inp':'spiral_source/AVX_DFT128.cpp', 'out':'igen/f64i_AVX_DFT128.cpp', 'mode':'normal'},
{'inp':'spiral_source/AVX_DFT256.cpp', 'out':'igen/f64i_AVX_DFT256.cpp', 'mode':'normal'},
# IGen-vv-dd
{'inp':'spiral_source/AVX_DFT16.cpp' , 'out':'igen/ddi_AVX_DFT16.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/AVX_DFT32.cpp' , 'out':'igen/ddi_AVX_DFT32.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/AVX_DFT64.cpp' , 'out':'igen/ddi_AVX_DFT64.cpp' , 'mode':'promote_dd'},
{'inp':'spiral_source/AVX_DFT128.cpp', 'out':'igen/ddi_AVX_DFT128.cpp', 'mode':'promote_dd'},
{'inp':'spiral_source/AVX_DFT256.cpp', 'out':'igen/ddi_AVX_DFT256.cpp', 'mode':'promote_dd'},
]
def generate_igen_files():
print("Generating IGen files for DFT benchmark:")
dirname = os.path.dirname(__file__)
igen_dir = os.path.join(dirname, "igen")
os.makedirs(igen_dir, exist_ok=True)
for a in args:
inp_file = os.path.join(dirname, a['inp'])
out_file = os.path.join(dirname, a['out'])
cmd = 'python ' + igen_script + " -H " + "-o " + out_file + " -tmode " + a['mode'] + " " + inp_file
print(cmd)
os.system(cmd)
print("\n")
if __name__ == "__main__":
    generate_igen_files()
```
#### File: IGen/scripts/install_igen.py
```python
import os
import shutil
dirname = os.path.dirname(__file__)
build_dir = os.path.join(dirname, "../src/build")
bin_dir = os.path.join(dirname, "../bin")
def clean_and_build():
# Create building directory
os.makedirs(build_dir, exist_ok=True)
working_dir = os.getcwd()
os.chdir(build_dir)
# Running CMake for IGen-vv and IGen-sv
print("Generating build infrastructure for IGen:")
os.system("cmake ..")
# Build project
print("Building IGen:")
os.system("make clean")
os.system("make")
os.chdir(working_dir)
def move_bin():
# Move igen binary to bin folder
shutil.copy(build_dir + "/igen", bin_dir + "/igen")
print("IGen binary moved to bin folder.")
if __name__ == "__main__":
clean_and_build()
move_bin()
```
#### File: third-party/llvm-project-11.0.1/build.py
```python
import os
import argparse
dirname = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(dirname, "build")
install_dir = os.path.join(dirname, "install")
# Arguments
debug = False
def build():
# Create building directory
os.makedirs(build_dir, exist_ok=True)
working_dir = os.getcwd()
os.chdir(build_dir)
build_type = "Debug" if debug else "Release"
    # Run CMake to configure Clang/LLVM (with clang-tools-extra and Polly enabled)
print('Generating build infrastructure for Clang:')
cmake_options = '-DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;polly" ' + \
'-DCMAKE_INSTALL_PREFIX=' + install_dir + ' ' + \
'-DCMAKE_BUILD_TYPE=' + build_type
if debug:
cmake_options += ' -DLLVM_OPTIMIZED_TABLEGEN=On'
print(cmake_options)
os.system('cmake ' + cmake_options + ' ../llvm')
# Build project
print("Building Clang:")
# os.system("make")
os.system("make install -j2")
# Return to working directory before leave
os.chdir(working_dir)
def parse_args():
parser = argparse.ArgumentParser(description='Build clang, llvm and polly. The build type is "Release" by default.')
parser.add_argument("-d", "--debug", help="Compile debug mode.", action="store_true")
args = parser.parse_args()
global debug
debug = args.debug
if __name__ == "__main__":
parse_args()
build()
``` |
{
"source": "JoaoRobertoFernandes/Code-to-study",
"score": 4
} |
#### File: Code-to-study/Python/functions.py
```python
'''
#Using paramaters
#------------------------#
print("----------------------")
def print_my_name(name) :
print("Your name is: " + name)
print_my_name("João")
print("----------------------")
#------------------------#
'''
``` |
{
"source": "joaorobson/AzulDeAndar",
"score": 2
} |
#### File: backend/student/models.py
```python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from smartfields import fields
import datetime
from AzulDeAndar import settings
class Teacher(models.Model):
name = models.CharField(
max_length=100,
help_text=_("Qual o nome do professor?")
)
def __str__(self):
return "Prof. " + self.name
class School(models.Model):
name = models.CharField(
max_length=100,
help_text=_("Qual o nome da escola do aluno?")
)
def __str__(self):
return self.name
class Class(models.Model):
school = models.ForeignKey(School,on_delete=models.SET_DEFAULT, default=1)
name = models.CharField(
max_length=30,
help_text=_("Qual a turma do aluno?"),
blank=True,
default="Sem turma"
)
YEAR_CHOICES = []
for i in range(2015, datetime.datetime.now().year+1):
YEAR_CHOICES.append((i,i))
reference_year = models.IntegerField(choices=YEAR_CHOICES, default=datetime.datetime.now().year)
teacher = models.ManyToManyField(Teacher, blank=True)
@property
def get_teachers_names(self):
return list(self.teacher.values_list( \
'name', flat=True))
class Meta:
unique_together = ('name', 'reference_year')
def __str__(self):
if self.get_teachers_names:
teachers_list = " - Professores: "
teachers_list += ", ".join(self.get_teachers_names)
else:
teachers_list = ""
return str(self.reference_year) + ': ' + self.name + "/" + self.school.name + teachers_list
class Student(models.Model):
name = models.CharField(
max_length=100,
help_text=_("Qual o nome do aluno?")
)
date_of_birth = models.DateField(
blank=True,
default='1998-03-08',
)
@property
def convert_date_of_birth(self):
return self.date_of_birth.strftime('%d/%m/%y')
@property
def get_telephone_numbers(self):
return list(self.telephone_set.all().values_list( \
'telephone_number', flat=True))
@property
def schools_names(self):
list_of_schools = list(self.school_class.all())
list_of_schools.sort(key=lambda x:x.reference_year)
return [item.__str__() for item in list_of_schools]
image = fields.ImageField(
upload_to="images/",
help_text=_("Coloque a foto do(a) aluno(a)."),
blank=True
)
father_name = models.CharField(
max_length=100,
help_text=_("Qual o nome do pai do(a) aluno(a)?"),
blank=True
)
mother_name = models.CharField(
max_length=100,
help_text=_("Qual o nome da mãe do(a) aluno(a)?"),
blank=True
)
responsible = models.CharField(
max_length=100,
help_text=_("Quem é o responsável pelo(a) aluno(a)?"),
default="Os pais",
)
adress = models.CharField(
max_length=142,
help_text=_("Qual o endereço do(a) aluno(a)?"),
blank=True
)
special_education_needs = models.CharField(
max_length=200,
help_text=_("Qual a necessidade de educação especial?"),
default="Nenhuma",
)
school_class = models.ManyToManyField(Class, blank=True)
def __str__(self):
return "Estudante " + self.name
class Telephone(models.Model):
telephone_number = models.CharField(
max_length=15,
help_text=_("Qual o telefone para contato?"),
blank=True
)
student = models.ForeignKey(Student, on_delete=models.CASCADE,null=True, blank=True)
```
#### File: backend/student/views.py
```python
from django.shortcuts import render
from rest_framework import serializers, viewsets, generics
from .serializers import *
# Create your views here.
def index(request):
return render(request, 'example/index.html')
class StudentViewSet(viewsets.ModelViewSet):
queryset = Student.objects.order_by('name')
serializer_class = StudentSerializer
class TelephoneViewSet(viewsets.ModelViewSet):
queryset = Telephone.objects.all()
serializer_class = TelephoneSerializer
class ClassViewSet(viewsets.ModelViewSet):
queryset = Class.objects.filter(reference_year=2018)
serializer_class = ClassSerializer
class StudentByClassList(generics.ListAPIView):
serializer_class = StudentSerializer
def get_queryset(self):
required_class = self.kwargs['class']
class_id = Class.objects.filter(reference_year=2018,name=required_class). \
get().id
print(class_id)
return Student.objects.filter(school_class=class_id).order_by('name')
``` |
{
"source": "joaorobsonR/algoritmo1",
"score": 3
} |
#### File: algoritmo1/UNIVESPalgortimo_1/testeaula.py
```python
def media(n1, n2):
m = (n1 + n2)/2
return m
print(media(n1=10, n2=5))
def juros(preco, juros):
res = preco * (1 + juros/100)
return res
print(juros(preco=10, juros=50))
``` |
{
"source": "joaorobsonR/livro-python-ex",
"score": 4
} |
#### File: livro-python-ex/livropython/ex03.2.py
```python
def fib(x):
if x < 2:
return x
else:
return fib(x-1) + fib(x-2)
n = int(input('digite um termo da sequencia fibonaci: '))
a = 0
b = 1
i = 0
while n < 2:
print('\033[32mdigite um termo maior ou igual a dois\n'
'pois o primeiro e segundo termo são\n'
'0 e 1 respectivamente.\033[m')
n = int(input('digite um termo da sequencia fibonaci: '))
print('0, 1,', end='')
while i < n - 2:
c = a + b
print(' {},'.format(c), end='')
a = b
b = c
i += 1
print(' fim')
``` |
{
"source": "JoaoRodrigues/binding_affinity",
"score": 2
} |
#### File: JoaoRodrigues/binding_affinity/predict_NIS.py
```python
from __future__ import print_function, division
__author__ = ["<NAME>", "<NAME>"]
import os
import sys
from lib.freesasa import execute_freesasa
from lib.models import NIS
from lib.utils import _check_path
from lib.parsers import parse_structure
from lib import aa_properties
def analyse_nis(sasa_dict, acc_threshold=0.05, selection=None):
"""
Returns the percentages of apolar, polar, and charged
residues at the interface, according to an accessibility
criterion.
"""
_data = aa_properties.aa_character_protorp
_char_to_index = lambda x: {'A': 0, 'C': 1, 'P': 2}.get(x)
count = [0, 0, 0]
for res, rsa in sasa_dict.iteritems():
chain, resn, resi = res
if rsa >= acc_threshold:
aa_character = _data[resn]
aa_index = _char_to_index(aa_character)
count[aa_index] += 1
percentages = map(lambda x: 100*x/sum(count), count)
# print('[+] No. of buried interface residues: {0}'.format(sum(count)))
return percentages
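# Worked example (illustrative; assumes aa_character_protorp maps three-letter
# residue names to 'A'/'C'/'P', e.g. ALA -> 'A', LYS -> 'C', SER -> 'P'):
# for {('A', 'ALA', 1): 0.30, ('A', 'LYS', 2): 0.10, ('A', 'SER', 3): 0.01}
# and acc_threshold=0.05, only the first two residues are counted, giving
# [50.0, 50.0, 0.0] (apolar, charged, polar).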
def calculate_interface_atoms(cmplx_asa, free_asa, sasa_diff_threshold=1):
"""
Calculates number of interface atoms in complex based on
surface area differences between unbound and bound structures.
"""
n_int_atoms = 0
for atom, bound_asa in cmplx_asa.iteritems():
atom_free = free_asa[atom]
asa_change = atom_free - bound_asa
if asa_change >= sasa_diff_threshold:
n_int_atoms += 1
return n_int_atoms
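# Worked example: if an atom exposes 20.0 A^2 in the free molecule and 15.0 A^2
# in the complex, the change (5.0 A^2) exceeds the default 1 A^2 threshold, so
# the atom is counted as part of the interface.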
if __name__ == "__main__":
try:
import argparse
from argparse import RawTextHelpFormatter
except ImportError as e:
print('[!] The binding affinity prediction tools require Python 2.7+', file=sys.stderr)
raise ImportError(e)
ap = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
ap.add_argument('structf', help='Structure to analyse in PDB or mmCIF format')
ap.add_argument('--acc-threshold', type=float, default=0.05, help='Accessibility threshold for BSA analysis')
ap.add_argument('-q', '--quiet', action='store_true', help='Outputs only the predicted affinity value')
_co_help = """
By default, all intermolecular contacts are taken into consideration,
a molecule being defined as an isolated group of amino acids sharing
a common chain identifier. In specific cases, for example
antibody-antigen complexes, some chains should be considered as a
single molecule.
Use the --selection option to provide collections of chains that should
be considered for the calculation. Separate by a space the chains that
are to be considered _different_ molecules. Use commas to include multiple
chains as part of a single group:
--selection A B => Contacts calculated (only) between chains A and B.
--selection A,B C => Contacts calculated (only) between chains A and C; and B and C.
--selection A B C => Contacts calculated (only) between chains A and B; B and C; and A and C.
"""
sel_opt = ap.add_argument_group('Selection Options', description=_co_help)
sel_opt.add_argument('--selection', nargs='+', metavar=('A B', 'A,B C'))
cmd = ap.parse_args()
if cmd.quiet:
_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
# Parse structure
struct_path = _check_path(cmd.structf)
structure, n_chains, n_res = parse_structure(struct_path)
print('[+] Parsed structure file {0} ({1} chains, {2} residues)'.format(structure.id, n_chains, n_res))
# Make groups from user option or PDB chains
if cmd.selection:
selection_dict = {}
for igroup, group in enumerate(cmd.selection):
chains = group.split(',')
for chain in chains:
if chain in selection_dict:
errmsg = 'Selections must be disjoint sets: {0} is repeated'.format(chain)
raise ValueError(errmsg)
selection_dict[chain] = igroup
else:
selection_dict = dict([(c.id, nc) for nc, c in enumerate(structure.get_chains())])
# Complex SASA
cmplx_asa, cmplx_rsa = execute_freesasa(structure, selection=selection_dict)
_, nis_c, nis_p = analyse_nis(cmplx_rsa, acc_threshold=cmd.acc_threshold, selection=selection_dict)
# Interface atoms
free_asa = {}
for group in selection_dict:
group_asa, _ = execute_freesasa(structure, selection=group)
free_asa.update(group_asa)
interface_atoms = calculate_interface_atoms(cmplx_asa, free_asa)
# Affinity Calculation
ba_val = NIS(nis_c, nis_p, interface_atoms)
print('[+] Percentage of polar NIS residues: {0:3.2f}'.format(nis_p))
print('[+] Percentage of charged NIS residues: {0:3.2f}'.format(nis_c))
print('[+] No. of (buried) interface atoms: {0}'.format(interface_atoms))
print('[++] Predicted binding affinity: {0:8.3f}'.format(ba_val))
if cmd.quiet:
sys.stdout = _stdout
print('{0}\t{1:8.3f}'.format(struct_path, ba_val))
``` |
{
"source": "JoaoRodrigues/biopdb-benchmark",
"score": 2
} |
#### File: JoaoRodrigues/biopdb-benchmark/benchmark_parsers.py
```python
import argparse
import gzip
import logging
import os
import pathlib
import sys
import time
import traceback
import warnings
from xml.dom import minidom # for pretty-printing
from xml.etree.ElementTree import (
Element, SubElement, tostring,
)
import psutil
from Bio.PDB import PDBParser, MMCIFParser
from Bio.PDB import PDBIO, MMCIFIO
def setup_logging():
handler = logging.StreamHandler()
formatter = logging.Formatter(
fmt='[%(asctime)s] %(message)s',
datefmt='%H:%M:%S'
)
handler.setFormatter(formatter)
root_logger = logging.getLogger()
root_logger.handlers = [] # clear handler list
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
def summarize_structure(structure):
"""Returns a dictionary with properties of the structure."""
n_models = 0
# per model
n_chains = 0
n_resids = 0
n_ptmuts = 0 # DisorderedResidues
n_icodes = 0
n_aatoms = 0 # all atoms
n_hatoms = 0 # hetatm
n_altloc = 0
for model in structure:
n_models += 1
for chain in model: # all models should be the same anyway
n_chains += 1
for resid in chain:
is_ptmut = resid.is_disordered() == 2
n_ptmuts += int(is_ptmut)
if is_ptmut:
children = resid.disordered_get_list()
else:
children = [resid]
for child in children: # silly, but practical
n_icodes += int(child.id[2] != ' ')
n_resids += 1
for atom in child.get_unpacked_list():
n_hatoms += int(atom.parent.id[0] != ' ')
n_aatoms += 1
n_altloc += int(bool(atom.altloc.strip()))
return {
'models': n_models,
'chains': n_chains,
'residues': n_resids,
'res-icode': n_icodes,
'res-ptmut': n_ptmuts,
'all-atoms': n_aatoms,
'het-atoms': n_hatoms,
'altlocs': n_altloc,
}
def test_element_assignment(structure):
for residue in structure.get_residues():
for atom in residue.get_unpacked_list():
# Test element assignment
og_elem = atom.element.strip()
with warnings.catch_warnings():
warnings.simplefilter('ignore')
atom._assign_element(element=None) # force reassignment
serial = atom.serial_number
name = atom.name
assert og_elem and og_elem == atom.element, \
(
f'Element mismatch for atom {serial}-{name}:'
f' "{og_elem}" != "{atom.element}"'
)
def main():
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument(
'infmt',
choices=['pdb', 'mmcif'],
help='File format of input files.'
)
ap.add_argument(
'folder',
type=pathlib.Path,
help='Top-level folder with input files'
)
ap.add_argument(
'--no-continue',
action='store_true',
default=False,
help='Parses all input files, ignoring existing results.'
)
ap.add_argument(
'--strict',
action='store_true',
default=False,
help='Parse with PDBParser PERMISSIVE=0'
)
args = ap.parse_args()
# Setup logging
setup_logging()
permissive_bool = not args.strict
if args.infmt == 'pdb':
parser = PDBParser(PERMISSIVE=permissive_bool, QUIET=1)
writer = PDBIO()
elif args.infmt == 'mmcif':
parser = MMCIFParser(QUIET=1)
writer = MMCIFIO()
flist = sorted(args.folder.rglob('*.gz'))
xmllist = sorted(args.folder.rglob('*.xml'))
if not args.no_continue and xmllist:
logging.info(f'Found {len(xmllist)} existing result files')
xmlset = {
f.stem: f for f in xmllist
}
fset = {f.stem: f for f in flist}
remainder = set(fset.keys()) - set(xmlset.keys())
logging.info(f'Resuming benchmark: {len(remainder)} files left')
flist = sorted(fset[f] for f in remainder)
else:
logging.info(f'Found {len(flist)} files')
n_digits = len(str(len(flist))) # for fmting
for idx, fpath in enumerate(flist, start=1):
try:
# Parse
with gzip.open(fpath, mode='rt') as handle:
t0 = time.time()
s = parser.get_structure(fpath.name, handle)
t1 = time.time()
read_time = t1 - t0
data = summarize_structure(s)
# Write
writer.set_structure(s)
t0 = time.time()
writer.save('io.temp')
t1 = time.time()
write_time = t1 - t0
# Round-trip
s2 = parser.get_structure('new', 'io.temp')
data2 = summarize_structure(s2)
assert data == data2, f'Summaries differ: {data} != {data2}'
test_element_assignment(s) # raises assert if failed
except Exception as err:
with fpath.with_suffix('.failed').open('w') as f:
print(err, file=f)
print(traceback.format_exc(), file=f)
status = 'failed'
else:
# Write XML file with numbers
root = Element('structure')
root.set('path', fpath.name)
root.set('parse_time', f'{read_time:5.3f}')
root.set('write_time', f'{write_time:5.3f}')
for key, value in data.items():
child = SubElement(root, key)
child.text = str(value)
# Reparse for pretty print
xml = minidom.parseString(tostring(root, 'utf-8'))
# Write to file
with fpath.with_suffix('.xml').open('w') as f:
f.write(xml.toprettyxml(indent=' '))
# Clear XML memory
root.clear()
xml.unlink()
del root, xml
status = 'ok'
finally:
try:
os.remove('io.temp')
except Exception:
pass
memusage = psutil.virtual_memory().percent
logging.info(
f'{idx:>{n_digits}d}/{len(flist)} {fpath.parent.name}/{fpath.name}: {status} | mem% = {memusage}',
) # to check for leaks
if __name__ == '__main__':
main()
``` |
{
"source": "JoaoRodrigues/minnie",
"score": 2
} |
#### File: minnie/core/filtering.py
```python
import pandas as pd
import sys
import glob
import os
import re
import numpy as np
import logging
logging.basicConfig(stream=sys.stdout,
level=logging.INFO,
format='[%(asctime)s] %(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
#inside pathx (MD)
def time_freq_filter(filex,complexName,per):
pathx = os.getcwd()
file = os.path.basename(filex)
fName = complexName
bondtype = file.split(".csv")[0].split("_merged_")[1]
first = pd.read_csv(filex)
os.chdir(pathx)
if not os.path.exists(f'{complexName}/04_time_freq_filter'):
os.makedirs(f'{complexName}/04_time_freq_filter', exist_ok=True)
pathxx=f'{pathx}/{complexName}/04_time_freq_filter'
os.chdir(pathxx)
pathy=pathxx+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered", exist_ok=True)
os.chdir(pathy)
if first.empty:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=["donor_acceptor","NumSpp","total","percentage"])
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres=pd.DataFrame(columns=['itype', 'donor_chain', 'acceptor_chain', 'donor_resnm', 'acceptor_resnm',
'donor_resid','acceptor_resid', 'donor_atom', 'acceptor_atom','chain_type',
"prot_or_dna",'specificity',"time"])
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
else:
#fIRST
logging.info('Finding percentages: {}'.format(fName))
firstx = []
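        # For each donor-acceptor pair, compute the percentage of frames in which the bond is observed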
for adx in first.donor_acceptor.unique () :
bbx = first[first["donor_acceptor"] == adx]
firstx.append([adx,
bbx.time.unique().size/first.time.unique().size*100])
firstxy = pd.DataFrame(firstx)
firstxy.columns = ["donor_acceptor","percentage"]
logging.info('Writing to file percentage: {}'.format(fName))
morefirstxy = firstxy[firstxy.percentage > float(per)]
if len(morefirstxy.donor_acceptor) == 0:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy = pd.DataFrame(columns=firstxy.columns)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
os.chdir("..")
if not os.path.exists(str(per) + "_freq_perres"):
os.makedirs(str(per) + "_freq_perres")
pathq = pathy + "/" + str(per) + "_freq_perres"
os.chdir(pathq)
first_perres= pd.DataFrame(columns=first.columns)
first_perres.to_csv(pathq + "/" + fName + "_" + bondtype + "_" + str(per) + "_freq_perres.csv", index=None)
else:
pathz = pathy + "/" + str(per) + "_freq"
if not os.path.exists(str(per) + "_freq"):
os.makedirs(str(per) + "_freq")
os.chdir(pathz)
morefirstxy.to_csv (pathz+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq.csv", index=None)
logging.info('Writing to file list: {}'.format(fName))
first_perres = pd.DataFrame()
for da in morefirstxy.donor_acceptor.unique():
df = first[first.donor_acceptor == da]
first_perres=first_perres.append(df)
first_perres.sort_values(by="time",inplace=True)
            first_perres.reset_index(drop=True, inplace=True)
os.chdir("..")
if not os.path.exists(str(per)+"_freq_perres"):
os.makedirs(str(per)+"_freq_perres")
pathq=pathy+"/"+str(per)+"_freq_perres"
os.chdir(pathq)
first_perres.to_csv (pathq+"/"+fName+"_"+bondtype+"_"+str(per)+"_freq_perres.csv", index=None)
def make_freq_folders(pathy,per):
"""
Creates folders to write and read common and complex-specific bonds within 05_compare_cx_spp folder
:param pathy: path to 05_compare_cx_spp
:param per: time percentage
"""
import os
os.chdir(pathy)
pathz=pathy+"/"+str(per)+"_freq_filtered"
if not os.path.exists(str(per)+"_freq_filtered"):
os.makedirs(str(per)+"_freq_filtered",exist_ok=True)
for fold in ["_freq","_freq_perres"]:
os.chdir(pathz)
#to add freq
pathq=pathz+"/"+str(per)+fold
if not os.path.exists(str(per)+fold):
os.makedirs(str(per)+fold,exist_ok=True)
os.chdir(pathq)
pathq_common=pathq+"/common"
if not os.path.exists("common"):
os.makedirs("common",exist_ok=True)
os.chdir(pathq)
pathq_spp=pathq+"/complex_specific"
if not os.path.exists("complex_specific"):
os.makedirs("complex_specific",exist_ok=True)
def get_paths(pathy,per,fold,com_spp):
import os
os.chdir(pathy)
PathToWrite = pathy + "/" + per + "_" + "freq_filtered/" + per + fold + "/" + com_spp
return PathToWrite
def compare_bonds(complexName,per):
pathx = os.getcwd()
fName = complexName[0]
sName = complexName[1]
file_lists_freq_fName = glob.glob(f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq_sName = glob.glob(f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}_freq/*csv')
file_lists_freq = file_lists_freq_fName + file_lists_freq_sName
ToCompare = {}
for filex in file_lists_freq:
file = os.path.basename(filex)
if fName in filex:
Name = fName
else:
Name = sName
bondtype = file.split(f'{Name}_')[1].split("_")[0]
if bondtype == "ring":
bondtype = "ring_stacking"
first = pd.read_csv(filex)
if bondtype in ToCompare.keys():
ToCompare[bondtype].update({Name: first})
else:
ToCompare.update({bondtype: {Name: first}})
for bondtype in ToCompare.keys():
os.chdir(pathx)
pathy = f'{pathx}/{fName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{fName}/05_compare_complex'):
os.makedirs(f'{pathx}/{fName}/05_compare_complex',exist_ok=True)
os.chdir(pathy)
pathz = f'{pathx}/{sName}/05_compare_complex'
if not os.path.exists(f'{pathx}/{sName}/05_compare_complex'):
os.makedirs(f'{pathx}/{sName}/05_compare_complex',exist_ok=True)
os.chdir(pathz)
make_freq_folders(pathy, per)
fold="_freq"
morefirstxy = ToCompare[bondtype][fName]
fold="_freq_perres"
patha=f'{pathx}/{fName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
first = pd.read_csv(patha+"/"+fName+"_"+bondtype+"_"+str(per)+fold+".csv")
#SECOND
make_freq_folders(pathz, per)
fold="_freq"
moresecxy = ToCompare[bondtype][sName]
logging.info("sName : {}".format(sName))
fold="_freq_perres"
patha=f'{pathx}/{sName}/04_time_freq_filter/{str(per)}_freq_filtered/{str(per)}{fold}'
sec = pd.read_csv(patha+"/"+sName+"_"+bondtype+"_"+str(per)+fold+".csv")
#find bonds specific to first one
logging.info("Specific to {}".format(fName))
i = 0
spp_first= pd.DataFrame(columns=morefirstxy.columns)
common_first= pd.DataFrame(columns=morefirstxy.columns)
for item in morefirstxy.donor_acceptor:
item_swapped = item.split(":")[1]+":"+item.split(":")[0]
if item in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
elif item_swapped in moresecxy.donor_acceptor.unique():
common_first = common_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
else:
spp_first = spp_first.append(pd.DataFrame(morefirstxy.iloc[i,:]).transpose())
i = i+1
        spp_first.sort_values(by="donor_acceptor", ascending=False, inplace=True)
spp_first.reset_index(drop=True,inplace=True)
fold="_freq"
com_spp="complex_specific"
pathq_spp=get_paths(pathy,str(per),fold,com_spp)
spp_first.to_csv (pathq_spp+"/"+fName+"_"+bondtype+"_compared_spec.csv", index=False)
        common_first.sort_values(by="donor_acceptor", ascending=False, inplace=True)
common_first.reset_index(drop=True,inplace=True)
com_spp="common"
pathq_common=get_paths(pathy,str(per),fold,com_spp)
common_first.to_csv (pathq_common+"/"+fName+"_"+bondtype+"_compared_common.csv", index=False)
#find bonds specific to second one
logging.info("Specific to {}".format(sName))
i = 0
spp_sec= pd.DataFrame(columns=moresecxy.columns)
common_sec= pd.DataFrame(columns=moresecxy.columns)
for item in moresecxy.donor_acceptor:
item_swapped = item.split(":")[1] + ":" + item.split(":")[0]
if item in morefirstxy.donor_acceptor.unique():
common_sec = common_sec.append(pd.DataFrame(moresecxy.iloc[i,:]).transpose())
elif item_swapped in morefirstxy.donor_acceptor.unique():
common_sec = common_sec.append(pd.DataFrame(moresecxy.iloc[i,:]).transpose())
else:
spp_sec = spp_sec.append(pd.DataFrame(moresecxy.iloc[i,:]).transpose())
i = i+1
        spp_sec.sort_values(by="donor_acceptor", ascending=False, inplace=True)
spp_sec.reset_index(drop=True,inplace=True)
fold="_freq"
com_spp="complex_specific"
pathq_spp=get_paths(pathz,str(per),fold,com_spp)
spp_sec.to_csv (pathq_spp+"/"+sName+"_"+bondtype+"_compared_spec.csv", index=False)
com_spp = "common"
pathq_common = get_paths(pathz, str(per), fold, com_spp)
        common_sec.sort_values(by="donor_acceptor", ascending=False, inplace=True)
common_sec.reset_index(drop=True,inplace=True)
common_sec.to_csv (pathq_common+"/"+sName+"_"+bondtype+"_compared_common.csv", index=False)
#find bonds specific to first one
logging.info("Specific list to {}".format(fName))
spp_first_perres= pd.DataFrame(columns=first.columns)
for item in spp_first.donor_acceptor.unique():
f = first[first.donor_acceptor == item]
spp_first_perres = spp_first_perres.append(f)
        spp_first_perres.sort_values(by="time", ascending=False, inplace=True)
spp_first_perres.reset_index(drop=True,inplace=True)
fold="_freq_perres"
com_spp="complex_specific"
pathl_spp=get_paths(pathy,str(per),fold,com_spp)
spp_first_perres.to_csv (pathl_spp+"/"+fName+"_"+bondtype+"_compared_spec_perres.csv", index=False)
common_first_perres= pd.DataFrame(columns=first.columns)
for item in common_first.donor_acceptor.unique():
f = first[first.donor_acceptor == item]
common_first_perres = common_first_perres.append(f)
        common_first_perres.sort_values(by="time", ascending=False, inplace=True)
common_first_perres.reset_index(drop=True,inplace=True)
com_spp="common"
pathl_common=get_paths(pathy,str(per),fold,com_spp)
common_first_perres.to_csv (pathl_common+"/"+fName+"_"+bondtype+"_compared_common_perres.csv", index=False)
#find bonds specific to second one
spp_sec_perres= pd.DataFrame(columns=sec.columns)
for item in spp_sec.donor_acceptor.unique():
f = sec[sec.donor_acceptor == item]
spp_sec_perres = spp_sec_perres.append(f)
        spp_sec_perres.sort_values(by="time", ascending=False, inplace=True)
        spp_sec_perres.reset_index(drop=True,inplace=True)
        fold="_freq_perres"
        com_spp="complex_specific"
        pathl_spp=get_paths(pathz,str(per),fold,com_spp)
        logging.info(f'Writing to {pathl_spp}')
        logging.info(f'Writing to {pathl_spp}/{sName}_{bondtype}_compared_spec_perres.csv')
spp_sec_perres.to_csv (pathl_spp+"/"+sName+"_"+bondtype+"_compared_spec_perres.csv", index=False)
common_sec_perres= pd.DataFrame(columns=sec.columns)
for item in common_first.donor_acceptor.unique():
f = sec[sec.donor_acceptor == item]
common_sec_perres = common_sec_perres.append(f)
        common_sec_perres.sort_values(by="time", ascending=False, inplace=True)
        common_sec_perres.reset_index(drop=True,inplace=True)
        com_spp="common"
        pathl_common=get_paths(pathz,str(per),fold,com_spp)
        logging.info(f'Writing to {pathl_common}')
        logging.info(f'Writing to {pathl_common}/{sName}_{bondtype}_compared_common_perres.csv')
common_sec_perres.to_csv (pathl_common+"/"+sName+"_"+bondtype+"_compared_common_perres.csv", index=False)
```
#### File: interfacea/tests/test_interactions.py
```python
import os
import unittest
import interfacea as ia
TESTDIR = os.path.dirname(os.path.abspath(__file__))
class TestInteractionAnalyzer(unittest.TestCase):
"""Test suite for the InteractionAnalyzer class.
"""
def setUp(self):
pp_path = os.path.join(TESTDIR, 'data', 'protein_protein.cif')
pl_path = os.path.join(TESTDIR, 'data', 'protein_ligand.pdb')
pp_complex = ia.read(pp_path)
pl_complex = ia.read(pl_path)
self.pp_ia = ia.InteractionAnalyzer(pp_complex)
self.pl_ia = ia.InteractionAnalyzer(pl_complex)
# Test Finders
def test_find_cations(self):
"""testing find_cations()
"""
pp_ia = self.pp_ia
pp_ia.cations = None # clear just in case
pp_ia.find_cations()
n_res_cations = len(pp_ia.cations) # should be 24
n_cations = sum(map(len, pp_ia.cations.values())) # should be 25
self.assertEqual(n_res_cations, 24)
self.assertEqual(n_cations, 25)
def test_find_anions(self):
"""testing find_anions()
"""
pp_ia = self.pp_ia
pp_ia.anions = None
pp_ia.find_anions()
n_res_anions = len(pp_ia.anions)
n_anions = sum(map(len, pp_ia.anions.values()))
self.assertEqual(n_res_anions, 28)
self.assertEqual(n_anions, 28)
def test_find_hydrophobic(self):
"""testing find_hydrophobics()
"""
pp_ia = self.pp_ia
pp_ia.hydrophobics = None
pp_ia.find_hydrophobics()
n_res = len(pp_ia.hydrophobics)
n_groups = sum(map(len, pp_ia.hydrophobics.values()))
self.assertEqual(n_res, 166)
self.assertEqual(n_groups, 166)
```
#### File: interfacea/tests/test_read.py
```python
"""
Unit tests for read() function
"""
import os
import unittest
import interfacea as ia
TESTDIR = os.path.dirname(os.path.abspath(__file__))
class TestRead(unittest.TestCase):
def setUp(self):
self.n_residues = 6
self.n_atoms = 106
self.n_chains = 2
def test_missingFile(self):
"""Tests exception throwing when reading non-existent file.
"""
fpath = os.path.join(TESTDIR, 'illusions', 'void.pdb')
with self.assertRaises(ia.structure.StructureError):
ia.read(fpath)
def test_notSupportedExtension(self):
"""Tests exception throwing when reading file with unsupported extension.
"""
fpath = os.path.join(TESTDIR, 'illusions', 'void.pdb')
with self.assertRaises(ia.structure.StructureError):
ia.read(fpath)
def test_wrongFileType(self):
"""Tests exception throwing when reading file with wrong user-defined type.
"""
fpath = os.path.join(TESTDIR, 'data', 'mini.pdb')
with self.assertRaises(ia.structure.StructureError):
ia.read(fpath, ftype='cif')
def test_readPDB(self):
"""Tests reading/parsing a sample PDB file.
"""
fpath = os.path.join(TESTDIR, 'data', 'mini.pdb')
s = ia.read(fpath)
top = s.topology
self.assertEqual(top.getNumAtoms(), self.n_atoms)
self.assertEqual(top.getNumResidues(), self.n_residues)
self.assertEqual(top.getNumChains(), self.n_chains)
def test_readCIF(self):
"""Tests reading/parsing a sample mmCIF file.
"""
fpath = os.path.join(TESTDIR, 'data', 'mini.cif')
s = ia.read(fpath)
top = s.topology
self.assertEqual(top.getNumAtoms(), self.n_atoms)
self.assertEqual(top.getNumResidues(), self.n_residues)
self.assertEqual(top.getNumChains(), self.n_chains)
```
#### File: JoaoRodrigues/minnie/minnie.py
```python
import os
import pandas as pd
import logging
import sys
import argparse
import core
from core import analysis
from core import filtering
from core import graphs
from core import clean
import glob
import pathos
from pathos.multiprocessing import ProcessingPool as Pool
###### ---- Preparations ---- ######
parser = argparse.ArgumentParser(prog='minnie')
subparsers = parser.add_subparsers(dest="subcommand", title="Commands", metavar="", help="")
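# The two helpers below implement a small decorator-based CLI registration scheme:
# @subcommand() (applied first, being the innermost decorator) turns the function into an
# argparse subparser named after it, and each stacked @option(...) adds an argument to it.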
def subcommand(parent=subparsers):
def decorator(func):
parser = subparsers.add_parser(func.__name__, help= func.__doc__ , conflict_handler='resolve')
parserg = parser.add_argument_group("parameters to give")
parserg.set_defaults(func=func)
return parserg
return decorator
def option(*args,**kwargs):
def decorator(parserg):
parserg.add_argument(*list(args), **kwargs)
return parserg
return decorator
###### ---- Subcommands ---- ######
# ---- split pdbs ---- #
@option('--help','-h',action='store_true')
@option('--complexName','-cn',dest="complexName",nargs="*", help="Project ID of your complex(s)")
@option('--pdbs','-p',nargs="*", help="Give trajectory(s) as *.pdb")
@subcommand()
def splitpdbs(self):
"""Split trajectory into single frames"""
if self.help:
print(f'\n\033[1mUsage: minnie splitpdbs \n'
f' -cn, --complexName <string> <string> \n '
f' Project ID of your complex(s)\n\n'
f' -p, --pdbs [<traj.pdb>] [<traj.pdb>] \n'
f' Trajectory ID of your complex(s)\033[0m \n\n\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" minnie splitpdbs -cn sox4 sox18 -p sox4.pdb sox18.pdb \n"
" minnie splitpdbs -cn sox4 -p sox4.pdb \n")
elif not self.pdbs:
print(f'where is pdb??')
elif not self.complexName:
print(f'Please specify complex name(s)')
elif len(self.pdbs) == 1:
analysis.split_pdbs(self.pdbs[0], self.complexName[0])
elif len(self.pdbs) > 1:
for i in range(len(self.pdbs)):
analysis.split_pdbs(self.pdbs[i],self.complexName[i])
# ---- find bonds ---- #
@option('--help','-h',action='store_true')
@option('--systematic','-s',choices=["True","False"], help="Use this option if you want to analyze multiple pdbs in a folder",
default="True")
@option('--pdbs','-p',nargs="*", help="Give single *.pdb or give folder path ")
@option('--complexName','-cn', help="Project ID of your complex(s)")
@option('-i', choices=["hbonds","ionic","hydrophobic","ring_stacking","all"],default="hbonds",
dest="intType", help="choose interaction type")
@option('-intra',"--includeIntra", choices=["True","False"],default=False,
help="includes intramolecular residue pairs in the search. By default, False.")
@option("-d", default=2.5, dest="hbond_distance",help=" cutoff value to find hbonds")
@subcommand()
def findbonds(self):
"""Calculates interactions between and/or within monomers"""
if self.help:
print("Calculates interactions between and/or within monomers\n"
f'\n\033[1mUsage: minnie findbonds \n'
f' -cn, --complexName <string> \n '
f' Project ID of your complex\n\n'
f' -p, --pdbs [<.pdb>/<path>] (singleframe.pdb) \n'
f' Give single *.pdb or give folder path \n\n'
f' -i [<hbonds>/<ionic>/<hydrophobic>/<ring_stacking>/<all>] (hbonds) \n'
f' Calculates which types of interactions \n\n'
f' -d <float> (2.5) \n'
f' Cut-off to define a hydrogen bond\n\n'
f' -intra, --includeIntra [<"True">/<"False">] ("False") \n'
f' What do you want to analyze, all or only inter-monomer contacts? \033[0m \n\n\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" Single frame - minnie findbonds -cn sox4 -p sox4/02_frames/md_0.pdb -i hbonds -s False \n"
" Multiple frames - minnie findbonds -cn sox4 -p sox4/02_frames/* -i hbonds \n"
" Multiple frames - minnie findbonds -cn sox4 -p sox4/02_frames/* -i all \n"
)
elif not self.pdbs:
print(f'where is pdb??')
elif not self.complexName:
print(f'Please specify complex name(s)')
elif (self.systematic) == "True":
pdb_list = self.pdbs
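        # Analyze all frames in parallel, leaving two CPU cores free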
if (self.intType == "all"):
for intType in ["hbonds", "ionic", "hydrophobic", "ring_stacking"]:
pool = Pool(pathos.multiprocessing.cpu_count() - 2)
pool.map(analysis.comb_int, pdb_list, len(pdb_list) * [str(self.complexName)],
len(pdb_list) * [str(intType)], len(pdb_list) * [str(self.includeIntra)],
len(pdb_list) * [str(self.hbond_distance)])
#pool.close()
else:
pool = pathos.multiprocessing.ProcessingPool(pathos.multiprocessing.cpu_count() - 2)
pool.map(analysis.comb_int, pdb_list, len(pdb_list) * [str(self.complexName)],
len(pdb_list) * [str(self.intType)],len(pdb_list) * [str(self.includeIntra)],
len(pdb_list) * [str(self.hbond_distance)] )
pool.close()
analysis.combine_interfacea_results(self.complexName)
elif (self.systematic) == "False":
if (self.intType == "all"):
for intType in ["hbonds", "ionic", "hydrophobic", "ring_stacking"]:
analysis.comb_int(self.pdbs[0], self.complexName, intType, self.includeIntra, self.hbond_distance)
else:
analysis.comb_int(self.pdbs[0],self.complexName,self.intType,self.includeIntra,self.hbond_distance)
analysis.combine_interfacea_results(self.complexName)
# ---- Time filtering ---- #
@option('--help','-h',action='store_true')
@option('--files','-f',nargs="*", help="Files")
@option('--complexName','-cn',help="Project ID of your complex(s)")
@option('--per', help="observation frequency (in %) to classify an interaction as critical?", type = int)
@subcommand()
def timefilter(self):
"""Apply critical interaction filter"""
path_back = os.getcwd()
if self.help:
print(f'\n\033[1mUsage: minnie timefilter \n'
f' -cn, --complexName <string> \n '
f' Project ID of your complex\n\n'
f' -f, --files [<.csv>] \n'
f' Files of your complex\n\n'
f' --per <float> \n'
f' Observation frequency to classify an interaction as critical\033[0m \n\n\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" Single file - minnie timefilter -f sox4/03_interfacea_results/hbonds/sox4_merged_hbonds.csv -cn sox4 --per 25 \n"
" Multiple files - minnie timefilter -f sox4/03_interfacea_results/*/sox4_merged_*.csv -cn sox4 --per 25 \n"
)
elif not self.files:
print(f'\nwhere is the file(s) ??\n')
elif not self.complexName:
print(f'\nPlease specify a complex name(s) !!\n')
elif not self.per:
print(f'\nPlease specify a cutoff value to filter out bonds !!\n')
if (self.per):
for filex in self.files:
os.chdir(path_back)
filtering.time_freq_filter(filex,self.complexName,self.per)
#pool = pathos.multiprocessing.ProcessingPool(pathos.multiprocessing.cpu_count() - 2)
#pool.map(filtering.time_freq_filter, self.files,
# len(self.files)*[self.complexName],
# len(self.files)*[self.per])
#pool.close()
# ---- Compare interaction networks between two complex ---- #
@option('--help','-h',action='store_true')
@option('--per', help="observation frequency (in %) to classify an interaction as critical?", type = int)
@option('--complexName','-cn',nargs=2, help="Project ID of your complex(s)")
@subcommand()
def compareCX(self):
"""Calculate common and distinct interactions in two cases"""
if self.help:
print(f'\n\033[1mUsage: minnie compareCX \n'
f' -cn, --complexName <string> <string> \n '
f' Project ID of your complex(s)\n\n'
f' --per <float> \n'
f' Observation frequency to classify an interaction as critical\033[0m \n\n\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" minnie compareCX -cn sox4 sox18 --per 25 \n")
else:
filtering.compare_bonds(self.complexName,self.per)
# ---- draw graphs! ---- #
@option('--help','-h',action='store_true')
@option('--per', help="observation frequency (in %) to classify an interaction as critical?", type = int)
@option('--colors',nargs="*", help="Color IDs of the complexes (for plotting)",default=['#D9B4CC', '#6F81A6'])
@option('--chainIDs','-c',nargs=2,dest="chains", help="Give chainID(s)")
@option('--complexName','-cn',nargs=2, help="Project ID of your complex(s)")
@option('-s', choices=["specific","common"],default="specific", dest="spp")
@option('-i', choices=["hbonds","ionic","hydrophobic","ring_stacking","all"],default="hbonds",
dest="intType", help="choose interaction type")
@option('-b',"--between", choices=["protein-dna","all"], dest="between")
@option('--filename', dest="filename",help="filename to use while writing graph",default="")
@subcommand()
def graph(self):
"""aaaaand graphs!"""
try:
if self.help:
print(f'\n\033[1mUsage: minnie graph \n'
f' -cn, --complexName <string> <string> \n'
f' Project IDs of your complex(s)\n\n'
f' --per <float> \n'
f' Observation frequency to classify an interaction as critical \n\n'
f' -b, --between [<protein-dna>/<all>] (all) \n'
f' Between protein-dna or keep all \n\n'
f' -c, --chainIDs <string> <string> \n'
f' Give ChainIDs to proceed\n\n'
f' --filename <string> \n'
f' Give a name to output file (optional)\n\n'
f' --colors [<hex colors>] ("#D9B4CC", "#6F81A6") \n'
f' Color IDs of the complexes (optional)\n\n'
f' -i [<hbonds>/<ionic>/<hydrophobic>/<ring_stacking>/<all>] (hbonds) \n'
f' Calculates which types of interactions \n\n'
f' -s [<specific>/<common>] (specific) \n'
f' Complex-specific or common interactions\033[0m \n\n\n\n'
f'Please do not use "--between" and "--chainIDs" options at the same time\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" minnie graph -cn 'sox4' 'sox18' --per 25 -i hbonds -s specific -c A+B C --colors '#D9B4CC' '#6F81A6' \n"
" minnie graph -cn 'sox4' 'sox18' --per 25 -i ionic -c A+B C \n"
" minnie graph -cn 'sox4' 'sox18' --per 25 -i ionic -b protein-dna \n"
" minnie graph -cn 'sox4' 'sox18' --per 25 -i ionic -b protein-dna --filename sox4_sox18_protein_dna \n")
elif (self.between) and (self.chains):
raise Exception()
elif self.intType == "all":
print(self.between)
for intTypex in ["hbonds","ionic","hydrophobic","ring_stacking"]:
if (self.between):
print(intTypex)
df_collec=graphs.filter_todnaall(self.complexName,self.between,self.spp,self.per,str(intTypex))
graphs.draw_fig(df_collec, str(intTypex), self.complexName[0], self.complexName[1],
self.colors[0], self.colors[1],self.filename, self.spp)
elif (self.chains):
df_collec=graphs.filter_todraw(self.complexName,self.chains,self.spp,self.per,str(intTypex))
graphs.draw_fig(df_collec, str(intTypex), self.complexName[0], self.complexName[1],
self.colors[0], self.colors[1],self.filename, self.spp)
elif self.between == "protein-dna":
df_collec=graphs.filter_todnaall(self.complexName,self.between,self.spp,self.per,self.intType)
graphs.draw_fig(df_collec, self.intType, self.complexName[0], self.complexName[1],
self.colors[0], self.colors[1],self.filename, self.spp)
elif self.between == "all":
df_collec=graphs.filter_todnaall(self.complexName,self.between,self.spp,self.per,self.intType)
graphs.draw_fig(df_collec, self.intType, self.complexName[0], self.complexName[1],
self.colors[0], self.colors[1],self.filename, self.spp)
else:
df_collec=graphs.filter_todraw(self.complexName,self.chains,self.spp,self.per,self.intType)
graphs.draw_fig(df_collec, self.intType, self.complexName[0], self.complexName[1],
self.colors[0], self.colors[1],self.filename, self.spp)
except TypeError:
print(f'\nPlease check given parameters''')
except Exception:
print(f'\nPlease, either use -b or -c option''')
# ---- Clean ---- #
@option('--help','-h',action='store_true')
@option('--complexName','-cn',nargs=2, help="Project ID of your complex(s)")
@subcommand()
def clean(self):
"""To remove unnecessary folders"""
if self.help:
print(f'\n\033[1mUsage: minnie clean \n'
f' -cn, --complexName <string> <string> \n '
f' Project ID of your complex(s)\n\n'
f'\n\033[1mUsage example:\033[0m\n\n'
" minnie clean -cn sox4 sox18 \n")
else:
try:
core.clean.cleanx(self.complexName[0])
except FileNotFoundError:
print(f'Nothing to clean inside {self.complexName[0]}')
try:
core.clean.cleanx(self.complexName[1])
except FileNotFoundError:
print(f'Nothing to clean inside {self.complexName[1]}')
if __name__ == "__main__":
args = parser.parse_args()
if args.subcommand is None:
parser.print_help()
else:
args.func(args)
``` |
{
"source": "JoaoRodrigues/pdb-tools",
"score": 3
} |
#### File: pdb-tools/pdbtools/pdb_tidy.py
```python
import os
import sys
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def check_input(args):
"""Checks whether to read from stdin/file and validates user input/options.
"""
# Defaults
option = False
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe with default option
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# One of two options: option & Pipe OR file & default option
if args[0] == '-strict':
option = True
if sys.stdin.isatty(): # ensure the PDB data is streamed in
emsg = 'ERROR!! No data to process!\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
else:
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
elif len(args) == 2:
# Two options: option & File
if not args[0] == '-strict':
emsg = 'ERROR! First argument is not a valid option: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
if not os.path.isfile(args[1]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[1]))
sys.stderr.write(__doc__)
sys.exit(1)
option = args[0][1:]
fh = open(args[1], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
return (option, fh)
def tidy_pdbfile(fhandle, strict=False):
"""Adds TER/END statements and pads all lines to 80 characters.
If strict is True, does not add TER statements at intra-chain breaks.
"""
not_strict = not strict
def make_TER(prev_line):
"""Creates a TER statement based on the last ATOM/HETATM line.
"""
# Add last TER statement
serial = int(prev_line[6:11]) + 1
rname = prev_line[17:20]
chain = prev_line[21]
resid = prev_line[22:26]
icode = prev_line[26]
return fmt_TER.format(serial, rname, chain, resid, icode)
# TER 606 LEU A 75
fmt_TER = "TER {:>5d} {:3s} {:1s}{:>4s}{:1s}" + " " * 53 + "\n"
records = ('ATOM', 'HETATM')
ignored = ('TER', 'END ', 'END\n', 'CONECT', 'MASTER')
# Iterate up to the first ATOM/HETATM line
prev_line = None
for line in fhandle:
if line.startswith(ignored): # to avoid matching END _and_ ENDMDL
continue
line = line.strip() # We will pad/add \n later to make uniform
# Check line length
line = "{:<80}\n".format(line)
yield line
if line.startswith(records):
prev_line = line
break
# Now go through all the remaining lines
atom_section = False
serial_offset = 0 # To offset after adding TER records
for line in fhandle:
if line.startswith(ignored):
continue
line = line.strip()
# Treat ATOM/HETATM differently
# - no TER in HETATM
if line.startswith('ATOM'):
is_gap = (int(line[22:26]) - int(prev_line[22:26])) > 1
if atom_section and (line[21] != prev_line[21] or (not_strict and is_gap)):
serial_offset += 1 # account for TER statement
yield make_TER(prev_line)
serial = int(line[6:11]) + serial_offset
line = line[:6] + str(serial).rjust(5) + line[11:]
prev_line = line
atom_section = True
elif line.startswith('HETATM'):
if atom_section:
atom_section = False
serial_offset += 1 # account for TER statement
yield make_TER(prev_line)
serial = int(line[6:11]) + serial_offset
line = line[:6] + str(serial).rjust(5) + line[11:]
prev_line = line
elif line.startswith('ANISOU'):
# Fix serial based on previous atom
# Avoids doing the offset again
serial = int(prev_line[6:11])
line = line[:6] + str(serial).rjust(5) + line[11:]
else:
if atom_section:
atom_section = False
yield make_TER(prev_line)
if line.startswith('MODEL'):
serial_offset = 0
if serial > 99999:
                emsg = 'ERROR!! Structure contains more than 99,999 atoms.\n'
sys.stderr.write(emsg)
sys.stderr.write(__doc__)
sys.exit(1)
# Check line length
line = "{:<80}\n".format(line)
yield line
else:
if atom_section:
# Add last TER statement
atom_section = False
yield make_TER(prev_line)
# Add END statement
yield "{:<80}\n".format("END")
def main():
# Check Input
strict, pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = tidy_pdbfile(pdbfh, strict)
try:
_buffer = []
_buffer_size = 5000 # write N lines at a time
for lineno, line in enumerate(new_pdb):
if not (lineno % _buffer_size):
sys.stdout.write(''.join(_buffer))
_buffer = []
_buffer.append(line)
sys.stdout.write(''.join(_buffer))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
``` |
{
"source": "JoaoRodrigues/pydca",
"score": 2
} |
#### File: pydca/tests/test_HMMERWrapper.py
```python
import os
import sys
import unittest
from pydca.wrappers import HMMERWrapper
from pydca.wrappers.HMMERWrapper import ParseError
class WrapperTest(unittest.TestCase):
def setUp(self):
"""Common across all tests"""
self.hw = HMMERWrapper
modpath = os.path.abspath(os.path.dirname(__file__))
self.seqfile = os.path.join(modpath, 'data', 'P00929.fasta')
self.badfile = os.path.join(modpath, 'data', 'bad.fasta')
def test_fromfile(self):
"""Reading FASTA file"""
wrapper = self.hw(self.seqfile, mock=True)
n_seqs = len(wrapper.sequences)
self.assertEqual(n_seqs, 2)
def test_fromhandle(self):
"""Reading file-like object"""
with open(self.seqfile, 'r') as handle:
wrapper = self.hw(handle, mock=True)
def test_fromlist(self):
"""Reading an unsupported input format (and failing)"""
self.assertRaises(TypeError, self.hw, [])
def test_readbadformat(self):
"""Reading a bad FASTA file (and failing)"""
self.assertRaises(ParseError, self.hw, self.badfile)
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "joaoronaldocunha/meetup-scraper",
"score": 3
} |
#### File: joaoronaldocunha/meetup-scraper/meetup.py
```python
import urllib.request
import json
import csv
import time, dateutil.parser
from tkinter import *
from secrets import MEETUP_API
VERSION = (0,1,0)
class MeetupAPI:
def __init__(self, api_key=MEETUP_API):
self.api_key = api_key
self.groups = []
self.retries = 3
def _request_json(self, request):
url = "https://api.meetup.com{}&sign=true&key={}".format(request, self.api_key)
try:
f = urllib.request.urlopen(url)
self.retries = 3
except urllib.error.HTTPError as ex:
ratelimit_remaining = int(ex.getheader("X-RateLimit-Remaining"))
ratelimit_reset = int(ex.getheader("X-RateLimit-Reset"))
if (ratelimit_remaining == 0):
print("The limit of requests has been reached. waiting {} seconds for the reset".format(ratelimit_reset))
time.sleep(ratelimit_reset)
self.retries -= 1
if (self.retries > 0):
print("Trying request again")
return self._request_json(request)
else:
raise ex
str_response = f.read().decode('utf-8')
obj = json.loads(str_response)
return obj
def get_categories(self):
api_category = "/2/categories?"
return self._request_json(api_category)
def print_categories(self):
results = self.get_categories()["results"]
for result in results:
print("{}: {}".format(result["id"], result["name"]))
def get_groups(self, category, latitude, longitude, offset=0):
page = 200 # seems like 200 is the maximum
groups = self._request_json("/find/groups?&page={}&photo-host=public&category={}&lat={}&lon={}&order=most_active&offset={}".format(page, category, latitude, longitude, offset))
if groups != []:
self.get_groups(category, latitude, longitude, offset+1)
self.groups += groups
return self.groups
def get_events(self, group_id, page=200, skip_before=0):
next_event_page = "/2/events?&photo-host=public&group_id={}&page={}&time={},&status=upcoming,past".format(group_id, page, int(skip_before))
results = []
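        # Follow the paginated "next" links from the response metadata until no further page remains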
while next_event_page != "":
events = self._request_json(next_event_page)
results += events["results"]
next_event_page = events["meta"]["next"]
        return results
def get_lat_long(self, location_query):
""" returns a tuple (lat, lon) with the latitude and longitude of the searched query (top result) """
api_lat_long = "/2/cities?photo-host=public&query={}&page=1".format(location_query)
json_lat_long = self._request_json(api_lat_long)
print("Location selected: {}".format(json_lat_long["results"][0]["name_string"]))
return (json_lat_long["results"][0]["lat"], json_lat_long["results"][0]["lon"])
def run_scraper(meetup, category, location, skip_before):
filename = 'meetup.csv'
lat, lon = meetup.get_lat_long(location)
meetup.get_groups(category, lat, lon)
print("Found {} groups".format(len(meetup.groups)))
try:
f = csv.writer(open(filename, 'w', newline=''))
except PermissionError:
print("Error! Is the spreadsheet being used?")
return
print("Pulling data from Meetup... This may take a while")
f.writerow(["ID", "Name", "Category", "Members", "City", "Country", "Link", "Total Events (after {})".format(skip_before.strftime('%d/%m/%Y')), "Avg Attendees per Event", "Date Created"])
counter = 0
for x in meetup.groups:
try:
counter += 1
print("{}: {} ({} members)".format(counter, x["name"], x["members"]))
events = meetup.get_events(x["id"], skip_before=(skip_before.timestamp()*1000))
total_attendees = 0
num_of_events = 0.001 # avoid divide by 0 error
            for event in events:
print(event)
total_attendees += int(event['yes_rsvp_count'])
num_of_events += 1
f.writerow([x["id"], x["name"], x["category"]["shortname"], x["members"],
x["city"], x["country"], x["link"], total_attendees,
int(total_attendees/num_of_events),
time.strftime('%d/%m/%Y', time.gmtime(x["created"]/1000.))])
except Exception as e:
print("Error: {}, skipping group".format(e))
print("'{}' successfully saved".format(filename))
def main():
meetup = MeetupAPI()
meetup.print_categories()
try:
category = int(input("Choose your category (eg. 34): "))
location = str(input("Choose your location: "))
skip_before = dateutil.parser.parse(input("Skip events occurring before (YYYYMMDD): "))
except ValueError:
print("Error!")
run_scraper(meetup, category, location, skip_before)
if __name__ == "__main__":
main()
``` |
{
"source": "joaorpsgomes/rtabmap_ros",
"score": 2
} |
#### File: catkin_generated/installspace/transform_to_tf.py
```python
import rospy
import tf
from geometry_msgs.msg import TransformStamped
def callback(transform):
global br
global frame_id
global child_frame_id
local_frame_id = transform.header.frame_id
local_child_frame_id = transform.child_frame_id
if not local_frame_id:
local_frame_id = frame_id
if not local_child_frame_id:
local_child_frame_id = child_frame_id
br.sendTransform(
(transform.transform.translation.x, transform.transform.translation.y, transform.transform.translation.z),
(transform.transform.rotation.x, transform.transform.rotation.y, transform.transform.rotation.z, transform.transform.rotation.w),
transform.header.stamp,
        local_child_frame_id,
        local_frame_id)
if __name__ == "__main__":
rospy.init_node("transform_to_tf", anonymous=True)
frame_id = rospy.get_param('~frame_id', 'world')
child_frame_id = rospy.get_param('~child_frame_id', 'transform')
br = tf.TransformBroadcaster()
rospy.Subscriber("transform", TransformStamped, callback, queue_size=1)
rospy.spin()
``` |
{
"source": "joaorura/diario-da-pomada",
"score": 3
} |
#### File: ointment-diary-integration-test/tests/user_role.py
```python
from utils import clear_database
import requests
from requests.models import Response
from tests.auth_signup import signup_send
DATA_TEST = {
"email": "<EMAIL>",
"birthDate": "1999-02-02",
"fullName": "<NAME>",
"password": "<PASSWORD>",
"healthCard": "<PASSWORD>5678901<PASSWORD>",
"nationalCard": "12654624455"
}
def user_role_send(host, token):
response = requests.get(f'{host}/user/role', headers={"Authorization": f"Bearer {token}"})
return response
def test_0(host, token):
response = user_role_send(host, token)
data = response.json()
print("User Role - Verificando acesso aos dados", end="")
try:
if response.status_code == 200 \
and len(data.keys()) == 1 \
and data['role'] == 'user':
print(" - Passou")
else:
print(" - Não Passou")
except:
print(" - Não Passou")
def user_role_test(host):
response = signup_send(host, DATA_TEST)
if response.status_code != 201:
print("User Role - Falha em preparar ambiente!!!")
return
data = response.json()
token = data["access_token"]
test_0(host, token)
clear_database()
``` |
{
"source": "joaorura/HangmanGameOnline-HGC",
"score": 3
} |
#### File: client/back/process_all.py
```python
from multiprocessing import Process, Manager
from interface.server_to_game.game_process import GameProcess
from interface.server_to_game.inter_game import InterGame
from interface.server_to_game.menu_process import MenuProcess
from time import sleep
class ProcessAll(Process):
def __init__(self, queue_front, queue_send, queue_receive):
self.queue_send = queue_send
self.queue_receive = queue_receive
self.intergame = InterGame(queue_front, self.queue_send, self.queue_receive)
super().__init__(target=self._run)
def _run(self):
while True:
if self.queue_receive.empty():
sleep(0.1)
continue
aux = self.queue_receive.get()
test = aux['type']
if test == "menu":
run = MenuProcess(self.intergame, aux, self.queue_send)
elif test == "game":
run = GameProcess(self.intergame, aux, self.queue_send)
else:
raise RuntimeError
run.start()
```
#### File: front/menu/sub_menu.py
```python
import tkinter as tk
from front.menu.loading_page import LoadingPage
from utils.utils_front import front_page
from tkinter.messagebox import showwarning
class SubMenu(tk.Frame):
def __init__(self, text, function, master):
self.text = text
self.function = function
self.loading_page = None
self.loading_frame = None
super().__init__(master)
self.title = tk.Label(self, text=text[0])
self.title.pack()
self.input_name_1 = tk.Label(self, text=text[1])
self.input_name_1.pack()
self.input_1 = tk.Text(self, height=1, padx=2, pady=2)
self.input_1.pack()
self.input_name_2 = tk.Label(self, text=text[2])
self.input_name_2.pack()
self.input_2 = tk.Text(self, height=1, padx=2, pady=2)
self.input_2.pack()
self.input_name_3 = tk.Label(self, text=text[3])
self.input_name_3.pack()
self.input_3 = tk.Text(self, height=1, padx=2, pady=2)
self.input_3.pack()
commit_button = tk.Button(self, text="Ok", command=self._aux_create)
commit_button.pack()
super().pack()
@staticmethod
def _alert_message(text):
showwarning(title="Warning", message=text)
def _aux_create(self):
result_1 = self.input_1.get("1.0", "end-1c")
result_2 = self.input_2.get("1.0", "end-1c")
result_3 = self.input_3.get("1.0", "end-1c")
aux = self.function(result_1, result_2, result_3)
if aux is not None:
self._alert_message(aux)
else:
self._create_loading()
def _create_loading(self):
if self.loading_page is not None:
return
self.loading_page = front_page("50x50", lambda: None, self)
self.loading_frame = LoadingPage(self.loading_page)
def close_loading(self):
self.destroy()
self.loading_page = None
self.loading_frame = None
def destroy(self):
if self.loading_page is not None:
self.loading_page.destroy()
self.loading_page = None
self.loading_frame = None
super().destroy()
```
#### File: interface/server_to_game/game_process.py
```python
class GameProcess:
def __init__(self, intergame, data, queue):
self.intergame = intergame
self.data = data
self.queue = queue
def _init_game(self):
if not self.data["status"]:
self.intergame.exit_room(False)
self.intergame.alert("Problems with room, please create another.")
def _att_state(self):
self.intergame.att_state(self.data["state"])
def _end_game(self):
self.intergame.exit_room(False)
def _alert(self):
self.intergame.alert(self.data["message"])
def start(self):
aux = self.data["subtype"]
if aux == "init":
self._init_game()
elif aux == "att_state":
self._att_state()
elif aux == "end":
self._end_game()
elif aux == "alert":
self._alert()
else:
raise RuntimeError("Error in GameProcess")
```
#### File: client/utils/utils_front.py
```python
import tkinter as tk
def front_page(size, close_function, master):
page = tk.Toplevel()
page.geometry(size)
page.protocol("WM_DELETE_WINDOW", close_function)
page.transient(master)
page.focus_force()
page.grab_set()
return page
```
#### File: HangmanGameOnline-HGC/server/main.py
```python
from interface.interface import Interface
def main():
interface = Interface("localhost", 20)
interface.start_server()
if __name__ == '__main__':
main()
``` |
{
"source": "joaorura/k-NN_Iris_Classificator",
"score": 3
} |
#### File: src/process_ml/KNN.py
```python
from utils.check_functions import check_values, check_type
from math import inf
class KNN:
def _check_values(self):
check_values(self._data)
if self._k < 1:
raise RuntimeError("K deve ser maior que 1.")
check_type(self._k, int, "K deve ser um inteiro.")
check_type(self._classifications, list, "As classificacoes devem ser uma lista")
def _zero_count_process(self):
for i in self._classifications:
self._count_process[i] = [0, None]
def __init__(self, data, classifications, k=1):
self._data = data
self._k = k
self._classifications = classifications
self._check_values()
self._count_process = {}
self._zero_count_process()
self._space_size = len(self._data["list"][0])
def _check_consult_args(self, instance):
check_type(instance, list, "A instancia deve ser uma lista")
if len(instance) != self._space_size:
raise RuntimeError(f"A instancia deve ter tamanho: {self._space_size}")
check_type(instance[len(instance) - 1], str, "O ultimo elemento da lista deve ser uma string.")
if instance[len(instance) - 1] not in self._classifications:
raise RuntimeError(f"O ultimo elemento da lista deve possuir uma das classes.\n"
f"\tClasses: {self._classifications}")
for i in instance[1:len(instance) - 1]:
if type(i) != float:
raise RuntimeError(f"Os {self._space_size - 1} primeiros devem ser floats.")
def consult(self, instance):
self._check_consult_args(instance)
results = []
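        # Squared Euclidean distance over the feature columns; index 0 (assumed to be an
        # identifier) and the last column (the class label) are skipped, mirroring _check_consult_args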
for element in self._data["list"]:
aux = 0
for i in range(1, self._space_size - 1):
aux += (element[i] - instance[i]) ** 2
aux_result = (aux, element)
results.append(aux_result)
results.sort()
for i in range(0, self._k):
classification = results[i][1][self._space_size - 1]
self._count_process[classification][0] += 1
self._count_process[classification][1] = results[i][1][:self._space_size - 1]
aux = [-inf, None, None]
for i in self._count_process:
if aux[0] < self._count_process[i][0]:
aux[0] = self._count_process[i][0]
aux[1] = self._count_process[i][1]
aux[2] = i
self._zero_count_process()
return tuple(aux)
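# Hypothetical usage sketch (commented out; the "data" layout below is assumed from
# _check_values()/consult() above, i.e. data["list"] holds rows of
# [id, feature floats..., class label], and check_values() is assumed to accept it):
#
#   knn = KNN({"list": [["r1", 5.1, 3.5, 1.4, 0.2, "setosa"],
#                       ["r2", 6.3, 3.3, 6.0, 2.5, "virginica"]]},
#             classifications=["setosa", "versicolor", "virginica"], k=1)
#   votes, neighbour_features, predicted_class = knn.consult(["q1", 5.0, 3.4, 1.5, 0.2, "setosa"])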
``` |
{
"source": "joao-salomao/SQL-Generator",
"score": 3
} |
#### File: SQL-Generator/sql_generator/sql_generator.py
```python
from sys import argv
from typing import Union
from numpy import int64, float64
from werkzeug.datastructures import FileStorage
from pandas import read_excel, read_csv, DataFrame
from pandas._libs.tslibs.timestamps import Timestamp
from datetime import datetime
ALLOWED_EXTENSIONS = {'xlsx', 'csv'}
ALLOWED_OPERATIONS = {'insert', 'update', 'delete'}
def file_is_allowed(filename: str) -> bool:
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def operation_is_allowed(operation: str) -> bool:
return operation in ALLOWED_OPERATIONS
def parse_value_to_sql_builder(value: Union[str, int64, float64, Timestamp]) -> str:
if isinstance(value, str) or isinstance(value, Timestamp) or isinstance(value, datetime):
return '"{}"'.format(value)
if isinstance(value, int64) or isinstance(value, float64):
return "{}".format(str(value))
raise Exception("Error parsing value to sql builder")
def create_insert_sql(df: DataFrame, table_name: str) -> str:
sql = 'INSERT INTO ' + table_name + '('
columns = df.columns
len_columns = len(columns)
for i in range(len_columns):
if i < len_columns - 1:
sql = sql + columns[i] + ', '
else:
sql = sql + columns[i] + ') VALUES '
temp = ''
formatter = ''
for i in range(df.shape[0]):
for k in range(len_columns):
value = parse_value_to_sql_builder(df[columns[k]][i])
if k == 0:
formatter = "({}, "
temp = formatter.format(value)
elif k < len_columns-1:
formatter = temp + "{}, "
temp = formatter.format(value)
else:
formatter = temp + "{})"
temp = formatter.format(value)
if i < df.shape[0] - 1 :
sql = sql + temp + ', '
else:
sql = sql + temp + ';'
return sql
def create_update_sql(df: DataFrame, table_name: str) -> str:
sql = ''
columns = df.columns
len_columns = len(columns)
base = 'UPDATE ' + table_name + ' SET'
formatter = ''
temp = base
for i in range(df.shape[0]):
for k in range(len_columns):
column = columns[k]
value = parse_value_to_sql_builder(df[columns[k]][i])
if k < len_columns - 2:
formatter = temp + " " + column + " = {},"
temp = formatter.format(value)
elif k < len_columns - 1:
formatter = temp + " " + column + " = {}"
temp = formatter.format(value)
else:
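                # Assumption inferred from this formatting: the last spreadsheet column is
                # expected to hold the raw WHERE clause (e.g. a column literally named "WHERE"
                # whose cell value is something like "id = 5"), appended verbatim after SET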
formatter = temp + " " + column + " {}; "
temp = formatter.format(str(df[columns[k]][i]))
sql = sql + temp
temp = base
return sql.strip()
def create_delete_sql(df: DataFrame, table_name: str) -> str:
sql = ''
columns = df.columns
len_columns = len(columns)
base = 'DELETE FROM ' + table_name + ' WHERE'
formatter = ''
temp = base
for i in range(df.shape[0]):
for k in range(len_columns):
column = columns[k]
value = parse_value_to_sql_builder(df[columns[k]][i])
if k < len_columns - 1:
formatter = temp + " " + column + " = {} AND"
temp = formatter.format(value)
else:
formatter = temp + " " + column + " = {}; "
temp = formatter.format(value)
sql = sql + temp
temp = base
return sql.strip()
def get_dataframe(file: Union[FileStorage, str], filename: str) -> DataFrame:
if file_is_allowed(filename) == False:
raise Exception("File not allowed")
try:
ext = filename.rsplit('.', 1)[1]
if ext == 'xlsx':
return read_excel(file, sheet_name='Sheet1')
return read_csv(file)
except Exception as e:
raise e
def generate_sql(file: Union[FileStorage, str], filename: str, table_name: str, operation: str) -> str:
if operation_is_allowed(operation) == False:
raise Exception('Operation not allowed')
df = get_dataframe(file, filename)
return {
'insert': create_insert_sql,
'update': create_update_sql,
'delete': create_delete_sql
}[operation](df, table_name)
def main():
if validate_args() == False:
return
operation = argv[1]
file_path = argv[3]
table_name = argv[2]
try:
sql = generate_sql(file_path, file_path, table_name, operation)
open("generated_sql.sql", "w").write(sql)
print('SQL successfully generated !')
except Exception as e:
print('Some error occurred when generating the SQL. Try Again.')
print(e)
def validate_args() -> bool:
if len(argv) < 4:
print('> To few arguments <')
print('You must pass the operation, the table name and the name of the file to be read')
print('Example: python cli_app.py insert users users.xlsx')
return False
if operation_is_allowed(argv[1]) == False:
print('> Operation not allowed <')
print('The allowed operations are: insert, update, delete.')
return False
if file_is_allowed(argv[3]) == False:
print('> File not allowed <')
print('The allowed extensions are: xlsx and csv.')
return False
if (__name__ == '__main__'):
main()
``` |
{
"source": "joaosalvado10/CS599-Final-Project",
"score": 2
} |
#### File: model/ddpg/ddpg.py
```python
from __future__ import print_function
import os
import traceback
import json
import numpy as np
import tensorflow as tf
from .replay_buffer import ReplayBuffer
from ..base_model import BaseModel
def build_summaries():
episode_reward = tf.Variable(0.)
tf.summary.scalar("Reward", episode_reward)
episode_ave_max_q = tf.Variable(0.)
tf.summary.scalar("Qmax_Value", episode_ave_max_q)
portfolio_value = tf.Variable(0.)
tf.summary.scalar("Portfolio Value", portfolio_value)
summary_vars = [episode_reward, episode_ave_max_q, portfolio_value]
summary_ops = tf.summary.merge_all()
return summary_ops, summary_vars
class DDPG(BaseModel):
def __init__(self, env, sess, actor, critic, actor_noise, obs_normalizer=None, action_processor=None,
config_file='config/default.json',
model_save_path='weights/ddpg/ddpg.ckpt', summary_path='results/ddpg/'):
with open(config_file) as f:
self.config = json.load(f)
assert self.config is not None, "Can't load config file"
np.random.seed(self.config['seed'])
if env:
env.seed(self.config['seed'])
self.model_save_path = model_save_path
self.summary_path = summary_path
self.sess = sess
# if env is None, then DDPG just predicts
self.env = env
self.actor = actor
self.critic = critic
self.actor_noise = actor_noise
self.obs_normalizer = obs_normalizer
self.action_processor = action_processor
self.summary_ops, self.summary_vars = build_summaries()
def initialize(self, load_weights=True, verbose=True):
""" Load training history from path. To be add feature to just load weights, not training states
"""
if load_weights:
try:
variables = tf.global_variables()
param_dict = {}
saver = tf.train.Saver()
saver.restore(self.sess, self.model_save_path)
for var in variables:
var_name = var.name[:-2]
if verbose:
print('Loading {} from checkpoint. Name: {}'.format(var.name, var_name))
param_dict[var_name] = var
except:
traceback.print_exc()
print('Build model from scratch')
self.sess.run(tf.global_variables_initializer())
else:
print('Build model from scratch')
self.sess.run(tf.global_variables_initializer())
def train(self, save_every_episode=1, verbose=True, debug=False):
""" Must already call intialize
Args:
save_every_episode:
print_every_step:
verbose:
debug:
Returns:
"""
print("starting train ddpg")
writer = tf.summary.FileWriter(self.summary_path, self.sess.graph)
self.actor.update_target_network()
self.critic.update_target_network()
np.random.seed(self.config['seed'])
num_episode = self.config['episode']
batch_size = self.config['batch size']
gamma = self.config['gamma']
self.buffer = ReplayBuffer(self.config['buffer size'])
# main training loop
for i in range(num_episode):
if verbose and debug:
print("Episode: " + str(i) + " Replay Buffer " + str(self.buffer.count()))
previous_observation = self.env.reset()
if self.obs_normalizer:
previous_observation = self.obs_normalizer(previous_observation)
ep_reward = 0
ep_ave_max_q = 0
# keeps sampling until done
for j in range(self.config['max step']):
action = self.actor.predict(np.expand_dims(previous_observation, axis=0)).squeeze(
axis=0) + self.actor_noise()
if self.action_processor:
action_take = self.action_processor(action)
else:
action_take = action
# step forward
observation, reward, done, info = self.env.step(action_take)
portfolio_value = info["portfolio_value"]
if self.obs_normalizer:
observation = self.obs_normalizer(observation)
# add to buffer
self.buffer.add(previous_observation, action, reward, done, observation)
if self.buffer.size() >= batch_size:
# batch update
s_batch, a_batch, r_batch, t_batch, s2_batch = self.buffer.sample_batch(batch_size)
# Calculate targets
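# Bellman backup: terminal transitions keep only the immediate reward,
# otherwise the target is r + gamma * Q'(s', mu'(s')) evaluated with the
# *target* actor and critic networks, as coded below.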
target_q = self.critic.predict_target(s2_batch, self.actor.predict_target(s2_batch))
y_i = []
for k in range(batch_size):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + gamma * target_q[k])
# Update the critic given the targets
predicted_q_value, _ = self.critic.train(
s_batch, a_batch, np.reshape(y_i, (batch_size, 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
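# Deterministic policy gradient: the critic's gradient of Q w.r.t. the action,
# evaluated at a = mu(s), is fed back into the actor so that
# grad_theta J ~= mean_batch[ grad_a Q(s, a)|a=mu(s) * grad_theta mu(s) ].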
a_outs = self.actor.predict(s_batch)
grads = self.critic.action_gradients(s_batch, a_outs)
self.actor.train(s_batch, grads[0])
# Update target networks
self.actor.update_target_network()
self.critic.update_target_network()
ep_reward += reward
previous_observation = observation
#print(portfolio_value.shape)
# print(ep_reward.shape)
if done or j == self.config['max step'] - 1:
summary_str = self.sess.run(self.summary_ops, feed_dict={
self.summary_vars[0]: ep_reward,
self.summary_vars[1]: ep_ave_max_q / float(j),
self.summary_vars[2] : portfolio_value
})
writer.add_summary(summary_str, i)
writer.flush()
print('Episode: {:d}, Reward: {:.2f}, Qmax: {:.4f}'.format(i, ep_reward, (ep_ave_max_q / float(j))))
break
self.save_model(verbose=True)
print('Finish.')
def predict(self, observation):
""" predict the next action using actor model, only used in deploy.
Can be used in multiple environments.
Args:
observation: (batch_size, num_stocks + 1, window_length)
Returns: action array with shape (batch_size, num_stocks + 1)
"""
if self.obs_normalizer:
observation = self.obs_normalizer(observation)
action = self.actor.predict(observation)
if self.action_processor:
action = self.action_processor(action)
return action
def predict_single(self, observation):
""" Predict the action of a single observation
Args:
observation: (num_stocks + 1, window_length)
Returns: a single action array with shape (num_stocks + 1,)
"""
if self.obs_normalizer:
observation = self.obs_normalizer(observation)
action = self.actor.predict(np.expand_dims(observation, axis=0)).squeeze(axis=0)
if self.action_processor:
action = self.action_processor(action)
return action
def save_model(self, verbose=False):
    # ensure the checkpoint *directory* exists; model_save_path itself is a file path
    os.makedirs(os.path.dirname(self.model_save_path), exist_ok=True)
saver = tf.train.Saver()
model_path = saver.save(self.sess, self.model_save_path)
print("Model saved in %s" % model_path)
``` |
{
"source": "joaosamorim/Codenation",
"score": 3
} |
#### File: Data-Science-Online/Semana 3/semana3.py
```python
import streamlit as st
import pandas as pd
import altair as alt
def criar_histograma(coluna, df):
chart = alt.Chart(df, width=600).mark_bar().encode(
alt.X(coluna, bin=True),
y='count()', tooltip=[coluna, 'count()']
).interactive()
return chart
def criar_barras(coluna_num, coluna_cat, df):
bars = alt.Chart(df, width = 600).mark_bar().encode(
x=alt.X(coluna_num, stack='zero'),
y=alt.Y(coluna_cat),
tooltip=[coluna_cat, coluna_num]
).interactive()
return bars
def criar_boxplot(coluna_num, coluna_cat, df):
boxplot = alt.Chart(df, width=600).mark_boxplot().encode(
x=coluna_num,
y=coluna_cat
)
return boxplot
def criar_scatterplot(x, y, color, df):
scatter = alt.Chart(df, width=800, height=400).mark_circle().encode(
alt.X(x),
alt.Y(y),
color = color,
tooltip = [x, y]
).interactive()
return scatter
def cria_correlationplot(df, colunas_numericas):
cor_data = (df[colunas_numericas]).corr().stack().reset_index().rename(columns={0: 'correlation', 'level_0': 'variable', 'level_1': 'variable2'})
cor_data['correlation_label'] = cor_data['correlation'].map('{:.2f}'.format) # Round to 2 decimal
base = alt.Chart(cor_data, width=500, height=500).encode( x = 'variable2:O', y = 'variable:O')
text = base.mark_text().encode(text = 'correlation_label',color = alt.condition(alt.datum.correlation > 0.5,alt.value('white'),
alt.value('black')))
# The correlation heatmap itself
cor_plot = base.mark_rect().encode(
color = 'correlation:Q')
return cor_plot + text
def main():
st.image('logo.png', width=200)
st.title('AceleraDev Data Science')
st.subheader('Semana 3 - Análise de dados exploratória')
st.image('https://media.giphy.com/media/R8bcfuGTZONyw/giphy.gif', width=200)
file = st.file_uploader('Escolha a base de dados que deseja analisar (.csv)', type = 'csv')
if file is not None:
st.subheader('Estatística descritiva univariada')
df = pd.read_csv(file)
aux = pd.DataFrame({"colunas": df.columns, 'tipos': df.dtypes})
colunas_numericas = list(aux[aux['tipos'] != 'object']['colunas'])
colunas_object = list(aux[aux['tipos'] == 'object']['colunas'])
colunas = list(df.columns)
col = st.selectbox('Selecione a coluna :', colunas_numericas)
if col is not None:
st.markdown('Selecione o que deseja analisar :')
mean = st.checkbox('Média')
if mean:
st.markdown(df[col].mean())
median = st.checkbox('Mediana')
if median:
st.markdown(df[col].median())
desvio_pad = st.checkbox('Desvio padrão')
if desvio_pad:
st.markdown(df[col].std())
kurtosis = st.checkbox('Kurtosis')
if kurtosis:
st.markdown(df[col].kurtosis())
skewness = st.checkbox('Skewness')
if skewness:
st.markdown(df[col].skew())
describe = st.checkbox('Describe')
if describe:
st.table(df[colunas_numericas].describe().transpose())
st.subheader('Visualização dos dados')
st.image('https://media.giphy.com/media/Rkoat5KMaw2aOHDduz/giphy.gif', width=200)
st.markdown('Selecione a visualizacao')
histograma = st.checkbox('Histograma')
if histograma:
col_num = st.selectbox('Selecione a Coluna Numerica: ', colunas_numericas, key='hist_num')
st.markdown('Histograma da coluna : ' + str(col_num))
st.write(criar_histograma(col_num, df))
barras = st.checkbox('Gráfico de barras')
if barras:
col_num_barras = st.selectbox('Selecione a coluna numerica: ', colunas_numericas, key='barras_num')
col_cat_barras = st.selectbox('Selecione uma coluna categorica : ', colunas_object, key='barras_cat')
st.markdown('Gráfico de barras da coluna ' + str(col_cat_barras) + ' pela coluna ' + col_num_barras)
st.write(criar_barras(col_num_barras, col_cat_barras, df))
boxplot = st.checkbox('Boxplot')
if boxplot:
col_num_box = st.selectbox('Selecione a Coluna Numerica:', colunas_numericas, key='box_num')
col_cat_box = st.selectbox('Selecione uma coluna categorica : ', colunas_object, key='box_cat')
st.markdown('Boxplot ' + str(col_cat_box) + ' pela coluna ' + col_num_box)
st.write(criar_boxplot(col_num_box, col_cat_box, df))
scatter = st.checkbox('Scatterplot')
if scatter:
col_num_x = st.selectbox('Selecione o valor de x ', colunas_numericas, key='scatter_x')
col_num_y = st.selectbox('Selecione o valor de y ', colunas_numericas, key='scatter_y')
col_color = st.selectbox('Selecione a coluna para cor', colunas)
st.markdown('Selecione os valores de x e y')
st.write(criar_scatterplot(col_num_x, col_num_y, col_color, df))
correlacao = st.checkbox('Correlacao')
if correlacao:
st.markdown('Gráfico de correlação das colunas númericas')
st.write(cria_correlationplot(df, colunas_numericas))
if __name__ == '__main__':
main()
``` |
{
"source": "JoaoSantinha/streamlit-leaderboard",
"score": 3
} |
#### File: JoaoSantinha/streamlit-leaderboard/dicescoring.py
```python
import json
import numpy as np
import os
import io
def dice_score(ref, pred):
# from https://stackoverflow.com/questions/49759710/calculating-dice-co-efficient-between-two-random-images-of-same-size
if ref.shape != pred.shape:
raise ValueError("Shape mismatch: img and img2 must have to be of the same shape.")
else:
intersection = np.logical_and(ref, pred)
value = (2. * intersection.sum()) / (ref.sum() + pred.sum())
return value
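# Quick illustrative check (not part of the original module): two 2x2 masks
# that overlap in exactly one pixel give 2*1 / (2 + 2) = 0.5, e.g.
#   dice_score(np.array([[1, 1], [0, 0]]), np.array([[1, 0], [1, 0]]))  # -> 0.5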
def dice_list(reference_json, user_json, image_width=1024, image_height=1024):
f_reference = open(reference_json)
if isinstance(user_json, str):
f_user = open(user_json)
else:
f_user = io.StringIO(user_json.getvalue().decode("utf-8"))
# f_user = json.load(stringio)
data_reference = json.load(f_reference)
data_user = json.load(f_user)
list_dice_scores = []
# if "covid27" in reference_json:
if "_via_img_metadata" in data_reference:
gt_patients_list = data_reference["_via_img_metadata"]
else:
gt_patients_list = data_reference
# gt_patients_list = data_reference["_via_img_metadata"]
if "_via_img_metadata" in data_user:
user_patients_list = data_user["_via_img_metadata"]
else:
user_patients_list = data_user
for key in gt_patients_list:
if key in user_patients_list:
np_reference = np.zeros([image_width, image_height])
np_user = np.zeros([image_width, image_height])
for region in gt_patients_list[key]["regions"]:
if region['shape_attributes']['name'] == 'rect':
x_start = region['shape_attributes']['x']
y_start = region['shape_attributes']['y']
x_end = region['shape_attributes']['width'] + x_start
y_end = region['shape_attributes']['height'] + y_start
np_reference[x_start:x_end, y_start:y_end] = 1
else: # doesn't have rect type of region so we should skip it
break
for region in user_patients_list[key]["regions"]:
if region['shape_attributes']['name'] == 'rect':
x_start = region['shape_attributes']['x']
y_start = region['shape_attributes']['y']
x_end = region['shape_attributes']['width'] + x_start
y_end = region['shape_attributes']['height'] + y_start
np_user[x_start:x_end, y_start:y_end] = 1
dice_score_patient = dice_score(np_reference, np_user)
if not np.isnan(dice_score_patient):
list_dice_scores.append(dice_score_patient)
else: # reference didn't had rect but user drew rect
list_dice_scores.append(0)
else:
for region in gt_patients_list[key]["regions"]:
if region['shape_attributes']['name'] == 'rect':
list_dice_scores.append(0)
print("Not segmented by used")
else:
print("Not rect tool")
return list_dice_scores
def get_score_all_users(directory, ground_truth_file, user_files_list):
reference_json = os.path.join(directory, ground_truth_file)
scores_users = []
for user_file in user_files_list:
user_json = os.path.join(directory, user_file)
dice_scores = dice_list(reference_json, user_json)
user_score = round(np.sum(np.asarray(dice_scores)) * 10)
scores_users.append(user_score)
order_users = np.argsort(scores_users)
return order_users, scores_users
# if __name__ == '__main__':
# # example of input and call to get order and scores
# # inputs are directory where files are located, ground truth json filename, list of json users annotations filenames
# order, score = get_score_all_users('/Users/joaosantinha/Downloads',
# 'via_project_9Dec2020_15h40m_Les_ground_truth.json',
# ['via_project_8Dec2020_15h28m_jane_with_missing_keys.json',
# 'via_project_18May2021_13h3m_Pedro.json',
# 'via_project_20May2021_10h53m-6_Lilli.json'])
# print('Order: ', order+1, '\nScore: ', score)
```
#### File: JoaoSantinha/streamlit-leaderboard/leaderboardtb.py
```python
import streamlit as st
import pandas as pd
import json
import os
from datetime import datetime
import dicescoring
import numpy as np
import io
# from sklearn.metrics import (accuracy_score, auc, f1_score, precision_score, recall_score,
# mean_absolute_error, mean_squared_error, r2_score)
st.set_option('deprecation.showfileUploaderEncoding', False)
# funtions
def relative_time(t_diff):
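# Render a submission's age as a single coarse unit: days if at least one full
# day has passed, otherwise hours, then minutes, then seconds (e.g. '2d', '5h', '12m', '40s').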
days, seconds = t_diff.days, t_diff.seconds
if days > 0:
return f"{days}d"
else:
hours = t_diff.seconds // 3600
minutes = t_diff.seconds // 60
if hours >0 : #hour
return f"{hours}h"
elif minutes >0:
return f"{minutes}m"
else:
return f"{seconds}s"
def get_leaderboard_dataframe(csv_file = 'leaderboardTB.csv', greater_is_better = True):
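# Aggregate the raw submission log into one row per user: best score, number of
# entries, and a human-readable time since the latest submission, sorted with the best score first.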
df_leaderboard = pd.read_csv(csv_file, header=None)
df_leaderboard.columns = ['Username', 'Score', 'Submission Time']
df_leaderboard['counter'] = 1
df_leaderboard = df_leaderboard.groupby('Username').agg({"Score": "max",
"counter": "count",
"Submission Time": "max"})
df_leaderboard = df_leaderboard.sort_values("Score", ascending = not greater_is_better)
df_leaderboard = df_leaderboard.reset_index()
df_leaderboard.columns = ['Username','Score', 'Entries', 'Last']
df_leaderboard['Last'] = df_leaderboard['Last'].map(lambda x: relative_time(datetime.now() - datetime.strptime(x, "%Y%m%d_%H%M%S")))
return df_leaderboard
def app():
# Title
st.title("TB Competition Leaderboard")
# Username Input
username = st.text_input("Username", value = "Joao", max_chars= 20,)
username = username.replace(",","") # for storing csv purpose
st.header(f"Hi {username} !!!")
# Check if master data has been registered:
master_files = os.listdir('master')
if ("via_project_9Dec2020_15h40m_Les_ground_truth.json" not in master_files):
st.text("Admin please insert ground truth data")
else:
greater_is_better = True# if metric_type in ["MAE", "MSE"] else True # CHANGE HERE AS YOU WANT
uploaded_file = st.file_uploader("Upload Submission json File", type='json')
ground_truth_file = 'master/tb_ground_truth.json'
if st.button("SUBMIT"):
if uploaded_file is None:
st.text("UPLOAD FIRST")
else:
# save submission
stringio = io.StringIO(uploaded_file.getvalue().decode("utf-8"))
json_uploaded_submission = json.load(stringio)
datetime_now = datetime.now().strftime("%Y%m%d_%H%M%S")
filename_submission = f"submission/sub_{username}__{datetime_now}.json"
with open(filename_submission, 'w') as outfile:
json.dump(json_uploaded_submission, outfile)
# calculate score
dice_scores_user = dicescoring.dice_list(ground_truth_file, uploaded_file)
score = round(np.sum(np.asarray(dice_scores_user)) * 10)
score = round(score,5)
st.text(f"YOUR Dice socer: {score}")
# save score
with open("leaderboardTB.csv", "a+") as leaderboard_csv:
leaderboard_csv.write(f"{username},{score},{datetime_now}\n")
# Showing Leaderboard
st.header("Leaderboard")
if os.stat("leaderboardTB.csv").st_size == 0:
st.text("NO SUBMISSION YET")
else:
df_leaderboard = get_leaderboard_dataframe(csv_file = 'leaderboardTB.csv', greater_is_better = greater_is_better)
st.write(df_leaderboard)
# To register master data
if username == 'pokerad_admin_siim21': # CHANGE HERE AS YOU WANT
change_master_key = st.checkbox('Change Ground Truth File')
if change_master_key:
# Master Data Frame
uploaded_file_master = st.file_uploader("Upload Ground Truth File", type='json')
if uploaded_file_master is not None:
stringio = io.StringIO(uploaded_file_master.getvalue().decode("utf-8"))
# f_uploaded_submission_master = open(stringio)
# json_uploaded_submission_master = json.load(f_uploaded_submission_master)
json_uploaded_submission_master = json.load(stringio)
datetime_now = datetime.now().strftime("%Y%m%d_%H%M%S")
with open('master/tb_ground_truth.json', 'w') as outfile:
json.dump(json_uploaded_submission_master, outfile)
``` |
{
"source": "joaoschumacher/Snake-Game",
"score": 4
} |
#### File: joaoschumacher/Snake-Game/snake_game.py
```python
import pygame
from pygame.locals import *
import random
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
BLUE = (0, 255, 255)
screen_width = 400
screen_height = 400
pygame.init()
screen = pygame.display.set_mode((screen_width,screen_height))
pygame.display.set_caption('Snake game')
icon = pygame.image.load('img/snake_icon.png')
pygame.display.set_icon(icon)
clock = pygame.time.Clock()
score = 0
font_over = pygame.font.SysFont("bahnschrift", 25)
score_font = pygame.font.SysFont("bahnschrift", 35)
font_hub = pygame.font.SysFont("bahnschrift", 15)
def on_grid_random():
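# Pick a random board position snapped to the 10-pixel grid so the apple always
# lines up with the snake's 10-pixel movement steps.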
x = random.randint(0, screen_width-10)
y = random.randint(0, screen_height-10)
return x//10 * 10, y//10 * 10
def collision(c1, c2):
return (c1[0] == c2[0]) and (c1[1] == c2[1])
def collision_snake(c1):
for i in range(len(c1) - 1, 0, -1):
if c1[i] == c1[0]:
return True
def text(msg, color, x1, y1, font):
mesg = font.render(msg, True, color)
screen.blit(mesg, [x1, y1])
def Score(score):
v = score_font.render("Your Score: " + str(score), True, (BLUE))
screen.blit(v, [5, 5])
def gameLoop():
game_over = False
snake = [(screen_width/2, screen_height/2)]
snake_skin = pygame.Surface((10, 10))
snake_skin.fill(WHITE)
apple_pos = on_grid_random()
apple = pygame.Surface((10, 10))
apple.fill(RED)
my_direction = LEFT
while True:
clock.tick(15)
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
if event.type == KEYDOWN:
if event.key == K_q:
pygame.quit()
if event.key == K_r:
gameLoop()
if event.key == K_UP and my_direction != DOWN:
my_direction = UP
if event.key == K_RIGHT and my_direction != LEFT:
my_direction = RIGHT
if event.key == K_DOWN and my_direction != UP:
my_direction = DOWN
if event.key == K_LEFT and my_direction != RIGHT:
my_direction = LEFT
if collision(snake[0], apple_pos):
apple_pos = on_grid_random()
snake.append((0, 0))
if collision_snake(snake):
game_over = True
if (snake[0][0] < 0) or (snake[0][0] > screen_width-10) or (snake[0][1] < 0) or (snake[0][1] > screen_height-10):
game_over = True
for i in range(len(snake) - 1, 0, -1):
snake[i] = (snake[i-1][0], snake[i-1][1])
if my_direction == UP:
snake[0] = (snake[0][0], snake[0][1]-10)
if my_direction == RIGHT:
snake[0] = (snake[0][0]+10, snake[0][1])
if my_direction == DOWN:
snake[0] = (snake[0][0], snake[0][1]+10)
if my_direction == LEFT:
snake[0] = (snake[0][0]-10, snake[0][1])
screen.fill(BLACK)
screen.blit(apple, apple_pos)
Score(len(snake)-1)
text("R - Restart / Q - Quit", BLUE, 10, screen_height-20, font_hub)
for pos in snake:
screen.blit(snake_skin, pos)
if game_over:
screen.fill(BLACK)
text("You Lost!", RED, 20, 50, font_over)
text("Press R to Play Again or Q to Quit", RED, 20, 100, font_over)
pygame.display.update()
pygame.display.update()
gameLoop()
``` |
{
"source": "joaoschweikart/python_projects",
"score": 4
} |
#### File: joaoschweikart/python_projects/ex098 contagem.py
```python
from time import sleep
def contador(i, f, r):
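# Counts from i to f (inclusive), stepping by |r| and walking downwards
# automatically when i > f; a step of 0 is treated as 1. When i == f nothing is printed.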
if i < f:
    f = f + 1
    if r <= 0:
        # counting up needs a positive step; use |r|, or 1 when r is zero
        r = abs(r) if r != 0 else 1
    for c in range(i, f, r):
print(c, end=' ')
sleep(0.6)
elif i > f:
f = f-1
if r > 0:
r = r * -1
for c in range(i, f, r):
print(c, end=' ')
sleep(0.6)
elif r == 0:
r = r - 1
for c in range(i, f, r):
print(c, end=' ')
sleep(0.6)
else:
for c in range(i, f, r):
print(c, end=' ')
sleep(0.6)
print('=-'*20)
print('CONTAGEM DE 1 até 10 DE 1 em 1')
contador(1, 10, 1)
print('FIM!')
print('=-'*20)
print('CONTAGEM DE 10 até 0 DE 2 em 2')
sleep(0.6)
contador(10, 0, -2)
print('FIM!')
print('=-'*20)
print('Agora é sua vez de personalizar a contagem!')
sleep(0.6)
inicio = int(input('INÍCIO: '))
fim = int(input('FIM: '))
razao = int(input('RAZÃO: '))
print('=-'*20)
if razao < 0:
print(f'CONTAGEM DE {inicio} até {fim} DE {razao*-1} em {razao*-1}:')
elif razao == 0:
print(f'CONTAGEM DE {inicio} até {fim} DE {razao + 1} em {razao + 1}:')
else:
print(f'CONTAGEM DE {inicio} até {fim} DE {razao} em {razao}:')
sleep(0.6)
contador(inicio, fim, razao)
print('FIM!')
print('=-'*20)
``` |
{
"source": "JoaoSevergnini/verificacao_perfis_metalicos",
"score": 3
} |
#### File: JoaoSevergnini/verificacao_perfis_metalicos/mascara_laminados.py
```python
from math import sqrt, pi
from perfil_de_aco import PerfilDeAço
from material import Material
import pandas as pd
# ==================
# RENOMEAR A CLASSE
# ==================
class NomeDoTipoDePerfil(PerfilDeAço):
"""
Esta classe define um perfil de aço laminado do tipo XXXX.
Parameter
----------
nome: 'str'
nome da seção transversal de acordo com a tabela da IASC
material: 'Material', 'list', 'dict'
material que compõe a seção.
"""
# =================================================================================================
# li = NÚMERO DA LINHA ONDE COMEÇA O TIPO DE SEÇÃO ESPECIFICA DE ACORDO COM A TABELA DA AISC -1
# lf = NÚMERO DA LINHA ONDE TERMINA O TIPO DE SEÇÃO ESPECIFICA DE ACORDO COM A TABELA DA AISC + 1
li = XX
lf = XX
# =================================================================================================
perfis = pd.read_excel('aisc-shapes-database-v15.0.xlsx', 1).iloc[li:lf, 84:]
def __init__(self, nome, material):
perfil = self.perfis[self.perfis['EDI_Std_Nomenclature.1'] == nome]
# ===============================================================
# COLOCAR AQUI AS PROPRIEDADES ESPECIFICAS DO PERFIL A SER CRIADO
# ===============================================================
A = float(perfil['A.1'])
Ix = float(perfil['Ix.1']) * 1E6
Iy = float(perfil['Iy.1']) * 1E6
J = float(perfil['J.1']) * 1E3
Wx = float(perfil['Sx.1']) * 1E3
Wy = float(perfil['Sy.1']) * 1E3
Zx = float(perfil['Zx.1']) * 1E3
Zy = float(perfil['Zy.1']) * 1E3
Cw = float(perfil['Cw.1']) * 1E9
# =========================================================
# INDICAR A POSIÇÃO DO CENTRO DE CORTE (xo, yo) EM RELAÇÃO
# AO CENTRO GEOMÉTRICO DA SEÇÃO
# =========================================================
xo = #FÓRMULA PARA O CALCULO DE XO
yo = #FÓRMULA PARA O CALCULO DE YO
# =======================================
# INDICAR A SIMETRIA DO TIPO DE PERFIL
#
# True = simetrico
# False = Não simetrico
#
# simetria[0] - indica simetria no eixo X
# simetria[1] - indica simetria no eixo Y
# =======================================
simetria = [True, True]
super().__init__(A, Ix, Iy, J, Wx, Wy, Zx, Zy, xo, yo, Cw, material, simetria)
self.esb_alma = #FÓRMULA PARA O CALCULO DA ESBELTEZ DA ALMA DE ACORDO COM AS PROPRIEDADES DO PERFIL
self.esb_mesa = #FÓRMULA PARA O CALCULO DA ESBELTEZ DA MESA DE ACORDO COM AS PROPRIEDADES DO PERFIL
# ====================================
# COLOCAR AQUI OS MÉTODOS PROPERTIES
# ====================================
# -------------------------------------------------------------------------------------
# --------------------------Verificações de resistência--------------------------------
# -------------------------------------------------------------------------------------
# ----------------------------------NBR 8800-------------------------------------------
# COMPRESSÃO
# -----------
def par_esbeltez_limites_AL_Ncrd(self):
# =================================================================================================
# DEVE SER IMPLEMENTADO APENAS NOS PERFIL QUE APRESENTEM ELEMENTOS
# APOIADOS LIVRES ( seções do tipo I, C, L e T )
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO PODE SER EXCLUÍDO
# coef1 = COEFICIENTE PARA O CÁLCULO DO LIMITE DE PLASTIFICAÇÃO DE ACORDO COM A SEÇÃO F.2 DA NBR8800
# coef2 = COEFICIENTE PARA O CÁLCULO DO LIMITE DE ESCOAMENTO DE ACORDO COM A SEÇÃO F.2 DA NBR8800
# =================================================================================================
return coef1 * self.raiz_E_fy, coef2 * self.raiz_E_fy
def fator_Qs(self):
# =============================================================================
# DEVE SER IMPLEMENTADO APENAS NOS PERFIL QUE APRESENTEM ELEMENTOS
# APOIADOS LIVRES ( seções do tipo I, C, L e T )
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE RETORNAR O VALOR 1
# =============================================================================
# elp = esbeltez limite para plastificação
# elr = esbeltez limite para início de escoamento
elp, elr = self.par_esbeltez_limites_AL_Ncrd()
# =============================================================================
# coef3 = COEFICIENTE PARA O CÁLCULO DE QS DE ACORDO COM A SEÇÃO F.2 DA NBR8800
# coef4 = COEFICIENTE PARA O CÁLCULO DE QS DE ACORDO COM A SEÇÃO F.2 DA NBR8800
# coef5 = COEFICIENTE PARA O CÁLCULO DE QS DE ACORDO COM A SEÇÃO F.2 DA NBR8800
# =============================================================================
if elp > self.esb_mesa:
return 1
if elp < self.esb_mesa <= elr:
return coef3 - coef4 * self.esb_alma * sqrt(self.material.fy / self.material.E)
elif self.esb_mesa > elr:
return coef5 * self.material.e / (self.material.fy * self.esb_mesa ** 2)
def fator_Qa(self, frc):
# ========================================================================
# DEVE SER IMPLEMENTADO APENAS NOS PERFIL QUE APRESENTAM ELEMENTOS
# APOIADOS APOIADOS ( seções do tipo I, C, e TUBULARES )
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE RETORNAR O VALOR 1
# ========================================================================
tensao = self.material.fy * frc
# =================================
# INDICAR O VALOR DO COEFICIENTE ca
#
# ca = 0.38 - TUBOS RETANGULARES
# ca = 0.34 - OUTROS PERFIS
# =================================
# =====================================
# ADAPTAR O VALOR DE ca ABAIXO CASO NECESSÁRIO
#======================================
ca = 0.34
# ========================================================================
# PARA CADA ELEMENTO APOIADO-APOIADO DA SEÇÃO TRANSVERSAL A LARGURA EFETIVA
# DEVE SER CALCULADO COM A FÓRMULA DA LARGURA EFETIVA bef ABAIXO
#
# bef = 1.92 . t . sqrt(E / tensao) . [ 1 - ca / (b/t) . sqrt( E / tensao)]
#
# SENDO QUE O VALOR NÃO PODE ULTRAPASSAR A LARGURA
# =========================================================================
# =====================================================================================
# ADAPTAR AS EQUAÇÕES ABAIXO PARA ATENDER TODOS OS ELEMENTOS APOIADOS APOIADOS DA SEÇÃO
# E SUBSTITUIR 'b' E 't' PELA PROPRIEDADE DA CLASSE QUE REPRESENTAM A LARGURA E ESPESSURA
# DO ELEMENTO AA CONSIDERADO
# =======================================================================================
bef = 1.92 * 't' * sqrt(self.material.E / tensao) * \
(1 - ca / 'b/t' * sqrt(self.material.E / tensao))
bef = bef if bef < 'b' else 'b'
Aef = self.A - ('b' - bef) * 't'
return Aef / self.A
# CORTANTE
# -----------
@property
def Awx(self):
# ===================================================================================
# RETORNAR O CÁLCULO Awx ESPECÍFICO DA SEÇÃO DE ACORDO COM A SEÇÃO 5.4.3.1 DA NBR8800
# ====================================================================================
return
@property
def Awy(self):
# ===================================================================================
# RETORNAR O CÁLCULO Awy ESPECÍFICO DA SEÇÃO DE ACORDO COM A SEÇÃO 5.4.3.1 DA NBR8800
# ====================================================================================
return #FORMULA DO Aw
# CORTANTE EM X
def kv_Vrdx(self, a=None):
# =================================================================================================
# RETORNAR O CÁLCULO kv ESPECÍFICO EM RELAÇÃO A X DA SEÇÃO DE ACORDO COM A SEÇÃO 5.4.3.1 DA NBR8800
# =================================================================================================
return # VALOR DO kv
# CORTANTE EM Y
def kv_Vrdy(self, a=None):
# =================================================================================================
# RETORNAR O CÁLCULO kv ESPECÍFICO EM RELAÇÃO A Y DA SEÇÃO DE ACORDO COM A SEÇÃO 5.4.3.1 DA NBR8800
# =================================================================================================
return # VALOR DO kv
# MOMENTO EM X
# ------------
# Estado Limite FLT
def par_esbeltez_limite_Mrdx_FLT(self):
# =============================================================================
# DEVE SER IMPLEMENTADO APENAS SE A SEÇÃO APRESENTAR O ESTADO LIMITE FLT QUANDO
# FLETIDO NO EIXO X
#
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE SER EXCLUÍDO
# =============================================================================
# ===========================================================================
# OS PARAMETROS DE ESBELTEZ LIMITE DE PLASTIFICAÇÃO (elp) E O PARAMETRO DE
# DE ESBELTEZ LIMITE DE INICIO DE ESCOAMENTO (elr) DEVEM SER IMPLEMENTADOS DE
# ACORDO COM A TABELA G.1 DO ANEXO G DA NBR8800:2008
#============================================================================
# parâmetro de esbeltez limite de plastificação (elp)
elp = #FÓRMULA PARA O CÁLCULO DO PARÂMETRO DE ESBELTEZ
# parâmetro de esbeltez limite de início de escoamento (elr)
elr = #FÓRMULA PARA O CÁLCULO DO PARÂMETRO DE ESBELTEZ
return elp, elr
def Mrx_FLT(self):
# =============================================================================
# DEVE SER IMPLEMENTADO APENAS SE A SEÇÃO APRESENTAR O ESTADO LIMITE FLT QUANDO
# FLETIDO NO EIXO X
#
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE SER EXCLUÍDO
# =============================================================================
# ================================================================================
# O MOMENTO FLETOR DE INICIO DE ESCOAMENTO DE ACORDO COM A TABELA G.1 E SEÇÃO G.2
# ANEXO G DA NBR8800:2008, É DADO PELO PRODUTO DA TENSÃO DE ESCOAMENTO (fy),
# REDUZIDO PELA TENSÃO RESIDUAL, E O MÓDULO DE RESISTÊNCIA ELÁSTICO DA SEÇÃO
# Mr = (fy - tensao residual) . W
#
# CASO EXISTA TENSÃO RESIDUAL, ELA É TOMADA COMO SENDO 30% DA TENSÃO DE ESCOAMENTO
# SENDO ASSIM Mr = 0.7 . fy . W
#
# VER TABELA G1 PARA OBTER O CÁLCULO DA TENSÃO RESIDUAL ESPECIFICA DO PERFIL
#=================================================================================
return 0.7 * self.material.fy * self.Wx
def Mcrx_FLT(self, Cb, Lb):
# =============================================================================
# DEVE SER IMPLEMENTADO APENAS SE A SEÇÃO APRESENTAR O ESTADO LIMITE FLT QUANDO
# FLETIDO NO EIXO X
#
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE SER EXCLUÍDO
# =============================================================================
# ======================================================================================
# O MOMENTO FLETOR DE FLAMBAGEM ELÁSTICO DE ACORDO COM A TABELA G.1 DO
# ANEXO G DA NBR8800:2008
#
# VER TABELA G1 E SEÇÃO G.2 PARA OBTER O CÁLCULO DA TENSÃO RESIDUAL ESPECÍFICA DO PERFIL
#========================================================================================
Mcr = # FÓRMULA PARA O CÁLCULO DO Mcr VER TABELA G.1 E SEÇÃO G.2 DO ANEXO G DA NBR8800:2008
return Mcr
def Mnx_FLT(self, Cb, Lb):
# =============================================================================
# DEVE SER IMPLEMENTADO APENAS SE A SEÇÃO APRESENTAR O ESTADO LIMITE FLT QUANDO
# FLETIDO NO EIXO X
#
# CASO NÃO EXISTA ELEMENTOS DESSE TIPO ESSE MÉTODO DEVE RETORNAR O MOMENTO DE
# PLASTIFICAÇÃO Mplx
# ==============================================================================
# ==========================================================================
# PARA OS TIPOS DE PERFIS DA TABELA G.1 DO ANEXO G O PROCEDIMENTO DE CÁLCULO
# É REALIZADO DE ACORDO COM O QUE ESTÁ IMPLEMENTADO ABAIXO, PARA OUTROS CASOS
# VER ITENS DA SEÇÃO G.2 DO ANEXO G DA NBR8800:2008
# =============================================================================
esbeltez = self.indice_esbeltez_X(Lb)
elp, elr = self.par_esbeltez_limite_Mrdx_FLT()
if esbeltez < elp:
return self.Mplx
if elp < esbeltez < elr:
return Cb * (self.Mplx - (self.Mplx - self.Mrx_FLT()) * (esbeltez - elp) / (elr - elp))
elif esbeltez > elp:
return self.Mcrx_FLT(Cb, Lb)
# Estado Limite FLM
def par_esbeltez_limite_Mrdx_FLM(self):
return 0.38 * self.raiz_E_fy, 0.83 * sqrt(1 / 0.7) * self.raiz_E_fy
def Mrx_FLM(self):
return 0.7 * self.material.fy * self.Wx
def Mcrx_FLM(self):
return 0.69 * self.material.E * self.Wx / self.esb_mesa ** 2
def Mnx_FLM(self):
elp, elr = self.par_esbeltez_limite_Mrdx_FLM()
if self.esb_mesa < elp:
return self.Mplx
if elp < self.esb_mesa < elr:
return self.Mplx - (self.Mplx - self.Mrx_FLM()) * (self.esb_mesa - elp) / (elr - elp)
elif self.esb_mesa > elr:
return self.Mcrx_FLM()
# Estado Limite FLA
def par_esbeltez_limite_Mrdx_FLA(self):
return 3.76 * self.raiz_E_fy, 5.7 * self.raiz_E_fy
def Mrx_FLA(self):
return self.material.fy * self.Wx
def Mnx_FLA(self):
elp, elr = self.par_esbeltez_limite_Mrdx_FLA()
if self.esb_alma < elp:
return self.Mplx
elif elp < self.esb_alma < elr:
return self.Mplx - (self.Mplx - self.Mrx_FLA()) * (self.esb_alma - elp) / (elr - elp)
# MOMENTO EM Y
# ------------
# Estado Limite FLT
def Mny_FLT(self, Cb=None, Lb=None):
return self.Mply
# Estado Limite FLM
def par_esbeltez_limite_Mrdy_FLM(self):
return 0.38 * self.raiz_E_fy, 0.83 * sqrt(1 / 0.7) * self.raiz_E_fy
def Mry_FLM(self):
return 0.70 * self.material.fy * self.Wy
def Mcry_FLM(self):
return 0.69 * self.material.E / self.esb_mesa ** 2 * self.Wy
def Mny_FLM(self):
elp, elr = self.par_esbeltez_limite_Mrdy_FLM()
if self.esb_mesa < elp:
return self.Mply
elif elp < self.esb_mesa < elr:
return self.Mply - (self.Mply - self.Mry_FLM()) * (self.esb_mesa - elp) / (elr - elp)
elif self.esb_mesa > elr:
return self.Mcry_FLM
# Estado Limite FLA
def Mny_FLA(self):
return self.Mply
``` |
{
"source": "joaosferreira/udiff",
"score": 2
} |
#### File: src/tests/test_builtin_diffs.py
```python
import uarray as ua
import unumpy as np
import numpy as onp
import torch
import dask.array as da
import udiff
import sparse
from math import *
from random import uniform
import unumpy.numpy_backend as NumpyBackend
import unumpy.torch_backend as TorchBackend
import unumpy.dask_backend as DaskBackend
import unumpy.sparse_backend as SparseBackend
from numpy.testing import *
import pytest
ua.set_global_backend(NumpyBackend)
LIST_BACKENDS = [
NumpyBackend,
# DaskBackend,
# SparseBackend,
pytest.param(
TorchBackend,
marks=pytest.mark.xfail(reason="PyTorch not fully NumPy compatible."),
),
]
FULLY_TESTED_BACKENDS = [NumpyBackend, DaskBackend]
try:
import unumpy.xnd_backend as XndBackend
import xnd
from ndtypes import ndt
LIST_BACKENDS.append(XndBackend)
FULLY_TESTED_BACKENDS.append(XndBackend)
except ImportError:
XndBackend = None # type: ignore
LIST_BACKENDS.append(
pytest.param(
None, marks=pytest.mark.skip(reason="xnd is not importable")
)
)
try:
import unumpy.cupy_backend as CupyBackend
import cupy as cp
LIST_BACKENDS.append(pytest.param(CupyBackend))
except ImportError:
LIST_BACKENDS.append(
pytest.param(
(None, None), marks=pytest.mark.skip(reason="cupy is not importable")
)
)
EXCEPTIONS = {
(DaskBackend, np.in1d),
(DaskBackend, np.intersect1d),
(DaskBackend, np.setdiff1d),
(DaskBackend, np.setxor1d),
(DaskBackend, np.union1d),
(DaskBackend, np.sort),
(DaskBackend, np.argsort),
(DaskBackend, np.lexsort),
(DaskBackend, np.partition),
(DaskBackend, np.argpartition),
(DaskBackend, np.sort_complex),
(DaskBackend, np.msort),
(DaskBackend, np.searchsorted),
}
@pytest.fixture(scope="session", params=LIST_BACKENDS)
def backend(request):
backend = request.param
return backend
def generate_test_data(n_elements=10, a=None, b=None):
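# Uniform samples drawn strictly inside (a, b): the 1e-3 margin keeps test points
# away from the domain endpoints, where several tested derivatives are singular
# (e.g. arcsin/arccos at +-1, reciprocal at 0).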
if a is None:
a = -10
if b is None:
b = 10
x_arr = [uniform(a + 1e-3, b - 1e-3) for i in range(n_elements)]
return x_arr
@pytest.mark.parametrize(
"method, y_d, domain",
[
(np.positive, lambda x: 1, None),
(np.negative, lambda x: -1, None),
(np.exp, lambda x: pow(e, x), None),
(np.exp2, lambda x: pow(2, x) * log(2), None),
(np.log, lambda x: 1 / x, (0, None)),
(np.log2, lambda x: 1 / (x * log(2)), (0, None)),
(np.log10, lambda x: 1 / (x * log(10)), (0, None)),
(np.sqrt, lambda x: 0.5 * pow(x, -0.5), (0, None)),
(np.square, lambda x: 2 * x, None),
(np.cbrt, lambda x: 1 / 3 * pow(x, -2 / 3), (0, None)), # Negative numbers cannot be raised to a fractional power
(np.reciprocal, lambda x: -1 / pow(x, 2), (None, 0)),
(np.sin, lambda x: cos(x), None),
(np.cos, lambda x: -sin(x), None),
(np.tan, lambda x: 1 / cos(x) ** 2, None),
(np.arcsin, lambda x: 1 / sqrt(1 - x ** 2), (-1, 1)),
(np.arccos, lambda x: -1 / sqrt(1 - x ** 2), (-1, 1)),
(np.arctan, lambda x: 1 / (1 + x ** 2), None),
(np.sinh, lambda x: cosh(x), None),
(np.cosh, lambda x: sinh(x), (1, None)),
(np.tanh, lambda x: 1 / cosh(x) ** 2, (-1, 1)),
(np.arcsinh, lambda x: 1 / sqrt(1 + x ** 2), None),
(np.arccosh, lambda x: 1 / sqrt(-1 + x ** 2), (1, None)),
(np.arctanh, lambda x: 1 / (1 - x ** 2), (-1, 1))
],
)
def test_unary_function(backend, method, y_d, domain):
if domain is None:
x_arr = generate_test_data()
else:
x_arr = generate_test_data(a=domain[0], b=domain[1])
y_d_arr = [y_d(xa) for xa in x_arr]
try:
with ua.set_backend(backend), ua.set_backend(udiff, coerce=True):
x = np.asarray(x_arr)
x.var = udiff.Variable('x')
ret = method(x)
except ua.BackendNotImplementedError:
if backend in FULLY_TESTED_BACKENDS:
raise
pytest.xfail(reason="The backend has no implementation for this ufunc.")
if isinstance(ret, da.Array):
ret.compute()
assert_allclose(ret.diffs[x].arr, y_d_arr)
@pytest.mark.parametrize(
"func, y_d, domain",
[
(lambda x: (2 * x + 1) ** 3, lambda x: 6 * (2 * x + 1) ** 2, (0.5, None)),
(lambda x: np.sin(x ** 2) / (np.sin(x)) ** 2, lambda x: (2 * x * np.cos(x ** 2) * np.sin(x) - 2 * np.sin(x ** 2) * np.cos(x)) / (np.sin(x)) ** 3, (0, pi)),
(lambda x: (np.log(x ** 2)) ** (1 / 3), lambda x: 2 * (np.log(x ** 2)) ** (-2/3) / (3 * x), (1, None)),
(lambda x: np.log((1 + x) / (1 - x)) / 4 - np.arctan(x) / 2, lambda x: x ** 2 / (1 - x ** 4), (-1, 1)),
(lambda x: np.arctanh(3 * x ** 3 + x ** 2 + 1), lambda x: (9 * x ** 2 + 2 * x) / (1 - (3 * x ** 3 + x ** 2 + 1) ** 2), (0, None)),
(lambda x: np.sinh(np.cbrt(x)) + np.cosh(4 * x ** 3) , lambda x: np.cosh(np.cbrt(x)) / (3 * x ** (2/3)) + 12 * (x ** 2) * np.sinh(4 * x ** 3), (1/4, None)),
(lambda x: np.log(1 + x ** 2) / np.arctanh(x), lambda x: ((2 * x * np.arctanh(x) / (1 + x ** 2)) - (np.log(1 + x ** 2) / (1 - x ** 2))) / (np.arctanh(x)) ** 2, (0, 1))
],
)
def test_arbitrary_function(backend, func, y_d, domain):
if domain is None:
x_arr = generate_test_data()
else:
x_arr = generate_test_data(a=domain[0], b=domain[1])
try:
with ua.set_backend(backend), ua.set_backend(udiff, coerce=True):
x = np.asarray(x_arr)
x.var = udiff.Variable('x')
ret = func(x)
y_d_arr = y_d(x)
except ua.BackendNotImplementedError:
if backend in FULLY_TESTED_BACKENDS:
raise
pytest.xfail(reason="The backend has no implementation for this ufunc.")
if isinstance(ret, da.Array):
ret.compute()
assert_allclose(ret.diffs[x].arr, y_d_arr.arr)
```
#### File: src/udiff/_uarray_plug.py
```python
from uarray import wrap_single_convertor
from unumpy import ufunc, ndarray
import unumpy
import functools
import unumpy as np
import uarray as ua
from . import _builtin_diffs
from ._diff_array import DiffArray
from typing import Dict
_ufunc_mapping: Dict[ufunc, np.ufunc] = {}
__ua_domain__ = "numpy"
_implementations: Dict = {
unumpy.arange: lambda start, stop=None, step=None, **kw: da.arange(
start, stop, step, **kw
),
unumpy.asarray: DiffArray,
}
def __ua_function__(func, args, kwargs, tree=None):
from udiff import SKIP_SELF
from ._func_diff_registry import global_registry
extracted_args = func.arg_extractor(*args, **kwargs)
arr_args = tuple(x.value for x in extracted_args if x.type is np.ndarray)
input_args = tuple(
x.value for x in extracted_args if x.coercible and x.type is np.ndarray
)
if tree is None:
tree = compute_diff_tree(*input_args)
with SKIP_SELF:
if len(arr_args) == 0:
out = func(*args, **kwargs)
return DiffArray(out)
a, kw = replace_arrays(
func, args, kwargs, (x.arr if x is not None else None for x in arr_args)
)
out_arr = func(*a, **kw)
out = DiffArray(out_arr)
for k in tree:
diff_args = []
for arr in arr_args:
if arr is None:
diff_args.append(None)
continue
if k in arr.diffs:
diff_args.append((arr, arr.diffs[k]))
else:
diff_args.append((arr, np.broadcast_to(0, arr.shape)))
a, kw = replace_arrays(func, args, kwargs, diff_args)
with ua.set_backend(NoRecurseBackend(tree[k])):
if func is np.ufunc.__call__:
diff_arr = global_registry[a[0]](*a[1:], **kw)
else:
diff_arr = global_registry[func](*a, **kw)
out.diffs[k] = diff_arr
return out
def compute_diff_tree(*arrs, diffs=None):
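# Recursively collect, for every input array, which Variables it carries
# derivatives for; the result is a nested dict (variable -> sub-tree) describing
# the differentiation structure, including higher-order derivatives.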
if diffs is None:
diffs = {}
for arr in arrs:
for var, diff in arr.diffs.items():
diffs[var] = compute_diff_tree(diff, diffs=diffs.get(var, {}))
return diffs
def replace_arrays(func, a, kw, arrays):
d = tuple(func.arg_extractor(*a, **kw))
arrays = tuple(arrays)
new_d = []
j = 0
for i in d:
if i.type is np.ndarray:
new_d.append(arrays[j])
j += 1
else:
new_d.append(i.value)
return func.arg_replacer(a, kw, tuple(new_d))
@wrap_single_convertor
def __ua_convert__(value, dispatch_type, coerce):
if dispatch_type is np.ndarray:
if value is None:
return value
if isinstance(value, DiffArray):
return value
if coerce:
import udiff
with udiff.SKIP_SELF:
return DiffArray(np.asarray(value))
return NotImplemented
return value
def replace_self(func):
@functools.wraps(func)
def inner(self, *args, **kwargs):
if self not in _ufunc_mapping:
return NotImplemented
return func(_ufunc_mapping[self], *args, **kwargs)
return inner
class NoRecurseBackend:
def __init__(self, tree=None):
self._tree = tree
__ua_domain__ = __ua_domain__
__ua_convert__ = staticmethod(__ua_convert__)
def __ua_function__(self, f, a, kw):
return __ua_function__(f, a, kw, tree=self._tree)
def __eq__(self, other):
import udiff
return isinstance(other, NoRecurseBackend) or other is udiff
``` |
{
"source": "joaosoaresa/Analise-de-Sentimentos-com-Twitter",
"score": 3
} |
#### File: joaosoaresa/Analise-de-Sentimentos-com-Twitter/Twitter.py
```python
from tweepy import *
from pandas import DataFrame
from textblob import TextBlob
from matplotlib.pyplot import *
import numpy as np
def twitter():
auth = OAuthHandler('RIvAic3yDT8qxHKT4NymRQ275', '<KEY>')
auth.set_access_token('<KEY>', '<KEY>')
'''
try:
usuario = input("Informe o usuario: ")
except TweepError:
print(TweepError.message[0]['code'])
'''
op = 'n'
usuario = input("Informe o usuario: ")
api = API(auth)
user = api.get_user(usuario)
def usuario_novo(): #Funçao para novo usuario
usuario = input("Informe o usuario: ")
user = api.get_user(usuario)
print("Nome do usuario: ", user.name, "\n")
return usuario
def infor(user): #Informações do usuario atual
print("Seguindo: ", user.friends_count) # Quantas pessoas ele/a segue
print("Seguidores: ", user.followers_count) # Quantas pessoas seguem ele/a
print("Localização do usuario: ", user.location) # Local do perfil
print("Linguagem do usuario:", user.lang) # linguagem do usuario
print('foto do usuario:', user.profile_image_url) # foto de perfil
print("\n")
op = input("Finalizar? (s) ou (n) \n")
return op
def amigos(user): #Informa lista de amigos do usuario e suas respectivas informaçoes
for friend in user.followers(): #lista de amigos do usuario
print("nome de usuario: ",friend.screen_name, "\n nome no perfil: ",friend.name) #amigo do usuario
print("Seguidores: ",friend.followers_count) #Numero de seguidores do amigo
print("Seguindo: ", friend.friends_count) # Quantas pessoas ele/a segue
op = input("Finalizar? (s) ou (n) \n")
return op
def publica(user): #Informa as publicações do usuario
sentimentos = []
tweets_publicos = api.user_timeline(user_id = user.id, count=10, page=1)# Publicações da timeline publica do usuario inserido
for tweet in tweets_publicos:
analysis = TextBlob(tweet.text)
sentimentos.append(analysis.sentiment.polarity)
if analysis.detect_language() != 'en':
traducao = TextBlob(str(analysis.translate(to='en')))
print('Tweet: {0} - Sentimento: {1}'.format(tweet.text, traducao.sentiment.polarity))
else:
print('Tweet: {0} - Sentimento: {1}'.format(tweet.text, analysis.sentiment.polarity))
sentimentalismo(sentimentos)
op = input("Finalizar? (s) ou (n) \n")
return op
def pesquica(): #Realiza pesquisa sobre o tema referente e mostra usuarios
pesquica_input = input("pesquisar sobre : ")
resultados = []
usuarios = []
sentimentos = []
for tweet in Cursor(api.search, q=pesquica_input).items(10):
#print(tweet.text, '\n')
analysis = TextBlob(tweet.text)
resultados.append(tweet)
usuarios.append(tweet.author.name)
sentimentos.append(analysis.sentiment.polarity)
def process_results(resultados):
id_list = [tweet.id for tweet in resultados]
lista = DataFrame(id_list, columns=["id"])
# lista["ID"] = [tweet.author.id for tweet in results]
# lista["descrição"] = [tweet.author.description for tweet in resultados]
lista["Usuario"] = [tweet.author.name for tweet in resultados]
#lista["seguindo"] = [tweet.author.followers_count for tweet in resultados]
#lista["seguidores"] = [tweet.author.friends_count for tweet in resultados]
lista["texto"] = [tweet.text for tweet in resultados]
lista["localização"] = [tweet.author.location for tweet in resultados]
#lista["Sentimentos"] = analysis.sentiment.polarity
return lista
lista = process_results(resultados)
print(lista.head())
sentimentalismo(sentimentos)
op = input("Finalizar? (s) ou (n) \n")
return op
def sentimentalismo(sentimentos):
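# TextBlob polarity scores lie in [-1, 1] (negative to positive sentiment); the
# histogram shows how the analysed tweets are distributed over that range.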
hist(sentimentos, align='mid')
xlabel('Sentimentos')
ylabel('Incidência')
title('Histograma de Sentimentos')
axis([-1, 1, 0, 10])
show()
print("Nome do usuario: ", user.name,"\n") #Imprime o nome do usuario
while op == 'n' or op == 'N':
selec = input("O que deseja fazer? \n ver publicações (publica) \n ver amigos (amigos) \n ver informações (infor) \n Pesquisar sobre tema (pesquisa) \n Novo usuario (user) \n Finalizar (s) \n")
if selec == 'publica': #Se selecionada a opçao de Publicações do usuario atual
op = publica(user)
elif selec == 'amigos':#Se selecionada a opçao de Amigos do usuario atual
op = amigos(user)
elif selec == 'infor':#Se selecionada a opçao de Informações do usuario atual
op = infor(user)
elif selec == 'user': #Usado para alterar o usuario atual
usuario = usuario_novo()
user = api.get_user(usuario)
elif selec == "pesquisa": #Se selecionada a opção realizar pesquisa
op = pesquica()
elif selec == 's' or op == 's':#Se selecionada a opção Finalizar
break
else:
print("Opção Invalida \n")
continue
return 0
rede = input("Confirme que você não é um robô: \n (twitter)")
if rede == 'twitter':
twitter()
else:
print("rede invalida ou inexistente")
``` |
{
"source": "joaosoares/xray_teeth_detection",
"score": 3
} |
#### File: xray_teeth_detection/src/data_preprocessing.py
```python
from typing import Callable, Dict, List, Tuple, Type, Union, Text
import cv2
import numpy as np
from matplotlib import pyplot as plt
from functools import reduce
Image = Type[np.ndarray]
FnWithArgs = Tuple[Callable[..., Image], Dict[Text, int]]
FnWithoutArgs = Tuple[Callable[[Image], Image]]
FunctionList = List[Union[FnWithArgs, FnWithoutArgs]]
class Preprocessor:
@staticmethod
def apply(pipeline: FunctionList, images: Union[Image, List[Image]]) -> List[Image]:
"""Applies a preprocessing function to a list of images"""
if isinstance(images, np.ndarray):
images = [images]
def apply_fn(obj, fun):
    # fun is either a (callable, kwargs) pair or a bare (callable,) tuple
    if len(fun) > 1 and fun[1]:
        return fun[0](obj, **fun[1])
    return fun[0](obj)
return [reduce(apply_fn, pipeline, image) for image in images]
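    # Illustrative usage sketch (assumed, not from the original project): a pipeline
    # is a list of (function, kwargs) pairs applied left to right, e.g.
    #
    #   pipeline = [
    #       (Preprocessor.bilateral, {"diameter": 9, "times": 2}),
    #       (Preprocessor.sobel, {"scale": 1, "delta": 0}),
    #   ]
    #   edges = Preprocessor.apply(pipeline, img)[0]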
@staticmethod
def bilateral(image, diameter=9, sigma_color=150, sigma_space=150, times=1):
filtered = image
for _ in range(times):
filtered = cv2.bilateralFilter(
image, d=diameter, sigmaColor=sigma_color, sigmaSpace=sigma_space
)
return filtered
@classmethod
def median_filter(cls, img, ksize, times):
filtered = img
for i in range(times):
filtered = cv2.medianBlur(img, ksize=ksize)
return filtered
@classmethod
def errosion(cls, img, ksize):
    # cv2.erode expects a structuring element (ndarray), not a bare kernel size
    kernel = np.ones((ksize, ksize), np.uint8)
    return cv2.erode(img, kernel=kernel)
@classmethod
def dilatation(cls, img, ksize):
    kernel = np.ones((ksize, ksize), np.uint8)
    return cv2.dilate(img, kernel=kernel)
@classmethod
def top_hat_processing(cls, img, ksize):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, ksize=(ksize, ksize))
return cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel=kernel)
@classmethod
def laplacian(cls, img):
return cv2.Laplacian(img, ddepth=cv2.CV_64F)
@classmethod
def show_image(cls, img):
plt.imshow(img, cmap="gray")
plt.show()
@staticmethod
def sobel(img, scale=1, delta=0):
ddepth = cv2.CV_16S
grad_x = cv2.Sobel(img, ddepth, 1, 0, ksize=3, scale=scale, delta=delta)
grad_y = cv2.Sobel(img, ddepth, 0, 1, ksize=3, scale=scale, delta=delta)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
@classmethod
def apply_scharr(cls, img, scale, delta):
ddepth = cv2.CV_16S
grad_x = cv2.Scharr(
img, ddepth, 1, 0, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
)
grad_y = cv2.Scharr(
img, ddepth, 0, 1, scale=scale, delta=delta, borderType=cv2.BORDER_DEFAULT
)
abs_grad_x = cv2.convertScaleAbs(grad_x)
abs_grad_y = cv2.convertScaleAbs(grad_y)
return cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
if __name__ == "__main__":
img = cv2.imread("../data/Radiographs/01.tif", flags=cv2.IMREAD_GRAYSCALE)
img = Preprocessor.bilateral(
    img, diameter=9, sigma_color=150, sigma_space=150, times=1
)
# img = Preprocessor.median_filter(img, ksize=5, times=5)
# img = Preprocessor.top_hat_processing(img, ksize=150)
img = Preprocessor.sobel(img, scale=1, delta=0)
Preprocessor.show_image(img)
```
#### File: xray_teeth_detection/src/evaluator.py
```python
from typing import Text, Type, Union, Dict, List, cast
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import mean_squared_error
from auto_initializator import AutoInitializator
from image_shape import ImageShape
from manual_initializator import ManualInitializator
from shapeutils import plot_image_shape
from active_shape_model import ActiveShapeModel
from incisors import Incisors
ImageShapesDict = Dict[Incisors, List[ImageShape]]
class Evaluator:
def __init__(
self,
initial: ImageShapesDict,
expected: ImageShapesDict,
printable: ImageShapesDict = None,
) -> None:
self.initial = initial
self.expected = expected
if printable is None:
self.printable = expected
else:
self.printable = printable
self.actual: ImageShapesDict = {}
def quantitative_eval(self):
"""Performs leave-one-out evaluation"""
root_mean_squared_errors = {}
for incisor in Incisors:
initial = self.expected[incisor]
expected = self.expected[incisor]
printable = self.printable[incisor]
self.actual[incisor] = []
root_mean_squared_errors[incisor] = []
print(incisor)
for i, (initial_imgshp, expected_imgshp, printable_imgshp) in enumerate(
zip(initial, expected, printable)
):
# Create array with all other expected image shapes
other_image_shapes = [
imgshp for imgshp in expected if (imgshp is not expected_imgshp)
]
# Find the ASM
asm = ActiveShapeModel.from_image_shapes(other_image_shapes)
# Apply found ASM of initial imageshape
actual_image_shape = asm.fit_to_image(initial_imgshp)
self.actual[incisor].append(actual_image_shape)
# Calculate mean_squared error
rmse = self.root_mean_squared_error(actual_image_shape, expected_imgshp)
print(rmse)
root_mean_squared_errors[incisor].append(rmse)
# Save image
self.save_image_shape(
ImageShape(printable_imgshp.image, actual_image_shape.shape),
f"./actual-{incisor}-{i}",
)
def root_mean_squared_error(
self, actual: ImageShape, expected: ImageShape
) -> float:
"""Calculates mean squarred error for an image shape."""
return np.sqrt(mean_squared_error(actual.shape.points, expected.shape.points))
def qualitative_eval(self):
    """Saves all fitted image shapes so they can be compared visually. Must be run after
    quantitative_eval, which populates self.actual."""
    for incisor in Incisors:
        for i, imgshp in enumerate(self.actual[incisor]):
            filename = f"./actual-{i}-{incisor}"
            self.save_image_shape(imgshp, filename)
def save_image_shape(self, image_shape: ImageShape, filename: Text) -> None:
"""Saves an image shape with a given filename"""
plot_image_shape(
image_shape, display=False, dots=False, interpol=False, vecs=False
)
plt.savefig(filename + ".png", dpi=600)
plt.clf()
```
#### File: xray_teeth_detection/src/incisors.py
```python
from enum import Enum
from active_shape_model import ActiveShapeModel
from image_shape import ImageShape
from shape import Shape
class Incisors(Enum):
UPPER_OUTER_LEFT = 1
UPPER_INNER_LEFT = 2
UPPER_INNER_RIGHT = 3
UPPER_OUTER_RIGHT = 4
LOWER_OUTER_LEFT = 5
LOWER_INNER_LEFT = 6
LOWER_INNER_RIGHT = 7
LOWER_OUTER_RIGHT = 8
@classmethod
def active_shape_models(cls, images, incisors=None):
"""
Computes the ActiveShapeModels for each incisor. Returns two dicts,
one with active shape models and the other with the corresponding
image_shapes used for each incisor.
"""
if incisors is None:
incisors = cls
active_shape_models = {}
all_image_shapes = {}
for incisor in incisors:
image_shapes = [
ImageShape(image, Shape.from_file(i, incisor.value))
for i, image in enumerate(images, 1)
]
all_image_shapes[incisor] = image_shapes
active_shape_models[incisor] = ActiveShapeModel.from_image_shapes(
image_shapes
)
return active_shape_models, all_image_shapes
```
#### File: xray_teeth_detection/src/point.py
```python
from typing import NamedTuple
import numpy as np
# Represents a point of the format (x, y), refering to the index
# of an np.ndarray
class Point(NamedTuple):
"""Represents a point with x and y coords"""
x: int
y: int
def add_noise(self):
"""Returns a new point with random added noise"""
p_with_noise = Point(
int(self.x + np.random.normal(scale=5.0)),
int(self.y + np.random.normal(scale=5.0)),
)
return p_with_noise
def round(self):
"""Returns a new point with the closest integer coordinates"""
rounded_p = Point(int(round(self.x)), int(round(self.y)))
return rounded_p
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point(self.x - other.x, self.y - other.y)
def __truediv__(self, other):
if isinstance(other, Point):
return Point(self.x / other.x, self.y / other.y)
return Point(self.x / other, self.y / other)
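# Minimal usage sketch (illustrative only, not part of the original module):
# Point supports element-wise arithmetic, so offsets and midpoints read naturally.
if __name__ == "__main__":
    a = Point(2, 3)
    b = Point(10, 7)
    print(b - a)          # Point(x=8, y=4)
    print((a + b) / 2)    # Point(x=6.0, y=5.0) -- the midpoint
    print(((a + b) / 2).round())  # Point(x=6, y=5)
    print(a.add_noise())  # the same point with integer Gaussian jitter applied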
``` |
{
"source": "joaosousafranco/microdc-init",
"score": 3
} |
#### File: microdc-init/microdc/parse_arguments.py
```python
import argparse
import os
import sys
from datetime import datetime
def check_date_file(datefile):
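# The datefile's first line must hold a timestamp such as '01 Jan 2020 12:00'
# ('%d %b %Y %H:%M'); a malformed date raises ValueError, while a missing file
# simply means --setup has not been run yet.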
try:
with open(datefile, 'r') as file:
date = file.readline().strip()
file.close()
datetime.strptime(date, '%d %b %Y %H:%M')
return True
except ValueError:
raise ValueError("Date in {} is wrong".format(datefile))
except FileNotFoundError:
return False
def parse_args(argv):
"""
Parse all command line arguments and return a dict
"""
parser = argparse.ArgumentParser(description='''Generate shell commands from
a yaml config file to setup MicroDC.''')
required = parser.add_argument_group('required arguments')
required.add_argument('--config',
metavar='CONFIG',
required=2,
help='The location of the YAML config file')
parser.add_argument('--stack',
metavar='STACK',
help='Which stack to apply')
parser.add_argument('--tool',
metavar='TOOL',
help='Which tool to use')
parser.add_argument('--account',
metavar='ACCOUNT',
default='nonprod',
help='Set the AWS account to use')
parser.add_argument('--bootstrap',
action='store_true',
help='Run bootsrap steps')
parser.add_argument('--env',
metavar='ENV',
default='None',
help='Which environment to modify')
parser.add_argument('--datefile',
metavar='DATEFILE',
default='.datefile',
help='The datefile is used to check if the setup process has been run.\
Defaults to .datefile in the workdir.')
parser.add_argument('--setup',
action='store_true',
help='Download MicroDC components')
parser.add_argument('--overwrite',
action='store_true',
help='Overwrite the various MicroDC component repos when using --setup')
parser.add_argument('--workdir',
metavar='WORKDIR',
help='MicroDC working folder eg. /home/user/.microdc')
parser.add_argument('action',
metavar='ACTION',
                        help='Action to perform: up or down')
arguments = parser.parse_args(argv)
if not check_date_file("{}/{}".format(arguments.workdir, arguments.datefile)):
if not arguments.setup:
parser.print_help()
print("\nERR please run with --setup\n")
sys.exit(2)
return arguments
``` |
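A small, hedged example of calling `parse_args` directly. The import path mirrors the file layout shown above but is still an assumption, the config path and workdir are placeholders, and `--setup` is passed so the missing-datefile check does not trigger the early exit.
```python
# Hypothetical invocation of the parser above; paths are placeholders.
from microdc.parse_arguments import parse_args

args = parse_args([
    "--config", "config.yaml",
    "--workdir", "/tmp/microdc",
    "--account", "nonprod",
    "--setup",          # required on a first run, before a datefile exists
    "up",               # positional ACTION
])
print(args.account, args.action, args.setup)  # nonprod up True
```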
{
"source": "joaoteixeira88/pyguard",
"score": 2
} |
#### File: pyguard/exception/base_exception.py
```python
class BaseGuardException(Exception):
def __init__(self, message=None):
self.message = message
def __str__(self):
return f'{self.__class__.__name__}, {self.message}'
```
#### File: pyguard/guard/guard_iterable.py
```python
from collections.abc import Iterable
from string import Template
from constants.templates import Templates
from exception.argument_empty_exception import ArgumentEmptyException
from exception.argument_exception import ArgumentException
from guard.configurations import GenericParameterName
def not_any(param: Iterable, param_name: str = None, message=None) -> None:
"""
Guards the specified :param param from containing no elements by throwing an exception of type
ArgumentEmptyException with a specific :param message when the precondition has not been met
:param param: The param to be checked
:param param_name: The name of the param to be checked, that will be included in the exception
:param message: The message that will be included in the exception
"""
if not param_name:
param_name = GenericParameterName
if not message:
message = Template(template=Templates.NotAnyMessage).substitute(var=param_name)
if not param or len(param) == 0:
raise ArgumentEmptyException(message=message)
def min_count(param: Iterable, threshold: int, param_name: str = None, message=None) -> None:
"""
Guards the specified :param param from containing less elements than :param threshold by throwing
an exception of type ArgumentException with a specific :param message when the precondition has not been met
:param param: The param to be checked
:param threshold: The threshold against which the param will be checked
:param param_name: The name of the param to be checked, that will be included in the exception
:param message: The message that will be included in the exception
"""
if not param_name:
param_name = GenericParameterName
if not message:
message = Template(template=Templates.MinCountMessage).substitute(var=param_name, value=threshold)
if len(param) < threshold:
raise ArgumentException(message=message)
def contains_duplicated(param: Iterable, message=None):
"""
Guards the specified :param param from having duplicated values by throwing
an exception of type ArgumentException with a specific :param message when the precondition has not been met
:param param: The param to be checked
:param message: The message that will be included in the exception
"""
if not message:
message = Template(template=Templates.ContainDuplicatedMessage).substitute()
if len(param) != len(set(param)):
raise ArgumentException(message=message)
```
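A brief, hedged demonstration of the three guards above; it assumes the package layout implied by the file paths (so `guard.guard_iterable` and the `exception` package are importable) and uses toy lists.
```python
# Illustrative only; assumes the repository root is on PYTHONPATH.
from exception.argument_empty_exception import ArgumentEmptyException
from exception.argument_exception import ArgumentException
from guard.guard_iterable import contains_duplicated, min_count, not_any

items = ["a", "b", "c"]

not_any(items, param_name="items")   # passes: the iterable is not empty
min_count(items, threshold=2)        # passes: 3 elements >= 2
contains_duplicated(items)           # passes: all values are unique

try:
    min_count(items, threshold=5, param_name="items")
except ArgumentException as exc:
    print(exc)                       # message built from Templates.MinCountMessage

try:
    not_any([], param_name="items")
except ArgumentEmptyException as exc:
    print(exc)                       # message built from Templates.NotAnyMessage
```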
#### File: pyguard/tests/test_guard_numeric.py
```python
import pytest
from exception.argument_out_of_range_exception import ArgumentOutOfRangeException
from guard import Guard
@pytest.mark.parametrize(
"param, value, param_name, message, expected",
[
(2, 1, None, "param cannot be greater than 1.", pytest.raises(ArgumentOutOfRangeException)),
(
5.43345, 3.3434, "test", "test cannot be greater than 3.3434.",
pytest.raises(ArgumentOutOfRangeException)),
]
)
def test_NotGreaterThan_GreaterThanThreshold_RaisedArgumentOutOfRangeException(param, value, param_name,
message, expected):
with expected:
Guard.not_greater_than(param=param, threshold=value, param_name=param_name, message=message)
@pytest.mark.parametrize(
"param, value",
[
(1, 2),
(3.43345, 5.3434)
]
)
def test_NotGreaterThan_LowerThanThreshold_NoExceptionRaised(param, value):
Guard.not_greater_than(param=param, threshold=value)
@pytest.mark.parametrize(
"param, value, param_name, message, expected",
[
(1, 2, None, "parameter cannot be less than 2.", pytest.raises(ArgumentOutOfRangeException)),
(
3.43345, 5.3434, "test", "test cannot be less than 5.3434.",
pytest.raises(ArgumentOutOfRangeException)),
]
)
def test_NotLessThan_LessThanThreshold_RaisedArgumentOutOfRangeException(param, value, param_name,
message, expected):
with expected as err:
Guard.not_less_than(param=param, thershold=value, param_name=param_name)
assert message in str(err.value)
@pytest.mark.parametrize(
"param, value",
[
(2, 1),
(5.43345, 3.3434)
]
)
def test_NotLessThan_GreaterThanThreshold_NoExceptionRaised(param, value):
Guard.not_less_than(param=param, thershold=value)
``` |
{
"source": "Joao-Tiago-Almeida/NLG-model",
"score": 2
} |
#### File: NLG-model/offline model/DefaultText.py
```python
import os
import glob
import pandas
import re
import numpy as np
from sklearn import model_selection
from sklearn import linear_model
from sklearn import svm
from sklearn.metrics import explained_variance_score
import matplotlib.pyplot as plt
import seaborn as sns
import json
from NER import detect_ner_classes
#from Server import nlp # not used
from googletrans import Translator
from NLGML import write_text
import pickle
import datetime
from statistics import mean
import time
report = {}
'''
Other tableaus:
https://public.tableau.com/views/DashboardExample_14/DashboardExample?:embed=y&:display_count=yes&:showTabs=y&:showVizHome=no
https://public.tableau.com/views/money1_13/CashInstruments?%3Aembed=y&%3AshowVizHome=no&%3Adisplay_count=y&%3Adisplay_static_image=y&%3AbootstrapWhenNotified=true
https://public.tableau.com/views/money_0/Growth?%3Aembed=y&%3AshowVizHome=no&%3Adisplay_count=y&%3Adisplay_static_image=y&%3AbootstrapWhenNotified=true
https://public.tableau.com/views/RegisteredVehiclesOpenDataProject/BrandBenchmark?:showVizHome=n&:embed=t
https://public.tableau.com/views/ThePulpFictionConnection/PulpFictionConnection?:showVizHome=n&:embed=t
https://public.tableau.com/views/CashlessSociety/CashlessSociety?:showVizHome=n&:embed=t
https://public.tableau.com/views/EuropeanParliamentElection2019/Dashboard1?:showVizHome=n&:embed=t
https://public.tableau.com/shared/2YKXPSN27?:showVizHome=n&:embed=t
https://public.tableau.com/en-us/gallery/costs-using-car?tab=viz-of-the-day&type=viz-of-the-day
https://public.tableau.com/views/Womensrepresentationinpoliticsvizforsocialgood/WomeninPolitics?:showVizHome=n&:embed=t
https://public.tableau.com/views/TopLinkedInSkillsfor20142015and2016/LinkedInDashboard?:showVizHome=n&:embed=t
https://public.tableau.com/views/BigBookofLineCharts/BBLC1?:showVizHome=n&:embed=t
https://public.tableau.com/views/TheMeatMap/meat-dash?:showVizHome=n&:embed=t
https://public.tableau.com/views/Banksy/Home?:showVizHome=n&:embed=t
https://public.tableau.com/views/500womenscientistsdesktop/Dashboard1?:showVizHome=n&:embed=t
https://public.tableau.com/views/2018W27NewYorkRatSightings/NewYorkRatSightings?:showVizHome=n&:embed=t 5/Titanic?:showVizHome=n&:embed=t"
'''
#Global variable that has the NER classes.
ner_classes = ["time","entity","location","litter"]
'''Returns a dict with the number of columns and rows of the dataset, or sub-dataset, for the report data.'''
def basic_char(dataset):
tmp = {}
tmp["n_rows"] = dataset.shape[0]
tmp["n_col"] = dataset.shape[1]
return tmp
'''-------------------------------------
Drops columns with standard deviation 0
-------------------------------------'''
def drop_col_no_std(dataset):
corr_dataset = dataset
for h in dataset.columns.values.tolist():
if dataset[h].dtype.kind == "i" or dataset[h].dtype.kind == "f":
if dataset.std()[h] == 0:
corr_dataset = corr_dataset.drop(columns=h)
        # Drop non-numeric columns
else:
corr_dataset = corr_dataset.drop(columns=h)
return corr_dataset
'''-------------------------------------------------------------------------------------------------------------
The flag represents two cases. The first one where no supergroup is detected and so the value for this variable
will be None. The second one, where the flag will hold a string, for example "Domain" or "Water"
that represents the subdataframe (Supergroup value).
------------------------------------------------------------------------------------------------------------"'''
def calc_corr(corr, flag):
#init
correlations = set()
    #it only makes sense to calculate a correlation if there are at least 2 numeric columns.
if(len(corr.columns) > 1):
#There is no supergroup.
# correlation heatmap
try:
annot = len(report['sub_dfs'].keys()) <= 10 # avoid overlap of values
plt.clf()
ax = sns.heatmap(
corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=annot,
square=True,
cmap='viridis',
mask = np.eye(corr.columns.size, dtype=bool),
vmin=-1,
vmax=1,
cbar_kws={'label': "Correlation's Colour Scale"})
name = f"hmx-{time.strftime('%e-%b-%Y_%H:%M:%S')}.png"
ax.figure.savefig(os.path.join('images', name), bbox_inches='tight')
if report["tipo"] == "json":
report['hmx'] = f"<p><style>img {{max-width: 100%;height: auto;}}</style><img src='{os.getcwd()}/images/{name}'></p>"
else:
report['hmx'] = f"<p><style>img {{max-width: 100%;height: auto;}}</style><img src='http://localhost:5000/images/{name}'></p>"
except:
report['hmx'] = ""
for h in corr.columns.values.tolist():
corr_column = corr.drop(h)
        #For each column gets the greatest correlation in absolute value
best_value = corr_column.apply(lambda x: max(x.min(), x.max(), key=abs))[h]
row_id = corr_column[h].idxmin() if best_value < 0 else corr_column[h].idxmax()
# If correlation is strongly negative or positive
if best_value >= 0.75 or best_value <= -0.75:
correlations.add(frozenset([h,row_id, best_value]))
correlations = [list(x) for x in correlations]
#Remember, if the flag is != None, that means it is a value of the supergroup.
if(flag == None):
report["corr"] = correlations
else:
if(correlations != []):
report["corr"][flag] = correlations
return correlations
'''--------------------------------------------------------------------
Returns correlations for a given excel. It treats two different cases.
When a supergroup is or not detected.
---------------------------------------------------------------------'''
def excel_correlation(dataset,report_data,dataset_dic):
#Supergroup exists.
if(report_data["super_group"] != None):
res = []
#Iterates each dataframe(There is a dataframe for each Supergroup value)
for key in dataset_dic:
dataset = dataset_dic[key]
res.append(pre_correlation(dataset,report_data,key))
return res
else: #Supergroup does not exist
return pre_correlation(dataset,report_data)
'''----------------------------------------------------------------------
Adds the tableau correlations to the report. Takes into consideration the two cases:
where there is or isn't a supergroup.
-----------------------------------------------------------------------'''
def tableau_correlation(dataset,report_data,dataset_dic:dict):
sg = report_data["super_group"]
    # Case where there are no supergroups in the tableau.
if(sg == None):
pre_correlation(dataset,report_data)
else:
#Stores the correlations.
        # Analyses only the column with the most variation, because it is the most likely to show noticeable correlations and it keeps the program quick
col_name = report_data['global_analysis']['std_max']['category']
#Iterates Supergroup values.
tmp = pandas.DataFrame()
for key in dataset_dic.keys():
tmp[key] = dataset_dic[key][col_name].reset_index(drop=True)
calc_corr(tmp.corr(),None)
'''Prepares the dataframe before extracting correlations.'''
def pre_correlation(dataset,report_data,key = None):
corr_dataset = drop_col_no_std(dataset)
#Removes NER columns from the correlation.
for classe in ner_classes:
if report_data[classe] != None:
if report_data[classe] in list(corr_dataset.columns.values):
corr_dataset = corr_dataset.drop([report_data[classe]],axis = 1)
calc_corr(corr_dataset.corr(),key)
def common_member(a, b):
a_set = set(a)
b_set = set(b)
# check length
if len(a_set.intersection(b_set)) > 0:
return(a_set.intersection(b_set))
else:
return []
def drop_empty_columns(dataset):
#Drop columns with no values
for h in dataset.columns.values.tolist():
if dataset[h].isnull().sum() == dataset.shape[0]:
dataset = dataset.drop(columns=h)
return dataset
'''--------------------------------------------------------------------
Returns a dictionary with 2 keys. Each key value is an array
containing the name of the columns that are either numeric or categoric.
--------------------------------------------------------------------'''
def get_column_types(dataset):
col_labels = dataset.columns.values.tolist()
col_types = {"numeric" : [] , "categoric" : []}
for label in col_labels:
#numeric cols
if dataset[label].dtypes == "float64" or dataset[label].dtypes == "int64":
col_types["numeric"].append(label)
else:
try: #numeric cols
dataset[label] = pandas.to_numeric(dataset[label])
col_types["numeric"].append(label)
except: #categoric cols
col_types["categoric"].append(label)
report["numeric"] = col_types["numeric"]
report["categoric"]= col_types["categoric"]
return col_types
'''-----------------------------------------------------------------------------
For each numeric column of the dataset, registers the NER values for the maximum
and the minimum of entity, location or time. Receives as arguments:
the dataset,
the numeric column being iterated in the dataset,
the supergroup value in the dataset.
Returns a dictionary with 3 keys (entity, time, location) with the columns as values.
------------------------------------------------------------------------------'''
def init_report_ner():
for ner in ner_classes:
report[ner] = None
'''--------------------------------------------------------------------------
For each NER class (location, entity and time), the column with the
highest probability is stored, if the column is not the supergroup column.
---------------------------------------------------------------------------'''
def get_ner():
ner = {}
init_report_ner()
for n_c in ner_classes:
ner[n_c] = {}
#For any column in the dataset.
for col in report["cols_info"]:
if(report["cols_info"][col] != None):
prev_val = 0
#Gets the class and the probability.
classe = report["cols_info"][col]["ner"][0]
val = report["cols_info"][col]["ner"][1]
if (classe != "NULL" and val > prev_val and col != report["super_group"] ):
ner[classe] = col #If it is a requirement to return the probability in the future, it is easy.
prev_val = val
report[classe] = col
return ner
'''---------------------------------------------------------------
This function adds to the report for each numeric column
the NER class values with respect to the maximum and minimum.
df - dataframe.
actual_col - numeric column being iterated.
cat_col - categoric column that gave positive to NER.
super_group_val - super group value being iterated now.
class_type - categoric column class
----------------------------------------------------------------'''
def add_max_min(df,actual_col,cat_col,super_group_val,class_type):
if df[actual_col].dtype.name != "object":
maxim = report["sub_dfs"][super_group_val][actual_col]["max"]
minim = report["sub_dfs"][super_group_val][actual_col]["min"]
if(len(df.loc[df[actual_col] == maxim][cat_col].values) > 0):
max_categoric = df.loc[df[actual_col] == maxim][cat_col].values
min_categoric = df.loc[df[actual_col] == minim][cat_col].values
report["sub_dfs"][super_group_val][actual_col]["max_"+str(class_type)] = max_categoric
report["sub_dfs"][super_group_val][actual_col]["min_"+str(class_type)] = min_categoric
'''---------------------------------------------------------------------
Adds to each max and min of a numeric columns
the values of the categoric cells considered as entity,
time and location.
---------------------------------------------------------------------'''
def add_ner(dataset,actual_col,super_group_val,ner):
#for entity,time or location.
for key in ner.keys():
if ner[key] != actual_col and ner[key] != {}:
add_max_min(dataset,actual_col,ner[key],super_group_val,key)
'''--------------------------------------------------------------------
dataset - Sub-dataset from the original fragmented in the values of the
supergroup - (super_group_val)
super_group_val - Value of the Supergroup.
---------------------------------------------------------------------'''
def basic_numeric_analysis(dataset,super_group_val = None):
info = {}
col_types = get_column_types(dataset)
num_dataset = dataset[col_types["numeric"]].copy()
for column in list(num_dataset.columns):
preInfo = num_dataset[column].describe()
info[column] = preInfo
# allows general analyse
if super_group_val is None:
return advanced_numeric_analysis(info,dataset)
#Adds info of all columns to the report.
report["sub_dfs"][super_group_val] = info
ner = get_ner()
for column in dataset.columns:
add_ner(dataset,column,super_group_val,ner)
'''---------------------------------------
General analysis
----------------------------------------'''
def advanced_numeric_analysis(info_columns:dict,dataset):
# struct example
report['global_analysis'] = {
'global_min': {
'value': '',
'category':'',
'sg': None
},
'global_max': {
'value': '',
'category':'',
'sg': None
},
'std_min': {
'value': '',
'category':''
},
'std_max': {
'value': '',
'category':''
}
}
# Global values
list_max_aux = []
list_min_aux = []
list_group_aux = []
# Columns values
list_std_aux = []
for i in info_columns.keys():
        if i == report['time']: # skip the time column; its variation is not analysed
continue
list_max_aux.append(info_columns[i]['max'])
list_min_aux.append(info_columns[i]['min'])
list_group_aux.append(i)
list_std_aux.append(info_columns[i]['std'])
# organize values
global_values = (max(list_max_aux),min(list_min_aux))
global_values_index = (list_max_aux.index(max(list_max_aux)), list_min_aux.index(min(list_min_aux)))
std_values = (max(list_std_aux),min(list_std_aux))
std_values_index = (list_std_aux.index(max(list_std_aux)), list_std_aux.index(min(list_std_aux)))
cat = ('max','min')
for ind, m in enumerate(cat): # index of 'm' in cat
# Max and min global
report['global_analysis']['global_'+m]['value'] = global_values[ind]
category = list_group_aux[global_values_index[ind]]
report['global_analysis']['global_'+m]['category'] = category
index_sg = list(dataset[category]).index(global_values[ind])
try:
report['global_analysis']['global_'+m]['sg'] = dataset[report['super_group']][index_sg].capitalize()
except:
pass # there is no super_group
# std
report['global_analysis']['std_'+m]['value'] = std_values[ind]
category = list_group_aux[std_values_index[ind]]
report['global_analysis']['std_'+m]['category'] = category
'''---------------------------------------
Converts object columns to numeric columns where possible.
----------------------------------------'''
def convert_object_int(df):
cols = df.columns.values
for col in cols:
try:
tmp = df[col].replace(r'[,\"\']','', regex=True).replace(r'\s*([^\s]+)\s*', r'\1', regex=True)
df[col] = pandas.to_numeric(tmp)
except:
            # print("Couldn't convert the given column to a numeric type")
pass
return df
def translate_columns(df):
translator = Translator()
for col in df.columns.values:
translated = translator.translate(col)
if(translated.src != "en"):
df = df.rename(columns={col: translated.text})
return df
'''-------------------------------------------------------------------------------------
Returns a dictionary where each key is a column and the value is a dictionary
where the keys are the values for that column and the value the number of occurrences.
-------------------------------------------------------------------------------------'''
def values_distribution(df):
dist_values = {}
cols = df.columns.values
for col in cols:
#Only the categoric columns are relevant here.
if(np.issubdtype(df[col].dtype, np.number) == False):
#Each column starts as an empty dictionary.
dist_values[col] = {}
#iterates each cell in column
for ind in range(0,len(df)):
#gets cell
val = df[col][ind]
#Obtains the values stored until now in the dictionary.
values = dist_values[col].keys()
#If the value is not present in the dictionary list.
if val not in values:
dist_values[col][val] = 1
else:
dist_values[col][val] += 1
else:
pass
return dist_values
'''----------------------------------------------------------
Returns a list with the columns considered groups.
----------------------------------------------------------'''
def kick_fake_groups(df,dist):
new_df = df
n_rows = df.shape[0]
    '''For each column in the dataframe it confirms that it appears in the distribution dict.
    If not, it is removed from the new dataframe.'''
for col in df.columns.values:
if col not in dist.keys():
new_df = new_df.drop([col], axis=1)
for key in dist:
n_key_vals = len(dist[key].keys())
#If a column has too many values then it is definitely not a group column.
if(n_key_vals > 0.60 * n_rows):
new_df = new_df.drop([key], axis=1)
return new_df.columns.values
'''----------------------------------------------------
Kick groups without variantion in any super group value
----------------------------------------------------'''
def kick_useless_numeric_groups(dataset):
    report_aux = report.copy() # shallow copy so 'report' can be modified while iterating
for i in report_aux['numeric']:
for j in report_aux['sub_dfs']:
if report_aux['sub_dfs'][j][i]['std'] != 0:
break
else:
for jj in report_aux['sub_dfs']:
report['sub_dfs'][jj].pop(i)
report['numeric'].remove(i)
report['cols_info'].pop(i)
            report['basic_char']['n_col'] -= 1 # global count only; not adjusted per super-group value
dataset = dataset.drop(columns=i)
return dataset
'''--------------------------------------------------------------------------------------------------
Selects the supergroup if given in the table. For example in https://public.tableau.com/views/IncomeStatement_10/IncomeStatementYTD?:embed=y&:display_count=y&:origin=viz_share_link, the sg should be "ACCOUNT LEVEL 0 NAME", but it is "Measure Names", because it is identified through "Account Type Sequence"
--------------------------------------------------------------------------------------------------'''
def given_super_group(dataset):
array_names = dataset.columns.values.tolist()
array_bool_2 = [False]*dataset.shape[1]
array_bool_1 = [None]*dataset.shape[1]
for idt in array_names: # column 1
if len(set(dataset[idt])) == 1: # avoids constant column
continue
array_bool_1 = [None]*dataset.shape[1]
for sg in array_names: # column 2 - possible super_group
# The super group have to be string and needs at least 2 values
if len(set(dataset[idt])) <= 1 or not isinstance(list(dataset[idt])[0],str):
array_bool_1[array_names.index(sg)] = False
elif idt == sg: # same columns
continue
for y in set(dataset[idt]): # row to compare
aux10 = dataset.loc[dataset[idt]==y].shape[0] # height of the group in column 1
aux21 = list(dataset.loc[dataset[idt]==y][sg])[0] # row value in column 2 (idt) that match with column 1 (sg) and row y
aux20 = dataset.loc[dataset[sg] == aux21, idt].shape[0] # height of the group in column 2
if len(set(dataset.loc[dataset[idt]==y][sg])) != 1: # a group in a column have to be constant
array_bool_1[array_names.index(sg)] = False
                elif aux10 != aux20 or aux10 < 2: # to be an identifier, both group heights must match and be at least 2
array_bool_1[array_names.index(sg)] = False
elif not isinstance(aux21,str): # checks if the column 2 may be a super_group based on its type, usually turns the identifier (number) to False
array_bool_1[array_names.index(sg)] = False
else:
array_bool_1[array_names.index(sg)] = True
if any(array_bool_1): # found a sg for identifier idt
for i in range(len(array_bool_1)):
if array_bool_1[i]:
array_bool_2[i] = True
report["super_group_bonus"] = [array_names[z] for z in range(len(array_names)) if array_bool_2[z]] if any(array_bool_2) else []
'''----------------------------------------------------------------------------------------------------------
Adds the supergroup column (commonly referred to as 'sg' in the code) to the report if there is one.
sg -> gives us the possibility to choose the sg
-----------------------------------------------------------------------------------------------------------'''
def find_super_group(dist,group):
super_group_col = None
for column in dist:
if column in group:
if super_group_col == None and len(list(dist[column].keys())) > 1:
super_group_col = column
if super_group_col != None and len(list(dist[column].keys())) < len(list(dist[super_group_col].keys())) and \
len(list(dist[column].keys())) > 1:
super_group_col = column
if super_group_col != None:
report['super_group_bonus'].append(super_group_col)
if len(report['super_group_bonus']) > 0:
report['super_group'] = report['super_group_bonus'].pop()
else:
report['super_group'] = None
'''-----------------------------------------------------------
Classifies columns.
0 - Column is supergroup.
1 - Column is the value of a supergroup.
2 - Column is a feature.
In this case the report considers a main column (0), on which
other columns are dependent (1). The remaining columns are features.
------------------------------------------------------------'''
def add_group_columns(df,sub_groups):
cols_info = {}
sub_groups = [] if sub_groups is None else sub_groups
for col in df.columns.values:
cols_info[col] = {}
if col == report["super_group"]:
cols_info[col]["group"] = 0
elif col in sub_groups:
cols_info[col]["group"] = 1
else:
cols_info[col]["group"] = 2
return cols_info
'''----------------------------------------------------------------------------
Receives as argument ogs (other groups), an array containing the group columns;
the supergroup itself is read from the report.
Returns an array of subgroups (or None when the supergroup is not among the groups).
----------------------------------------------------------------------------'''
def get_sub_groups(ogs):
if report['super_group'] in ogs:
return np.delete(ogs, np.where(ogs == report['super_group']))
def check_unique_vals(df,cols_info):
for col in df.columns.values:
if len(df[col].unique()) == 1:
cols_info[col]["unique_val"] = True
else:
cols_info[col]["unique_val"] = False
return cols_info
'''-------------------------------------------------
Adds the time span if a NER time class is detected
-------------------------------------------------'''
def add_timespan(df,col):
res = check_time_format(df[col])
if(res != None):
report["timespan"] = res
else:
startTime = df[col][0]
endTime = df[col][df[col].count() -1]
report["timespan"] = {"begin": startTime , "end": endTime}
return
'''Checks whether a given column has a certain date format'''
def check_time_format(col):
minim = None
min_string = None
maxim = None
max_string = None
'''------------------------------------------------------------------------------------------------------------------------------------
The first regex is a simple one to extract the date, the second one is a regex to be sure what kind of temporal format is being analyzed
Because there might be dates like mm/dd/yyyy and dates like dd/mm/yyyy. The rarest regexes should appear first on the list called regexes
The first index of the array corresponds to the Storms tableau case. Index 2 is just a common case.
-------------------------------------------------------------------------------------------------------------------------------------'''
regexes = [['[0-1]*[0-9]/[0-3]*[0-9]/\\d{4}[ ][0-9]+:\\d{2}:\\d{2}[ ](PM|AM)','^[0-3]?[0-9]/[0-3]?[0-9]/(?:[0-9]{2})?[0-9]{2}','%m/%d/%Y'], \
['(\\d|\\d{2})/\\d{2}/\\d{4}','(\\d|\\d{2})/\\d{2}/\\d{4}','%d/%m/%Y'], \
['\\d{4}','\\d{4}','%Y']]
for reg in regexes:
for cell in col:
match = re.search(reg[0],str(cell))
if(match != None):
match = re.search(reg[1],str(cell))
format_str = reg[2]
datetime_obj = datetime.datetime.strptime(match.group(), format_str)
#Init
if(minim == None and len(match.group()) > 0):
minim = datetime_obj
min_string = match.group()
else:
datetimes = [minim,datetime_obj]
new_min = min(datetimes)
if(new_min != minim and len(match.group()) > 0):
minim = new_min
min_string = match.group()
if(maxim == None):
maxim = datetime_obj
max_string = match.group()
else:
datetimes = [maxim,datetime_obj]
new_max = max(datetimes)
if(new_max != maxim):
maxim = new_max
max_string = match.group()
else:
minim = None
maxim = None
break
if(minim != None and maxim != None):
return {"begin": min_string , "end": max_string}
return None
def add_ner_columns(df,cols_info):
res = detect_ner_classes(df)
for col in cols_info.keys():
#iterates keys: entity,time,location.
for key in res.keys():
if col in res[key]:
if(key == "time"):
add_timespan(df,col)
cols_info[col]["ner"] = (key,res[key][col])
break
else:
cols_info[col]["ner"] = ("NULL",0)
return cols_info
def gen_columns_info(df):
if report['super_group'] is None: # if there is no super_group
dist_values = values_distribution(df)
all_groups = kick_fake_groups(df,dist_values)
given_super_group(df)
find_super_group(dist_values,all_groups)
#find_super_group(dist_values,all_groups) #Normal search for super_group
else:
all_groups = report['categoric']
#Returns array of subgroups.
sub_groups = get_sub_groups(all_groups)
cols_info = add_group_columns(df,sub_groups)
cols_info = add_ner_columns(df,cols_info)
cols_info = check_unique_vals(df,cols_info)
return cols_info
'''--------------------------------------------------------------------------
If there is a supergroup splits the dataframe in the values of that
supergroup. Returns a dictionary where the KEYS are the values of the
supergroup and the VALUES are a dataset with the same number of columns.
---------------------------------------------------------------------------'''
def split_df(cols_info,df):
dfs_list = {}
found_super_group = False
for col in cols_info:
#0 refers to a supergroup.
if cols_info[col] != None and cols_info[col]["group"] == 0:
found_super_group = True
for col_val, df in df.groupby(col):
dfs_list[col_val.capitalize()] = df
#Case where there are no super_groups.
if found_super_group == False:
return {"UNIQUE_DF" : df }
return dfs_list
def generate_text_data(dataset,tipo,sg:str=None):
try:
dataset = translate_columns(dataset)
except:
pass
dataset = convert_object_int(dataset)
'''Stores whether it is processing a excel or a tableau'''
report["tipo"] = tipo
report["super_group"] = sg
report["timespan"] = None
cols_info = gen_columns_info(dataset)
    '''If there is a supergroup in the dataframe, generates one sub dataframe
    per supergroup value. Ex: For the mammals excel, if "Domain" is picked as the super_group
    then there will be a sub dataframe for Earth and Water. '''
df_dic = split_df(cols_info,dataset)
#Stores info relative to each sub_df (there is a sub_df for each super_group value)
report["sub_dfs"] = {}
report["cols_info"] = cols_info
report["corr"] = {}
report["basic_char"] = basic_char(dataset)
#For each super group value.
for sg_val in df_dic.keys():
df = df_dic[sg_val]
#Drops the rows with null values.
df = df.dropna(how='any')
report["basic_char"][sg_val] = basic_char(df)
basic_numeric_analysis(df,sg_val)
    else:  # for-else: this branch always runs after the loop (no break) and performs the global analysis
dataset = kick_useless_numeric_groups(dataset)
basic_numeric_analysis(dataset)
excel_correlation(dataset,report,df_dic) if(tipo == "excel") else tableau_correlation(dataset,report,df_dic)
return write_text(report,ner_classes, dataset)
#Replace Null values from data set
def replace_null_values(dataset):
null_values = ['Nulo', '%null%', '']
dataset = dataset.replace(regex=null_values, value=np.nan)
return dataset
def default_text_gen(data,tipo):
files = glob.glob(os.path.join('images', '*'))
for f in files:
os.remove(f)
dataset = format_labels(data)
dataset = pandas.DataFrame(dataset)
dataset = replace_null_values(dataset)
result = []
    sg = None # initial super group
    nr = 0 # number of reports written
while True:
result.insert(0,generate_text_data(dataset,tipo,sg))
nr+=1
if report['super_group_bonus'] == []:
break
sg = report['super_group_bonus'].pop()
if nr>1: # there is more than 1 report
result.insert(1,f"<p>It was also wrote {nr-1} other relevant Report{'' if nr==2 else 's'}: </p>")
return '\n'.join(result)
'''----------------------------------
Format all Labels to be in title case
----------------------------------'''
def format_labels(data :json):
data_aux = []
for x in data:
l = list(x.keys())
for y in l:
x[y.title()] = x.pop(y)
data_aux.append(x)
return data_aux
```
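A small, hedged sketch exercising two of the standalone helpers above, `values_distribution` and `check_time_format`, on a toy DataFrame. Importing the module (and having its heavier dependencies such as sklearn, seaborn, and googletrans installed) is an assumption, and the sample data is invented purely for illustration.
```python
# Toy data, purely illustrative; assumes this module and its dependencies
# are importable as DefaultText.
import pandas
from DefaultText import check_time_format, values_distribution

df = pandas.DataFrame({
    "Domain": ["Earth", "Water", "Earth", "Water"],
    "Year": ["2018", "2019", "2020", "2021"],
    "Weight": [10.0, 250.0, 12.5, 300.0],
})

# Occurrence counts per value, computed only for the categoric columns:
# {'Domain': {'Earth': 2, 'Water': 2}, 'Year': {'2018': 1, ..., '2021': 1}}
print(values_distribution(df))

# Earliest and latest date string matched by the regex list above:
# {'begin': '2018', 'end': '2021'}
print(check_time_format(df["Year"]))
```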
#### File: NLG-model/offline model/ReportTemplate.py
```python
from random import choice as rnd
from random import randrange as rdr
from random import getrandbits
import ReportWriter as rw
from Templates import temps as temps
from Templates import dictionary as dictionary
import datetime
import uuid
import random
'''------------------------------------------------------------------------
Report structure built in the 2020 SummerInternship - <NAME>
---------------------------------------------------------------------------'''
class report():
def __init__(self, title:str = 'Miss Subject'):
self.title = title if title is not None else 'Miss Subject'
self.text = ''
self.hyperlinks = {}
self.unique_sequence = uniqueid()
def __str__(self):
return self.text
def generate_title(self):
today = datetime.datetime.today()
tmp = '<h1 style="color:PowderBlue; text-align:center;">' + self.title.upper() + '</h1>'
tmp += '<h3 style=" text-align:center;">' + 'SySummerInternshipNLG' + '</h3>'
tmp += '<h4 style=" text-align:center;">' + str(today.strftime("%e %b %Y")) + '</h4>'
self.text += tmp
def generate_terms_of_reference(self, date: dict = None):
# example: https://unilearning.uow.edu.au/report/4biii1.html
tmp = '<p>' + dictionary['TOF_1'] + rnd(dictionary['preposition_about']) + self.title.lower()
if date is None:
tmp += '. '
elif date['begin'] == date['end']:
tmp += str(dictionary['timespan'][0]).format(date['begin'])
else:
tmp += str(dictionary['timespan'][1]).format(date['begin'], date['end'])
tmp += dictionary['TOF_2']
self.text += tmp + '</p>'
def generate_introduction(self, data: dict = None, sg: list = []):
tmp = '<p>'
tmp += str(dictionary['Introduction_1']).format(data['n_cols'], rw.s_or_p(data["n_cols"]), data["n_rows"], rw.s_or_p(data["n_rows"]))
super_group = '<li>{} <- <b>Super Group</b> -> values: ' + hyperlink(sg) + '</li>'
aux1 = '<ol>'+' '.join(['<li>'+w+'</li>' if w != self.title else super_group.format(w) for w in data['categoric'] ])+'</ol>'
aux2 = '<ul>'+' '.join(['<li>'+w+'</li>' for w in data['numeric']])+'</ul>'
tmp += str(dictionary['Introduction_2']).format(len(data['categoric']), rw.s_or_p(data["categoric"]), aux1, len(data['numeric']), rw.s_or_p(data["numeric"]), aux2)
tmp += dictionary['Introduction_3']
self.text += tmp + '</p>'
def generate_super_group_body(self, data: dict = None):
self.text += global_values(data)
self.text += write_by_std(data)
self.text += write_conclusion(data)
def generate_general_body(self, data: dict = None):
self.text += write_general_std(data)
self.text += max_min_global(data)
def generate_draft(self):
if not any(self.hyperlinks):
return ''
tmp = '<hr><h2 style="text-align:center;">Detail Analysis</h2><p>'
tmp += '</p><p>'.join(
[value[0] + collapse(key, value[1], 'Data', self.unique_sequence) + value[2] for key,value in self.hyperlinks.items()]
)
tmp += '</p></hr>'
self.text += tmp
def generate_correlation(self, data: dict = None):
self.text += write_correlation(data)
        #True if the code is run by main (local) -> display collapse and image
self.text += collapse(
'Correlation',
data['hmx'],
'Heatmap',
self.unique_sequence,
True if data['tipo'] == 'json' and len(data['super_group_bonus']) == 0 else False)
def add_text(self, string:str):
self.text += f'<p>{string}</p>'
'''------------------------------------------------------------------------
Describes which supergroup changes the most and the least for each category
---------------------------------------------------------------------------'''
def write_by_std(data: dict) -> str:
tmp = '<p style="color:RoyalBlue;">'
# Biggest change for super group
tmp += rnd(dictionary['STD_max/min_fe_sg_1']).format(data['super_group']['name'])
tmp += rnd(dictionary['STD_max/min_fe_sg_2']) + rnd(dictionary['STD_max/min_fe_sg_3'])
tmp += dictionary['noun_singular'] if len(data['by_super_group']['columns']['max']) == 1 else dictionary['noun_plural']
# enumeration
for g in data['by_super_group']['columns']['max'].keys():
tmp += str(dictionary['class_str']).format(hyperlink(g))
tmp += single_or_plural_in_array(data['by_super_group']['columns']['max'][g])
else:
tmp = f'{tmp[:-6]}. ' #remove: ", and "
    # minimum
tmp += rnd(dictionary['preposition_contrast'])
#tmp += rnd(dictionary['STD_max/min_fe_sg_2'])
tmp += rnd(dictionary['STD_max/min_fe_sg_4'])
tmp += dictionary['noun_singular'] if len(data['by_super_group']['columns']['max']) == 1 else dictionary['noun_plural']
# enumeration
for g in data['by_super_group']['columns']['min'].keys():
tmp += str(dictionary['class_str']).format(hyperlink(g))
tmp += single_or_plural_in_array(data['by_super_group']['columns']['min'][g])
else:
tmp = f'{tmp[:-6]}. ' #remove: ", and "
return tmp + '</p>'
'''-------------------------------------------------------------------------------
Describes which category by super_group which has the least and the most variation
--------------------------------------------------------------------------------'''
def write_conclusion(data: dict) -> str:
tmp = '<p style="color:DeepPink;">'
l=data['by_super_group']['all']
tmp += rnd(dictionary['conclusion'])
tmp += rnd(dictionary['STD_max/min_fe_sg_4']) + dictionary['noun_singular']
tmp += f'in {hyperlink(l[0][0])} '
tmp += rnd(dictionary['total_std_average'])
tmp += f' {str(round(l[0][1],2))}'
tmp += f". {rnd(dictionary['preposition_contrast'])}"
tmp += rnd(dictionary['STD_max/min_fe_sg_3']) + dictionary['noun_singular']
tmp += f'in { hyperlink(l[len(l)-1][0])} '
tmp += rnd(dictionary['total_std_average'])
tmp += f' {str(round(l[len(l)-1][1],2))}.'
return tmp + '</p>'
'''-------------------------------------------------------------------------------
Describes which category which has the least and the most variation
--------------------------------------------------------------------------------'''
def write_general_std(data: dict) -> str:
tmp = '<p style="color:LightSeaGreen;">'
aux = data["global"]['std']
tmp += rnd(dictionary['STD_max/min_fe_sg_2']).capitalize()
tmp += rnd(dictionary['STD_max/min_fe_sg_3']) + dictionary['noun_singular']
tmp += f'in {str(aux["max"]["category"])}, '
tmp += f'with a std of {str(round(aux["max"]["value"],2))}. '
tmp += rnd(dictionary['preposition_contrast'])
tmp += rnd(dictionary['STD_max/min_fe_sg_4']) + dictionary['noun_singular']
tmp += f'in {str(aux["min"]["category"])}, '
tmp += f'with a std of {str(round(aux["min"]["value"],2))}. '
return tmp + '</p>'
'''-------------------------------------------------------------------------------
Auxiliar function to write an array, according the amount of values
--------------------------------------------------------------------------------'''
def single_or_plural_in_array(array: dict) -> str:
# only one element
if len(array.keys()) == 1:
(k,v) = array.popitem()
return f'{k} with {str(round(v,2))}%, and '
i = 0
tmp = ''
aux = ''
# multiple elements
for k,v in array.items():
#last element
if i == len(array.items())-1:
tmp = tmp[:-2] # removes comma
tmp += ' and ' + k + ' with {}, respectively, and '
else:
tmp += k + ', '
aux+= str(round(v,2)) + '%; '
i+=1
return tmp.format(aux[:-2]) # removes last semicolon
'''--------------------------
Write max, min, mean, bounds
--------------------------'''
def global_values(data: dict) -> str:
tmp = '<p style="color:Coral;">'
average = {}
max_val = {}
min_val = {}
sg_vals = list(data["super_group"]["vals"].keys())
for sg_val in sg_vals:
sg_val_keys = list(data["super_group"]["vals"][sg_val].keys())
#For each category in a supergroup value
for cat_ind in range(0,len(sg_val_keys)):
cat = sg_val_keys[cat_ind]
if cat not in average:
average[cat] = {}
max_val[cat] = {}
min_val[cat] = {}
average[cat][sg_val] = data["super_group"]["vals"][sg_val][cat]["mean"]
max_val[cat][sg_val] = data["super_group"]["vals"][sg_val][cat]["max"]["value"]
min_val[cat][sg_val] = data["super_group"]["vals"][sg_val][cat]["min"]["value"]
for key in average:
tmp += rw.mean_max_min("average", average, key, True)
tmp += rw.mean_max_min("max", max_val, key, False)
tmp += rw.mean_max_min("min", min_val, key, False)
return tmp + '</p>'
'''--------------------------------
Describes max and min global values
--------------------------------'''
def max_min_global(data: dict) -> str:
tmp = '<p style="color:ForestGreen;">'
minimo_global = data["global"]['values']['min']['value']
maximo_global = data["global"]['values']['max']['value']
str1 = ''
str2 = ''
if data['super_group']['name']:
str1 += f"{hyperlink(data['global']['values']['max']['sg'])}'s "
str2 += f"{hyperlink(data['global']['values']['min']['sg'])}'s "
str1 += str(data["global"]['values']['max']['category'])
str2 += str(data["global"]['values']['min']['category'])
tmp += str(temps["global_extremes"]).format(maximo_global,str1,minimo_global,str2)
tmp += rw.end_sentence()
return tmp + '</p>'
'''------------------------------------
Format sg's values to have an hyperlink
-------------------------------------'''
def hyperlink(l: list) -> str:
if not any(l):
return ''
if isinstance(l, str):
return f'<a href="#{l}">{l}</a>'
aux = [f'<a href="#{s}">{s}</a>; ' for s in l]
#aux = ['<a href="https://www.google.pt" target="_blank">' + s + '</a>; ' for s in l]
return ''.join(aux)
'''----------------------------------------------------------------
Organize correlation's text with base text, commun group and pairs
------------------------------------------------------------------'''
def write_correlation(data_: list) -> str:
'''
https://support.minitab.com/en-us/minitab-express/1/help-and-how-to/modeling-statistics/regression/how-to/correlation/interpret-the-results/
Strength: The larger the absolute value of the coefficient, the stronger the relationship between the variables.
Direction: If both variables tend to increase or decrease together, the coefficient is positive, and the line that represents the correlation slopes upward. If one variable tends to increase as the other decreases, the coefficient is negative, and the line that represents the correlation slopes downward.
'''
data = data_['group_corrs']
topic = 'numeric features' if data_['super_group']['name'] is None else data_['global']['std']['max']['category']
tmp = '<p style="color:purple;">'
tmp += dictionary['Correlation_1']
nr_corr = sum([len(x[1]) for x in data]) # number of correlations found
try:
        len(data[0][1]) # access the first entry to guarantee the structure is OK so far
except:
return "I had some problems computing Correlation: Fix me :'("
if len(data[0][1])>1: # whereas exist commun value
tmp += dictionary['Correlation_2_1'].format(
nr_corr,
'' if nr_corr == 1 else 's',
f'{rnd(dictionary["preposition_about"])} {topic}',
hyperlink(data[0][0]),
len(data[0][1]),
'' if len(data[0][1]) == 1 else 's',
)
# when a value has a more than one connection
tmp += '<br>' + ' '.join([multiple_main_value(a) for a in data if len(a[1])>1]).replace(f'<a href="#{data[0][0]}">{data[0][0]}</a>', f'<a href="#{data[0][0]}">it</a>')
else:
tmp += dictionary['Correlation_2_0'].format(
nr_corr,
'' if nr_corr == 1 else 's',
f'{rnd(dictionary["preposition_about"])} {topic}'
)
tmp += f"<br> {pair_values([a for a in data if len(a[1])==1])}" # when a value only connects to one other
return f"{tmp} <br>{dictionary['Correlation_8']}</p>"
'''------------------------------------------------
Separate whereas the pair increase or decrease
-------------------------------------------------'''
def increase_or_decrease(l: list) -> list:
aux = [[],[]]
for x in l:
y = (list(x.keys())[0],list(x.values())[0])
if y[1] < 0:
aux[0].append(y)
else:
aux[1].append(y)
return aux
'''------------------------------
Write text according commun value
------------------------------'''
def multiple_main_value(data: list) -> str:
    direction = rdr(len(dictionary['Correlation_3'])) # randomly chosen direction word
    oposite_direction = 0 if direction == 1 else 1 # index of the opposite direction word
tmp = rnd(dictionary['Correlation_4']).format(
hyperlink(data[0]),
dictionary['Correlation_3'][direction],
rnd(dictionary['Correlation_5'])
)
    aux = increase_or_decrease(data[1]) # separate the pairs by direction
# at least one flag is True
flag_0 = True if aux[0] != [] else False
flag_1 = True if aux[1] != [] else False
if not (flag_0 and flag_1): # only one direction
dir_opcional = oposite_direction if len(aux[0]) > 0 else direction # relate with main variable
col = 0 if len(aux[0]) > 0 else 1
tmp += rnd(dictionary['Correlation_6']).format(
array_names = hyperlink([x[0] for x in aux[col]]),
dir = dictionary['Correlation_3'][dir_opcional],
singular_ = 's' if len(aux[col]) == 1 else '',
array_values = f"({'; '.join([str(round(x[1],2)) for x in aux[col]])})"
)
else: # both directions were found
tmp += dictionary['Correlation_6'][1].format(
array_names = hyperlink([x[0] for x in aux[1]]),
dir = dictionary['Correlation_3'][direction],
singular_ = 's' if len(aux[1]) == 1 else '',
array_values = f"({'; '.join([str(round(x[1],2)) for x in aux[1]])})"
)
tmp += rnd(dictionary['preposition_contrast'])
tmp += dictionary['Correlation_6'][0].format(
array_names = hyperlink([x[0] for x in aux[0]]),
dir = dictionary['Correlation_3'][oposite_direction],
singular_ = 's' if len(aux[0]) == 1 else '',
array_values = f"({'; '.join([str(round(x[1],2)) for x in aux[0]])})"
)
return tmp
'''------------------------------
Write pair(s) correlation text
------------------------------'''
def pair_values(data: list) -> str:
l=['','']
for a in data:
corr_value = list(a[1][0].values())[0]
if corr_value < 0:
l[0] += f'{hyperlink(a[0])} & {hyperlink(list(a[1][0].keys())[0])} w/ {round(corr_value,2)}; '
else:
l[1] += f'{hyperlink(a[0])} & {hyperlink(list(a[1][0].keys())[0])} w/ {round(corr_value,2)}; '
corr_7_1 = dictionary['Correlation_7_1'].format(
'' if len(l[0]) == 0 else 'is' if l[0].count("&") == 1 else 'are',
l[0][:-2] if len(l[0]) > 0 else ''
)
corr_7_2 = dictionary['Correlation_7_2'].format(
'' if len(l[1]) == 0 else 'is' if l[1].count("&") == 1 else 'are',
l[1][:-2] if len(l[1]) > 0 else ''
)
flag_1 = corr_7_1 != dictionary['Correlation_7_1'].format('','')
flag_2 = corr_7_2 != dictionary['Correlation_7_2'].format('','')
if not (flag_1 or flag_2):
return ''
return dictionary['Correlation_7'].format(
s_p = 's' if len(data) > 1 else '',
corr_7_1 = corr_7_1 if flag_1 else '',
corr_7_2 = corr_7_2 if flag_2 else '',
both = ', and ' if flag_1 and flag_2 else '' # when there are two different directions
)
'''-------------------------------------------------
Generates a unique id; the server doesn't accept numbers
-------------------------------------------------'''
def uniqueid() -> str:
seed = 65
id = chr(seed)
while True:
yield id
if seed > 122:
seed = 65
id += chr(seed)
else:
seed += 1
id = chr(seed)
'''------------------------------------------------
Makes Collapsible Text, e.g. Tables, images, text
------------------------------------------------'''
def collapse(name: str, code: str, _type: str, unique_sequence, html: bool = False) -> str:
#https://www.w3schools.com/bootstrap/bootstrap_collapse.asp
'''
EXAMPLES
name: <sg_value>; correlation;
code: report text; url; html code;
type: heatmap; data; Report
    html: indicates whether the HTML head (Bootstrap CSS/JS includes) has to be added
'''
head = '<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/css/bootstrap.min.css">\
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>\
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.4.1/js/bootstrap.min.js"></script>'\
if html else ''
id = next(unique_sequence).replace("\'","") # remove ''
    id = f'{name}{id}'.replace(" ","") # remove ' '; be careful: the sequence restarts for each report, so the name is needed to keep the id unique
return f'{head}<div class="toggle-container" style="margin-top:10px">\
<button type="button" class="btn btn-info" data-toggle="collapse" data-target="#{id}">Click Here to Toggle View regarding {name}\'s {_type}</button>\
<div id="{id}" class="collapse">\
{code}\
</div>\
</div>'
```
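A hedged sketch of the small HTML helpers defined above, `hyperlink` and `collapse`. The import order matters because `ReportTemplate` and `ReportWriter` import each other at module level, so the snippet loads `ReportWriter` first; everything else is taken directly from the code above.
```python
# Illustrative only; ReportWriter is imported first because the two report
# modules import each other at module level.
import ReportWriter  # noqa: F401  (resolves the circular import)
from ReportTemplate import collapse, hyperlink, uniqueid

# Anchor links for a list of super-group values.
print(hyperlink(["Earth", "Water"]))
# -> '<a href="#Earth">Earth</a>; <a href="#Water">Water</a>; '

# A Bootstrap collapse wrapper around an arbitrary HTML payload.
seq = uniqueid()
print(collapse("Earth", "<table>...</table>", "Data", seq))
```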
#### File: NLG-model/offline model/ReportWriter.py
```python
from random import randrange
import random
import string
import re
import inflect
from Templates import temps
from ReportTemplate import report, dictionary, hyperlink, collapse
import operator
TAG_RE = re.compile(r'<br>|</b>')
p = inflect.engine()
def count_placeholders(fmt):
count = 0
L = string.Formatter().parse(fmt)
for x in L:
if x[1] is not None:
count += 1
return count
#Returns the plural form of a given word.
def pluralize_noun(noun):
if(noun[-1:] == "y"):
noun = noun[:-1]
return str(noun) + "ies"
else:
return " the " + str(p.plural(noun))
def end_sentence():
return ". "
'''Returns a final conjunction such as "as finally", "as lastly"'''
def final_conjunction():
ind = randrange(len(temps["final_conj_cop"]))
return str(temps["final_conj_cop"][ind])
#Optional: Adds the supergroup to the text.
def add_sg(data):
tmp = random.uniform(0, 1)
return " the " + str(data["super_group"]["name"]) if (tmp > 0.50) else ""
def pick_verb_person(n_corrs):
return ["was",""] if n_corrs == 1 else ["were","s"]
'''---------------------------------------------------------------------------
s_or_p(Singular or Plural)
returns an "s" or "p" depending if the number of element is singular or plural.
----------------------------------------------------------------------------'''
def s_or_p(arr):
if isinstance(arr, list):
if len(arr) != 1:
return "s"
return ""
else:
if isinstance(arr, int):
if arr != 1:
return "s"
return ""
'''converts array to string'''
def array_to_string(arr):
string = ""
if arr is None:
return string
elif len(arr) > 0:
for x in range(0,len(arr)-1):
string += str(arr[x]) + ", "
string += str(arr[len(arr) - 1])
return string
elif (len(arr) > 1):
return arr[0]
'''Performs minor corrections on the text.'''
def check_ponctuation(text):
r = [[": .",":"],[":.",":"],[": \n.",":\n"],[" ,",","],[" .","."],["\n","#"],["..","."],[". .",""],[" ."," "],[".</b>.",".</b>"],["..","."],[" .","."],[" ,",","]]
for pair in r:
text = text.replace(pair[0],pair[1])
tmp = text.rstrip()
if(tmp[-1:] != "."):
text = text + "."
return text
'''-----------------------------------------------------------------
Writes the text for the report and, after the text is written,
performs a punctuation check to make sure there are no mistakes.
-----------------------------------------------------------------'''
def write_final_text(data, df):
R = report(data['super_group']['name'])
R.generate_title()
R.generate_terms_of_reference(data['timespan'])
R.generate_introduction(data['intro'], list(data['super_group']['vals'].keys()))
try:
R.generate_super_group_body(data)
write_body_sg(data,R,df)
except:
R.add_text(write_body_no_sg(data))
R.generate_general_body(data)
R.generate_correlation(data)
R.generate_draft()
text = collapse(
data['super_group']['name'],
str(R),
"Report",
R.unique_sequence
) if len(data['super_group_bonus']) else str(R)
return check_ponctuation(text)
def remove_tags(text):
return TAG_RE.sub('', text)
def upcase_first_letter(s):
return s[0].upper() + s[1:] if len(s) > 0 else s
def lower_first_letter(s):
return s[0].lower() + s[1:] if len(s) > 0 else s
'''------------------------------------------------------------------------------
Performs punctuation fixes on the text fragment that is to be added.
text - Previous text until now.
new_text - New fragment to be added.
caps - Binary flag. If True, indicates that new_text should be altered
in order to start as the beginning of a sentence; if False, it should be considered
as part of the last sentence of the previous text.
------------------------------------------------------------------------------'''
def str_format(text,new_text,caps):
if(caps):
new_text = upcase_first_letter(new_text)
else:
new_text = lower_first_letter(new_text)
pre_proc_text = remove_tags(text)
processed_text = remove_tags(pre_proc_text).rstrip()
if(len(new_text) > 0 and len(processed_text) > 0):
if caps == True:
new_text = upcase_first_letter(new_text.lstrip())
pre_proc_text = pre_proc_text.lstrip()
processed_text = processed_text.lstrip()
if pre_proc_text[-1] not in [".",":",">"] and processed_text[-1] not in [".",":",">"]:
return ". " + new_text
else:
return new_text
else:
if(processed_text[-1] not in [".",":","\n"] and new_text[0] != "," ):
return ", " + new_text
elif(processed_text[-1] not in [".",":"]):
return "." + new_text
elif(processed_text[-1] == "."):
return new_text.capitalize()
elif(len(processed_text) == len(text) and text[-1] != " "):
return " " + str(new_text)
return new_text + ' '
'''--------------------------------------------------------------------
The goal of this function is to write better English. Because although
the columns have already been classified, a better classification can
provide us with more specific vocabulary for writing better sentences.
For example, if the NER "time" column is age and contains numbers:
it would be better to write "...which has 20 years..." instead of
"in 20 years...".
Receives the name of a column and a category. Categories are the NER cats:
->entity,time,location
---------------------------------------------------------------------'''
def desambig_temp(name,cat,data):
if(cat == "time"):
if ("age" in name.lower()):
return "which has {} <b>{}</b>"
#print("O valor que vou retornar do desambig_temp e: " + str(temps["maxmin_" + str(cat)][randrange(len(temps["maxmin_" + str(cat)]))]))
if (str("maxmin_" +str(cat)) in temps.keys()):
if(data["timespan"] != None):
if(data["timespan"]["begin"] == data["timespan"]["end"]):
return ""
else:
return temps["maxmin_" + str(cat)][randrange(len(temps["maxmin_" + str(cat)]))]
else:
return ""
else:
return ""
def add_maxmin(free_var,string,ner_classes,data):
res = ""
ind = randrange(len(temps[string]))
res += temps["maxmin_text"][0].format(temps[string][ind],free_var[string]["value"])
#Adds entity,time,location max/min if it exists.
for nc in ner_classes:
if(free_var[nc] != {}):
if(len(free_var[string][nc]) == 1):
ner_vals = free_var[string][nc][0]
cat = free_var[nc]
else: #Case where there is more than 1 val for max/min
ner_vals = ""
for x in range(0,len(free_var[string][nc])):
ner_vals += add_copulative_conjunction(x,len(free_var[string][nc]))
ner_vals += str(free_var[string][nc][x])
cat = pluralize_noun(free_var[nc])
template = str_format(res,desambig_temp(cat,nc,data),False)
res += template.format(cat,ner_vals)
return res
def get_corr_word(val):
val = float(val)
if val > 0.90:
return "very strong"
elif val > 0.75:
return "positive"
elif val > -0.75:
return "negative"
elif val > -2: #Just to be safe... Although the value will never be lower than -1.
return "very negative"
def add_no_corrs(text,corrs):
ind = randrange(len(temps["no_corrs"]))
ind2 = randrange(len(temps["analysis"]))
return '<h1 style="color:Fuchsia;">' + str_format(text,temps["no_corrs"][ind].format(temps["analysis"][ind2]),True) + '</h1>'
def add_corrs(text,corrs):
new_text = ""
if corrs == {}:
return add_no_corrs(text,corrs)
else:
ind = randrange(len(temps["corrs_intro"]))
n_corrs = len(list(corrs.keys()))
tmp = pick_verb_person(n_corrs)
new_text += str_format(text,temps["corrs_intro"][ind].
format(tmp[0],n_corrs,tmp[1]),True)
for key in corrs.keys():
val = corrs[key]["value"]
word_val = get_corr_word(val)
el1 = corrs[key]["1"]
el2 = corrs[key]["2"]
ind = randrange(len(temps["corr_val"]))
new_text += str(temps["corr_val"][ind].format(word_val,float(val),el1,el2))
return new_text
def add_conjunction(text):
text = text.lstrip()
ind = randrange(len(temps["conj"]))
conj = temps["conj"][ind]
return conj.capitalize() if(text[:-1] == ".") else conj
def add_copulative_conjunction(ind,length):
if ind == 0:
return ""
elif ind != length -1:
return ", "
else:
return " and "
'''------------------------------------------------------------------------
Returning example:
"When it comes to the value Water"
(for mammals.xlsx)
------------------------------------------------------------------------'''
def add_sg_val_intro(sg_val,old_text,sg):
sg_ind = randrange(len(temps["sg"]))
word_intro = temps["sg_val_intro"][randrange(len(temps["sg_val_intro"]))]
if(count_placeholders(word_intro) == 1):
word_intro = word_intro.format(sg)
return str_format(old_text,temps["sg"][sg_ind].format(word_intro,sg_val),True)
def repeated_words(prev_tmp,actual_tmp):
prev_tmp = prev_tmp.split()
actual_tmp = actual_tmp.split()
for word1 in prev_tmp:
for word2 in actual_tmp:
if word1.lower() == word2.lower() and word1 in temps["sg_val_intro"]:
return True
return False
'''-----------------------------------------------------------------------------
Introduces new category. Ex: "Concerning male maturity(d)"
data- Contains all info and relations.
text- Text until now.
cat- New category to be added to the text.
ind- Category Number (Is it the first, or second or third cat being written...)
------------------------------------------------------------------------------'''
def add_sg_cat(data,text,cat,ind):
tmp = " "
if(ind != 0):
ind = randrange(len(temps["same_sg_val"]))
verb = random.choice(temps["same_sg_verb"])
str_tmp = temps["same_sg_val"][ind]
n_phs = count_placeholders(str_tmp)
if(n_phs == 2):
tmp += str_format(tmp,temps["same_sg_val"][ind].format(verb,data["super_group"]["name"]),True)
else:
tmp += str_format(tmp,temps["same_sg_val"][ind].format(data["super_group"]["name"]),True)
tmp += " but now "
#Certain words like "to" or "for" do not work for the first category.
word_intro = "to"
while(word_intro == "to" or word_intro == "for"):
i = randrange(len(temps["sg_val_var"]))
word_intro = temps["sg_val_var"][i]
i = randrange(len(temps["sg_val"]))
tmp += str_format(tmp,temps["sg_val"][i].format(word_intro,cat),False)
return tmp
def intro_with_timespan(timespan):
ind = randrange(len(temps["timespan"]))
return temps["timespan"][ind].format(timespan["begin"],timespan["end"])
'''----------------------------------------------
Writes body when there isn't a supergroup.
----------------------------------------------'''
def write_body_no_sg(data,text=''):
tmp = ""
for key in data["free_vars"]:
free_var = data["free_vars"][key]
if(key == list(data["free_vars"].keys())[0]):
tmp += temps["free_vars"].format(key, free_var["mean"],free_var["std"])
else:
tmp += str_format(text,temps["free_vars"].
format(key, free_var["mean"],free_var["std"]),True)
tmp += str_format(tmp,add_maxmin(free_var,"max",data["ner_classes"],data),False)
tmp += str_format(tmp,add_conjunction(text),False)
tmp += str_format(tmp,add_maxmin(free_var,"min",data["ner_classes"],data),False)
return tmp
'''----------------------------------------------
Writes body when there is a supergroup.
----------------------------------------------'''
def write_body_sg(data,R,df):
tmp = ''
for sg_val in list(data["super_group"]["vals"].keys()):
tmp2 = ''
sg_val_keys = list(data["super_group"]["vals"][sg_val].keys())
intro_temp = add_sg_val_intro(sg_val,tmp,data["super_group"]["name"])
tmp2 += intro_temp
#For each category in a supergroup value
for cat_ind in range(0,len(sg_val_keys)):
repeated = True
cat = sg_val_keys[cat_ind]
while repeated:
recent_temp = add_sg_cat(data,'',cat,cat_ind)
repeated = repeated_words(intro_temp,recent_temp)
tmp2 += recent_temp
mean = data["super_group"]["vals"][sg_val][cat]["mean"]
std = data["super_group"]["vals"][sg_val][cat]["std"]
ind = randrange(len(temps["sg_val_mean_std"]))
tmp2 += str_format(tmp2,temps["sg_val_mean_std"][ind].format(mean,std),False)
tmp2 += str_format(tmp2,add_maxmin(data["super_group"]["vals"][sg_val][cat],"max",data["ner_classes"],data),False)
tmp2 += str_format(tmp2,add_conjunction(tmp),True)
tmp2 += str_format(tmp2,add_maxmin(data["super_group"]["vals"][sg_val][cat],"min",data["ner_classes"],data),False)
tmp2 += end_sentence()
table = df.loc[df[data["super_group"]["name"]].str.capitalize() == sg_val].to_html(index = False, justify = 'center')
R.hyperlinks[sg_val] = [f'<section id="{sg_val}">{tmp2}', table.replace('\n', ''),"</section>"]
def mean_max_min(tipo, val, key, condicao):
tmp = ""
maximo = max(val[key].values())
super_max = hyperlink(max(val[key].items(), key=operator.itemgetter(1))[0])
minimo = min(val[key].values())
super_min = hyperlink(min(val[key].items(), key=operator.itemgetter(1))[0])
i = randrange(len(temps["sg_val_col"]))
if(condicao):
tmp = f"{tmp} {temps['sg_val_col'][i]} {key}"
i = randrange(len(temps["super_group_sub_intro"]))
ind_val = randrange(len(temps[tipo]))
if(maximo != minimo):
if(condicao):
tmp += str_format(tmp,temps["global_val"].format(temps["super_group_sub_intro"][i],temps[tipo][ind_val],minimo,maximo),False)
else:
tmp += temps["global_val"].format("",temps[tipo][ind_val],minimo,maximo)
tmp += end_sentence()
i = randrange(len(temps["max"]))
indice = randrange(len(temps["min"]))
tmp += temps["supergroup_minmax_global"].format(temps["min"][indice],super_min,temps["max"][i],super_max)
tmp += end_sentence()
else:
i = randrange(len(temps["super_group_sub_intro"]))
if(condicao):
tmp += str_format(tmp,temps["same_global_val"].format(temps["super_group_sub_intro"][i],temps[tipo][ind_val],maximo),False)
else:
tmp += temps["same_global_val"].format("",temps[tipo][ind_val],maximo)
tmp += end_sentence()
return tmp
``` |
{
"source": "JoaoTimm/depot-1",
"score": 3
} |
#### File: depot/fields/upload.py
```python
from depot.manager import DepotManager
from .interfaces import DepotFileInfo
import json
class UploadedFile(DepotFileInfo):
"""Simple :class:`depot.fields.interfaces.DepotFileInfo` implementation that stores files.
Takes a file as content and uploads it to the depot while saving around
most file information. Note that if the file gets replaced
through the depot manually, the ``UploadedFile`` will continue to hold the old data.
Also provides support for encoding/decoding using JSON for storage inside
databases as a plain string.
Default attributes provided for all ``UploadedFile`` include:
- filename - This is the name of the uploaded file
- file_id - This is the ID of the uploaded file
- path - This is a depot_name/file_id path which can
be used with :meth:`DepotManager.get_file` to retrieve the file
- content_type - This is the content type of the uploaded file
- uploaded_at - This is the upload date in YYYY-MM-DD HH:MM:SS format
- url - Public url of the uploaded file
- file - The :class:`depot.io.interfaces.StoredFile` instance of the stored file
"""
def process_content(self, content, filename=None, content_type=None):
"""Standard implementation of :meth:`.DepotFileInfo.process_content`
This is the standard depot implementation of files upload, it will
store the file on the default depot and will provide the standard
attributes.
Subclasses will need to call this method to ensure the standard
set of attributes is provided.
"""
file_path, file_id = self.store_content(content, filename, content_type)
self['file_id'] = file_id
self['path'] = file_path
saved_file = self.file
self['filename'] = saved_file.filename
self['content_type'] = saved_file.content_type
self['uploaded_at'] = saved_file.last_modified.strftime('%Y-%m-%d %H:%M:%S')
self['_public_url'] = saved_file.public_url
def store_content(self, content, filename=None, content_type=None):
file_id = self.depot.create(content, filename, content_type)
file_path = '%s/%s' % (self.depot_name, file_id)
self.files.append(file_path)
return file_path, file_id
def encode(self):
return json.dumps(self)
@classmethod
def decode(cls, data):
return cls(json.loads(data))
@property
def url(self):
public_url = self['_public_url']
if public_url:
return public_url
return DepotManager.get_middleware().url_for(self['path'])
@property
def depot(self):
return DepotManager.get(self.depot_name)
@property
def file(self):
return self.depot.get(self.file_id)
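# Illustrative usage sketch (not from the original file; assumes the DepotFileInfo
# constructor accepts raw bytes and that a default depot has been configured):
#
# DepotManager.configure('default', {'depot.storage_path': '/tmp/depot'})
# info = UploadedFile(b'some file content')
# as_string = info.encode()                  # JSON string suitable for a DB column
# restored = UploadedFile.decode(as_string)  # same filename, path, url, ... attributes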
``` |
{
"source": "JoaoTimm/MOZEN",
"score": 2
} |
#### File: MOZEN/account/routes.py
```python
import os
import secrets
from PIL import Image
from flask import Blueprint, render_template, url_for, request, redirect, flash, session
from flask_login import login_required, current_user
from htmlmin.minify import html_minify
from account.forms import UpdateAccountForm
from app import db, app, current_user_image_file
from blog.routes import search_form
from models import User, Post
account = Blueprint('account', __name__, template_folder='templates')
@account.route("/", methods=['GET', 'POST'])
def home():
rendered_html = render_template('account/index.html')
return html_minify(rendered_html)
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/profile_pics', picture_fn)
output_size = (125, 125)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
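# Illustrative note (hypothetical upload): for a file originally named "avatar.png",
# save_picture writes a 125x125 thumbnail to static/profile_pics/<16-hex-chars>.png
# and returns that generated filename.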
@account.route("/update", methods=['GET', 'POST'])
@login_required
def update_account():
form = UpdateAccountForm()
if form.validate_on_submit():
if form.image_file.data:
image_file = save_picture(form.image_file.data)
current_user.image_file = image_file
current_user.username = form.username.data
current_user.email = form.email.data
current_user.git_username = form.git_username.data
db.session.commit()
flash('Your account has been updated!', 'success')
return redirect(url_for('account.update_account'))
elif request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
form.git_username.data = current_user.git_username
# Private image path
return render_template('account/account.html',
title='Account',
image_file=current_user_image_file(),
input_search_form=search_form(),
form=form)
@account.route("/profile/<user>", methods=['GET', 'POST'])
def profile(user):
session['url'] = '/account/profile/' + user
# print(session['url'])
user = User.query.filter_by(username=user).first_or_404()
# Public image path
posts = Post.query.filter_by(author=user).order_by(Post.date_posted.desc())
if current_user.is_authenticated:
return render_template('account/profile.html',
title='Account',
user=user,
image_file=current_user_image_file(),
input_search_form=search_form(),
posts=posts)
return render_template('account/profile.html',
title='Account',
user=user,
posts=posts,
input_search_form=search_form()
)
``` |
{
"source": "joaoTrevizoli/flask-mongoengine",
"score": 3
} |
#### File: flask-mongoengine/flask_mongoengine/json.py
```python
from bson import json_util
from flask.json import JSONEncoder
from mongoengine.base import BaseDocument
from mongoengine.queryset import QuerySet
def _make_encoder(superclass):
class MongoEngineJSONEncoder(superclass):
"""
A JSONEncoder which provides serialization of MongoEngine
documents and queryset objects.
"""
def default(self, obj):
if isinstance(obj, BaseDocument):
return json_util._json_convert(obj.to_mongo())
elif isinstance(obj, QuerySet):
return json_util._json_convert(obj.as_pymongo())
return superclass.default(self, obj)
return MongoEngineJSONEncoder
MongoEngineJSONEncoder = _make_encoder(JSONEncoder)
def override_json_encoder(app):
"""
A function to dynamically create a new MongoEngineJSONEncoder class
based upon a custom base class.
This function allows us to combine MongoEngine serialization with
any changes to Flask's JSONEncoder which a user may have made
prior to calling init_app.
NOTE: This does not cover situations where users override
an instance's json_encoder after calling init_app.
"""
app.json_encoder = _make_encoder(app.json_encoder)
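# Illustrative usage note (assumes init_app calls override_json_encoder, as the
# docstring above describes): a custom encoder must be assigned before init_app so it
# becomes the base class of the rebuilt encoder.
#
# app.json_encoder = MyCustomJSONEncoder  # hypothetical custom encoder
# db = MongoEngine()
# db.init_app(app)                        # wraps MyCustomJSONEncoder with MongoEngine support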
```
#### File: flask-mongoengine/flask_mongoengine/sessions.py
```python
import datetime
import sys
import uuid
from bson.tz_util import utc
from flask.sessions import SessionInterface, SessionMixin
from werkzeug.datastructures import CallbackDict
__all__ = ("MongoEngineSession", "MongoEngineSessionInterface")
if sys.version_info >= (3, 0):
basestring = str
class MongoEngineSession(CallbackDict, SessionMixin):
def __init__(self, initial=None, sid=None):
def on_update(self):
self.modified = True
CallbackDict.__init__(self, initial, on_update)
self.sid = sid
self.modified = False
class MongoEngineSessionInterface(SessionInterface):
"""SessionInterface for mongoengine"""
def __init__(self, db, collection="session"):
"""
The MongoSessionInterface
:param db: The app's db eg: MongoEngine()
:param collection: The session collection name defaults to "session"
"""
if not isinstance(collection, basestring):
raise ValueError("collection argument should be string or unicode")
class DBSession(db.Document):
sid = db.StringField(primary_key=True)
data = db.DictField()
expiration = db.DateTimeField()
meta = {
"allow_inheritance": False,
"collection": collection,
"indexes": [
{
"fields": ["expiration"],
"expireAfterSeconds": 60 * 60 * 24 * 7 * 31,
}
],
}
self.cls = DBSession
def get_expiration_time(self, app, session):
if session.permanent:
return app.permanent_session_lifetime
if "SESSION_TTL" in app.config:
return datetime.timedelta(**app.config["SESSION_TTL"])
return datetime.timedelta(days=1)
def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if sid:
stored_session = self.cls.objects(sid=sid).first()
if stored_session:
expiration = stored_session.expiration
if not expiration.tzinfo:
expiration = expiration.replace(tzinfo=utc)
if expiration > datetime.datetime.utcnow().replace(tzinfo=utc):
return MongoEngineSession(
initial=stored_session.data, sid=stored_session.sid
)
return MongoEngineSession(sid=str(uuid.uuid4()))
def save_session(self, app, session, response):
domain = self.get_cookie_domain(app)
httponly = self.get_cookie_httponly(app)
if not session:
if session.modified:
response.delete_cookie(app.session_cookie_name, domain=domain)
return
expiration = datetime.datetime.utcnow().replace(
tzinfo=utc
) + self.get_expiration_time(app, session)
if session.modified:
self.cls(sid=session.sid, data=session, expiration=expiration).save()
response.set_cookie(
app.session_cookie_name,
session.sid,
expires=expiration,
httponly=httponly,
domain=domain,
)
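# Illustrative usage sketch (wiring inferred from the docstring and
# get_expiration_time above, not from the original file):
#
# app = Flask(__name__)
# db = MongoEngine(app)
# app.session_interface = MongoEngineSessionInterface(db)
# app.config["SESSION_TTL"] = {"days": 7}  # optional; non-permanent sessions default to 1 day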
```
#### File: flask-mongoengine/tests/test_connection.py
```python
import mongoengine
import pymongo
import pytest
from mongoengine.connection import ConnectionFailure
from mongoengine.context_managers import switch_db
from pymongo.database import Database
from pymongo.errors import InvalidURI
from pymongo.mongo_client import MongoClient
from pymongo.read_preferences import ReadPreference
from flask_mongoengine import MongoEngine, current_mongoengine_instance
def test_connection__should_use_defaults__if_no_settings_provided(app):
"""Make sure a simple connection to a standalone MongoDB works."""
db = MongoEngine()
# Verify no extension for Mongoengine yet created for app
assert app.extensions == {}
assert current_mongoengine_instance() is None
# Create db connection. Should return None.
assert db.init_app(app) is None
# Verify db added to Flask extensions.
assert current_mongoengine_instance() == db
# Verify db settings passed to pymongo driver.
# Default mongoengine db is 'default', default Flask-Mongoengine db is 'test'.
connection = mongoengine.get_connection()
mongo_engine_db = mongoengine.get_db()
assert isinstance(mongo_engine_db, Database)
assert isinstance(connection, MongoClient)
assert mongo_engine_db.name == "test"
assert connection.HOST == "localhost"
assert connection.PORT == 27017
@pytest.mark.parametrize(
("config_extension"),
[
{
"MONGODB_SETTINGS": {
"ALIAS": "simple_conn",
"HOST": "localhost",
"PORT": 27017,
"DB": "flask_mongoengine_test_db",
}
},
{
"MONGODB_HOST": "localhost",
"MONGODB_PORT": 27017,
"MONGODB_DB": "flask_mongoengine_test_db",
"MONGODB_ALIAS": "simple_conn",
},
],
ids=("Dict format", "Config variable format"),
)
def test_connection__should_pass_alias__if_provided(app, config_extension):
"""Make sure a simple connection pass ALIAS setting variable."""
db = MongoEngine()
app.config.update(config_extension)
# Verify no extension for Mongoengine yet created for app
assert app.extensions == {}
assert current_mongoengine_instance() is None
# Create db connection. Should return None.
assert db.init_app(app) is None
# Verify db added to Flask extensions.
assert current_mongoengine_instance() == db
# Verify db settings passed to pymongo driver.
# ALIAS is used to find correct connection.
# As we do not use default alias, default call to mongoengine.get_connection
# should raise.
with pytest.raises(ConnectionFailure):
mongoengine.get_connection()
connection = mongoengine.get_connection("simple_conn")
mongo_engine_db = mongoengine.get_db("simple_conn")
assert isinstance(mongo_engine_db, Database)
assert isinstance(connection, MongoClient)
assert mongo_engine_db.name == "flask_mongoengine_test_db"
assert connection.HOST == "localhost"
assert connection.PORT == 27017
@pytest.mark.parametrize(
("config_extension"),
[
{
"MONGODB_SETTINGS": {
"HOST": "mongodb://localhost:27017/flask_mongoengine_test_db"
}
},
{
"MONGODB_HOST": "mongodb://localhost:27017/flask_mongoengine_test_db",
"MONGODB_PORT": 27017,
"MONGODB_DB": "should_ignore_it",
},
],
ids=("Dict format", "Config variable format"),
)
def test_connection__should_parse_host_uri__if_host_formatted_as_uri(
app, config_extension
):
"""Make sure a simple connection pass ALIAS setting variable."""
db = MongoEngine()
app.config.update(config_extension)
# Verify no extension for Mongoengine yet created for app
assert app.extensions == {}
assert current_mongoengine_instance() is None
# Create db connection. Should return None.
assert db.init_app(app) is None
# Verify db added to Flask extensions.
assert current_mongoengine_instance() == db
connection = mongoengine.get_connection()
mongo_engine_db = mongoengine.get_db()
assert isinstance(mongo_engine_db, Database)
assert isinstance(connection, MongoClient)
assert mongo_engine_db.name == "flask_mongoengine_test_db"
assert connection.HOST == "localhost"
assert connection.PORT == 27017
@pytest.mark.parametrize(
("config_extension"),
[
{
"MONGODB_SETTINGS": {
"HOST": "mongomock://localhost:27017/flask_mongoengine_test_db"
}
},
{
"MONGODB_SETTINGS": {
"ALIAS": "simple_conn",
"HOST": "localhost",
"PORT": 27017,
"DB": "flask_mongoengine_test_db",
"IS_MOCK": True,
}
},
{"MONGODB_HOST": "mongomock://localhost:27017/flask_mongoengine_test_db"},
],
ids=("Dict format as URI", "Dict format as Param", "Config variable format as URI"),
)
def test_connection__should_parse_mongo_mock_uri__as_uri_and_as_settings(
app, config_extension
):
"""Make sure a simple connection pass ALIAS setting variable."""
db = MongoEngine()
app.config.update(config_extension)
# Verify no extension for Mongoengine yet created for app
assert app.extensions == {}
assert current_mongoengine_instance() is None
# Create db connection. Should return None.
with pytest.raises(RuntimeError) as error:
assert db.init_app(app) is None
assert str(error.value) == "You need mongomock installed to mock MongoEngine."
@pytest.mark.parametrize(
("config_extension"),
[
{
"MONGODB_SETTINGS": {
"HOST": "postgre://localhost:27017/flask_mongoengine_test_db"
}
},
{"MONGODB_HOST": "mysql://localhost:27017/flask_mongoengine_test_db"},
],
ids=("Dict format as URI", "Config variable format as URI"),
)
def test_connection__should_raise__if_uri_not_properly_formatted(app, config_extension):
"""Make sure a simple connection pass ALIAS setting variable."""
db = MongoEngine()
app.config.update(config_extension)
# Verify no extension for Mongoengine yet created for app
assert app.extensions == {}
assert current_mongoengine_instance() is None
# Create db connection. Should return None.
with pytest.raises(InvalidURI) as error:
assert db.init_app(app) is None
assert (
str(error.value)
== "Invalid URI scheme: URI must begin with 'mongodb://' or 'mongodb+srv://'"
)
def test_connection__should_accept_host_as_list(app):
"""Make sure MONGODB_HOST can be a list hosts."""
db = MongoEngine()
app.config["MONGODB_SETTINGS"] = {
"ALIAS": "host_list",
"HOST": ["localhost:27017"],
"DB": "flask_mongoengine_list_test_db",
}
db.init_app(app)
connection = mongoengine.get_connection("host_list")
mongo_engine_db = mongoengine.get_db("host_list")
assert isinstance(mongo_engine_db, Database)
assert isinstance(connection, MongoClient)
assert mongo_engine_db.name == "flask_mongoengine_list_test_db"
assert connection.HOST == "localhost"
assert connection.PORT == 27017
def test_multiple_connections(app):
"""Make sure establishing multiple connections to a standalone
MongoDB and switching between them works.
"""
db = MongoEngine()
app.config["MONGODB_SETTINGS"] = [
{
"ALIAS": "default",
"DB": "flask_mongoengine_test_db_1",
"HOST": "localhost",
"PORT": 27017,
},
{
"ALIAS": "alternative",
"DB": "flask_mongoengine_test_db_2",
"HOST": "localhost",
"PORT": 27017,
},
]
class Todo(db.Document):
title = db.StringField(max_length=60)
db.init_app(app)
# Drop default collection from init
Todo.drop_collection()
Todo.meta = {"db_alias": "alternative"}
# Drop 'alternative' collection initiated early.
Todo.drop_collection()
# Make sure init correct and both databases are clean
with switch_db(Todo, "default") as Todo:
doc = Todo.objects().first()
assert doc is None
with switch_db(Todo, "alternative") as Todo:
doc = Todo.objects().first()
assert doc is None
# Test saving a doc via the default connection
with switch_db(Todo, "default") as Todo:
todo = Todo()
todo.text = "Sample"
todo.title = "Testing"
todo.done = True
s_todo = todo.save()
f_to = Todo.objects().first()
assert s_todo.title == f_to.title
# Make sure the doc still doesn't exist in the alternative db
with switch_db(Todo, "alternative") as Todo:
doc = Todo.objects().first()
assert doc is None
# Make sure switching back to the default connection shows the doc
with switch_db(Todo, "default") as Todo:
doc = Todo.objects().first()
assert doc is not None
def test_ingnored_mongodb_prefix_config(app):
"""Config starting by MONGODB_ but not used by flask-mongoengine
should be ignored.
"""
db = MongoEngine()
app.config[
"MONGODB_HOST"
] = "mongodb://localhost:27017/flask_mongoengine_test_db_prod"
# Invalid host, should trigger exception if used
app.config["MONGODB_TEST_HOST"] = "dummy://localhost:27017/test"
db.init_app(app)
connection = mongoengine.get_connection()
mongo_engine_db = mongoengine.get_db()
assert isinstance(mongo_engine_db, Database)
assert isinstance(connection, MongoClient)
assert mongo_engine_db.name == "flask_mongoengine_test_db_prod"
assert connection.HOST == "localhost"
assert connection.PORT == 27017
def test_connection_kwargs(app):
"""Make sure additional connection kwargs work."""
# Figure out whether to use "MAX_POOL_SIZE" or "MAXPOOLSIZE" based
# on PyMongo version (former was changed to the latter as described
# in https://jira.mongodb.org/browse/PYTHON-854)
# TODO remove once PyMongo < 3.0 support is dropped
if pymongo.version_tuple[0] >= 3:
MAX_POOL_SIZE_KEY = "MAXPOOLSIZE"
else:
MAX_POOL_SIZE_KEY = "MAX_POOL_SIZE"
app.config["MONGODB_SETTINGS"] = {
"ALIAS": "tz_aware_true",
"DB": "flask_mongoengine_testing_tz_aware",
"TZ_AWARE": True,
"READ_PREFERENCE": ReadPreference.SECONDARY,
MAX_POOL_SIZE_KEY: 10,
}
db = MongoEngine(app)
assert db.connection.codec_options.tz_aware
assert db.connection.max_pool_size == 10
assert db.connection.read_preference == ReadPreference.SECONDARY
``` |
{
"source": "joaoubaldo/cli",
"score": 2
} |
#### File: terraform_compliance/extensions/terraform.py
```python
import json
from terraform_compliance.common.helper import seek_key_in_dict, flatten_list, Match, merge_dicts, remove_constant_values
import sys
from copy import deepcopy
from radish.utils import console_write
from terraform_compliance.common.defaults import Defaults
from terraform_compliance.extensions.cache import Cache
from terraform_compliance.common.helper import recursive_jsonify, strip_iterations, get_most_child_module
class TerraformParser(object):
def __init__(self, filename, parse_it=True):
'''
This class reads the given terraform plan filename (in json format) and assigns the variables required for
further steps in terraform-compliance. Whether the file is a valid json/terraform plan file is already
checked (with an early exit) in prior steps.
:param filename: terraform plan filename in json format.
:param parse_it: Runs self.parse() if True.
:return: None
'''
self.supported_terraform_versions = (
'0.12.',
'0.13.',
'0.14.',
'0.15.',
'1.0.',
)
self.supported_format_versions = [
'0.1',
'0.2'
]
self.raw = self._read_file(filename)
self.variables = None
self.resources = {}
self.data = {}
self.providers = {}
self.configuration = dict(resources={}, variables={})
self.file_type = "plan"
self.resources_raw = {}
self.type_to_after_unknown_properties = {}
self.parse_it = parse_it
if parse_it:
self.cache = Cache()
self.parse()
def _version_check(self):
if self.raw['format_version'] not in self.supported_format_versions:
print('\nFATAL ERROR: Unsupported terraform plan output format version '
'({}).\n'.format(self.raw['format_version']))
sys.exit(1)
if not self.raw['terraform_version'].startswith(self.supported_terraform_versions):
print('\nFATAL ERROR: Unsupported terraform version '
'({}).\n'.format(self.raw['terraform_version']))
sys.exit(1)
return True
def _identify_data_file(self):
if 'values' in self.raw:
self.file_type = 'state'
def _read_file(self, filename):
'''
Reads the json filename as a dictionary. We are not checking if the file is a json file again, since
it is already checked in main.py
:param filename: json filename with full path
:return: parsed dictionary
'''
with open(filename, 'r', encoding='utf-8') as plan_file:
data = json.load(plan_file)
return data
def _parse_variables(self):
'''
Assigns all variables that are defined within the terraform plan
:return: none
'''
self.variables = self.raw.get('variables', {})
def _parse_resources(self):
'''
Assigns all resources defined in the terraform plan
:return: none
'''
# Read Cache
if self.parse_it:
cache = self.cache.get('resources')
if cache:
self.resources = cache
return
# Resources ( exists in Plan )
for findings in seek_key_in_dict(self.raw.get('planned_values', {}).get('root_module', {}), 'resources'):
for resource in findings.get('resources', []):
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.resources[resource['address']] = resource
# Resources ( exists in State )
for findings in seek_key_in_dict(self.raw.get('values', {}).get('root_module', {}), 'resources'):
for resource in findings.get('resources', []):
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.resources[resource['address']] = resource
# Resources ( exists in Prior State )
for findings in seek_key_in_dict(self.raw.get('prior_state', {}).get('values', {}).get('root_module', {}).get('resources', {}), 'resource'):
for resource in findings.get('resources', []):
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.resources[resource['address']] = resource
# Child Modules Resources ( exists in State )
for findings in seek_key_in_dict(self.raw.get('values', {}).get('root_module', {}), 'child_modules'):
for resource in findings.get('resources', []):
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.resources[resource['address']] = resource
# Resource Changes ( exists in Plan )
for finding in self.raw.get('resource_changes', {}):
resource = deepcopy(finding)
change = resource.get('change', {})
actions = change.get('actions', [])
if actions != ['delete']:
resource['values'] = change.get('after', {}) # dict_merge(change.get('after', {}), change.get('after_unknown', {}))
self.remember_after_unknown(resource, change.get('after_unknown', {}))
if 'change' in resource:
del resource['change']
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.resources[resource['address']] = resource
if self.parse_it:
self.cache.set('resources', self.resources)
def remember_after_unknown(self, resource, after_unknown):
'''
Creates a map of resource type to after_unknown values
These can be used in the Given step "resource that supports x".
Note: This function may be extended to capture 'after' values as well. That would require flattening the multi-level
dictionaries in resource
'''
# get type
resource_type = resource.get('type', '')
# if resource doesn't have type field, try to extract it from address (ideally, this if never evaluates true)
if not resource_type and 'address' in resource and resource['address']:
parsed_address = resource.get('address').split('.')
if parsed_address[0] != 'module':
resource_type = parsed_address[0]
elif len(parsed_address) >= 3:
resource_type = parsed_address[2]
else:
return
# get after_unknown values
# need to merge because which values are in after_unknown may change from instance to instance
# merging rule: if field not in map, add it
if resource_type not in self.type_to_after_unknown_properties:
self.type_to_after_unknown_properties[resource_type] = after_unknown
else:
for key, value in after_unknown.items():
if key not in self.type_to_after_unknown_properties[resource_type]:
self.type_to_after_unknown_properties[resource_type][key] = value
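# Illustrative note (attribute names are hypothetical): if one aws_s3_bucket change
# reports after_unknown {"arn": true} and a later one reports {"id": true}, the merged
# entry for "aws_s3_bucket" becomes {"arn": true, "id": true}; keys seen first are
# kept as-is and only missing keys are added.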
def _parse_configurations(self):
'''
Assigns all configuration related data defined in the terraform plan. This is mostly used for
resources referencing each other.
:return: none
'''
# Read Cache
if self.parse_it:
cache = self.cache.get('configuration')
if cache:
self.configuration = cache
return
# Resources
self.configuration['resources'] = {}
# root resources
resources = self.raw.get('configuration', {}).get('root_module', {}).get('resources', [])
# Append module resources
resources.extend(self.process_module_calls(self.raw.get('configuration', {}).get('root_module', {}).get("module_calls", {})))
remove_constant_values(resources)
for resource in resources:
if self.is_type(resource, 'data'):
self.data[resource['address']] = resource
else:
self.configuration['resources'][resource['address']] = resource
# Variables
self.configuration['variables'] = {}
for findings in seek_key_in_dict(self.raw.get('configuration', {}).get('root_module', {}), 'variables'):
self.configuration['variables'] = findings.get('variables')
# Providers
self.configuration['providers'] = {}
for findings in seek_key_in_dict(self.raw.get('configuration', {}), 'provider_config'):
self.configuration['providers'] = findings.get('provider_config', {})
# Outputs
self.configuration['outputs'] = {}
for findings in seek_key_in_dict(self.raw.get('configuration', {}), 'outputs'):
for key, value in findings.get('outputs', {}).items():
tmp_output = dict(address=key, value={})
if 'expression' in value:
if 'references' in value['expression']:
tmp_output['value'] = value['expression']['references']
tmp_output['type'] = 'object'
elif 'constant_value' in value['expression']:
tmp_output['value'] = value['expression']['constant_value']
if 'sensitive' in value:
tmp_output['sensitive'] = str(value['sensitive']).lower()
else:
tmp_output['sensitive'] = 'false'
if 'type' in value:
tmp_output['type'] = value['type']
elif 'type' not in tmp_output:
if isinstance(tmp_output['value'], list):
tmp_output['type'] = 'list'
elif isinstance(tmp_output['value'], dict):
tmp_output['type'] = 'map'
elif isinstance(tmp_output['value'], str):
tmp_output['type'] = 'string'
elif isinstance(tmp_output['value'], int):
tmp_output['type'] = 'integer'
elif isinstance(tmp_output['value'], bool):
tmp_output['type'] = 'boolean'
self.configuration['outputs'][key] = tmp_output
if self.parse_it:
self.cache.set('configuration', self.configuration)
def _mount_resources(self, source, target, ref_type):
'''
Mounts values of the source resource to the target resource's values with ref_type key
:param source: source resource
:param target: target resource
:param ref_type: reference type (e.g. ingress )
:return: none
'''
for source_resource in source:
if 'values' not in self.resources.get(source_resource, {}):
continue
for parameter, target_resources in target.items():
for target_resource in target_resources:
if target_resource not in self.resources or 'values' not in self.resources[target_resource]:
continue
resource = self.resources_raw[source_resource]['values']
# This is a very stupid terraform-provider bug. Somehow, sometimes it loses the state
# and sets the value to None - which is normally not allowed. It should have been an empty
# dict instead. Hence, we are fixing that here.
if resource is None:
defaults = Defaults()
console_write('{} {}: {}'.format(defaults.warning_icon,
defaults.warning_colour('WARNING (mounting)'),
defaults.info_colour('The resource "{}" has no values set. This is a terraform provider '
'bug. It is recommended to remove/fix this resource within your state.'.format(source_resource))))
self.resources_raw[source_resource]['values'] = {}
self.resources[source_resource]['values'] = {}
resource = {}
resource[Defaults.mounted_ptr] = True
if Defaults.r_mount_ptr not in self.resources[target_resource]:
self.resources[target_resource][Defaults.r_mount_ptr] = {}
if Defaults.r_mount_addr_ptr not in self.resources[target_resource]:
self.resources[target_resource][Defaults.r_mount_addr_ptr] = {}
if Defaults.r_mount_addr_ptr_list not in self.resources[target_resource]:
self.resources[target_resource][Defaults.r_mount_addr_ptr_list] = []
# ensure resources[target_resource]['values'] is an
# empty dict and not None
if not self.resources[target_resource]['values']:
self.resources[target_resource]['values'] = dict()
if ref_type not in self.resources[target_resource]['values']:
self.resources[target_resource]['values'][ref_type] = []
self.resources[target_resource]['values'][ref_type].append(resource)
self.resources[target_resource][Defaults.r_mount_ptr][parameter] = ref_type
self.resources[target_resource][Defaults.r_mount_addr_ptr][parameter] = source
target_set = set(self.resources[target_resource][Defaults.r_mount_addr_ptr_list])
source_set = set(source)
self.resources[target_resource][Defaults.r_mount_addr_ptr_list] = list(target_set | source_set)
if parameter not in self.resources[source_resource]['values']:
self.resources[source_resource]['values'][parameter] = target_resource
def _find_resource_from_name(self, resource_name, module_address=None):
'''
Finds all the resources that is starting with resource_name
:param resource_name: The first initials of the resource
:param module_address: Full module address (without the resource)
:return: list of the found resources
'''
if resource_name in self.resources:
return [resource_name]
resource_list = []
# Try to find the resource with the module address in self.resources
if module_address is not None:
full_address = '{}.{}'.format(module_address, resource_name)
if full_address in self.resources:
return [full_address]
for key, value in self.resources.items():
if not key.startswith(module_address):
continue
# Check if the resource/module has iterations
if '[' in key:
# Possibly module (or resource) is using foreach/count
k = strip_iterations(key)
if k == strip_iterations(full_address):
resource_list.append(key)
else:
# Resource/module does not have any iteration
# Additionally, the resource we are looking under this module is coming from another
# module output. Thus, we need to dive a bit deeper.
if resource_name.startswith('module') and key.startswith('module'):
resource_list.append(key)
if resource_list:
return resource_list
resource_type, resource_id = resource_name.split('.')[0:2]
if resource_type == 'module':
# TODO: This wont work correctly, if an output is used within a module, coming from another module.
# Fix multi-layer module structure for the outputs ?
module_name, output_id = resource_name.split('.')[1:3]
module = self.raw['configuration']['root_module'].get('module_calls', {}).get(module_name, {})
output_value = module.get('module', {}).get('outputs', {}).get(output_id, {})
resources = output_value.get('expression', {}).get('references', []) if 'expression' in output_value else output_value.get('value', [])
resources = ['{}.{}.{}'.format(resource_type, module_name, res) for res in resources]
if not resources:
for key, _ in self.resources.items():
if key.startswith(resource_name):
resources.append(key)
if resources:
resource_list.extend(resources)
else:
for key, value in self.resources.items():
if value['type'] == resource_type and value['name'] == resource_id:
resource_list.append(key)
return resource_list
def _mount_references(self):
'''
Find the references that are defined in self.configuration
:return:
'''
self.resources_raw = deepcopy(self.resources)
invalid_references = ('var.', 'each.', 'count.')
# This section will link resources found in configuration part of the plan output.
# The reference should go both ways (A->B, B->A) since terraform sometimes reports these references
# in opposite ways, depending on the provider structure.
for resource in self.configuration['resources']:
relative_resource_address = '{}.{}'.format(self.configuration['resources'][resource]['type'], self.configuration['resources'][resource]['name'])
current_module_address = self.configuration['resources'][resource]['address'].replace('.{}'.format(relative_resource_address), '')
if 'expressions' in self.configuration['resources'][resource]:
ref_list = {}
for key, value in self.configuration['resources'][resource]['expressions'].items():
references = seek_key_in_dict(value, 'references') if isinstance(value, (dict, list)) else []
valid_references = []
for ref in references:
if isinstance(ref, dict) and ref.get('references'):
valid_references = []
for r in ref['references']:
if r.startswith('var'):
# Try to track the resource given by a variable
_var = self._fetch_resource_by_a_variable(current_module_address, r)
if _var:
valid_references.extend(_var)
if not r.startswith(invalid_references):
valid_references.append(r)
for ref in valid_references:
# if ref is not in the correct format, handle it
if len(ref.split('.')) < 3 and ref.startswith('module'):
# Using for_each and modules together may introduce an issue where the plan.out.json won't
# include the necessary third part of the reference. It is partially resolved by mounting
# the reference to all instances belonging to the module
if 'for_each_expression' in self.configuration['resources'][resource]:
# extract source resources
assumed_source_resources = [k for k in self.resources.keys() if k.startswith(resource)]
# extract for_each keys
assumed_for_each_keys = [k[len(resource):].split('.')[0] for k in assumed_source_resources]
# combine ref with for each keys
assumed_refs = ['{}{}'.format(ref, key) for key in assumed_for_each_keys]
# get all the resources that start with updated ref
ambiguous_references = []
for r in self.resources.keys():
for assumed_ref in assumed_refs:
if r.startswith(assumed_ref):
if key in ref_list:
ref_list[key].append(r)
else:
ref_list[key] = [r]
ambiguous_references.append(r)
# throw a warning
defaults = Defaults()
console_write('{} {}: {}'.format(defaults.warning_icon,
defaults.warning_colour('WARNING (mounting)'),
defaults.info_colour('The reference "{}" in resource {} is ambiguous.'
' It will be mounted to the following resources:').format(ref, resource)))
for i, r in enumerate(ambiguous_references, 1):
console_write(defaults.info_colour('{}. {}'.format(i, r)))
# if the reference can not be resolved, warn the user and continue.
else:
console_write('{} {}: {}'.format(Defaults().warning_icon,
Defaults().warning_colour('WARNING (mounting)'),
Defaults().info_colour('The reference "{}" in resource {} is ambiguous. It will not be mounted.'.format(ref, resource))))
continue
elif key not in ref_list:
ref_list[key] = self._find_resource_from_name(ref, current_module_address)
else:
ref_list[key].extend(self._find_resource_from_name(ref, current_module_address))
# This is where we synchronise constant_value in the configuration section with the resource
# to fill in the missing elements that haven't been defined in the resource due to the provider
# implementation.
target_resource = [t for t in [self.resources.get(resource, {}).get('address')] if t is not None]
if not target_resource:
target_resource = [k for k in self.resources.keys() if k.startswith(resource)]
for t_r in target_resource:
if self.resources[t_r].get('values') is None:
continue
if type(value) is type(self.resources[t_r]['values'].get(key)) and self.resources[t_r]['values'].get(key) != value:
if isinstance(value, (list, dict)):
merge_dicts(self.resources[t_r]['values'][key], value)
if ref_list:
ref_type = self.configuration['resources'][resource]['expressions'].get('type', {})
if 'references' in ref_type:
ref_type = resource.split('.')[0]
if not ref_type and not self.is_type(resource, 'data'):
ref_type = self.extract_resource_type_from_address(resource)
for k, v in ref_list.items():
v = flatten_list(v)
# Mounting A->B
source_resources = self._find_resource_from_name(self.configuration['resources'][resource]['address'])
# Try again in case we might have for_each/count usage for the module
if not source_resources:
source_resources = self._find_resource_from_name(relative_resource_address, current_module_address)
self._mount_resources(source=source_resources,
target=ref_list,
ref_type=ref_type)
# Mounting B->A
for parameter, target_resources in ref_list.items():
for target_resource in target_resources:
if not self.is_type(resource, 'data') and not self.is_type(resource, 'var') and not self.is_type(resource, 'provider'):
ref_type = self.extract_resource_type_from_address(target_resource)
self._mount_resources(source=[target_resource],
target={parameter: source_resources},
ref_type=ref_type)
def _distribute_providers(self):
for resource_name, resource_data in self.resources.items():
resource_provider = resource_name.split('_')[0]
if resource_provider not in self.providers:
self.providers[resource_provider] = {}
self.providers[resource_provider][resource_name] = resource_data
def parse(self):
'''
Main method for initialising the parsing of the terraform plan json file
:return: nothing
'''
self._version_check()
self._identify_data_file()
self._parse_resources()
if self.file_type == 'plan':
self._parse_variables()
self._parse_configurations()
cache_mounted_resources = self.cache.get('mounted_resources') if self.parse_it else None
cache_raw_resources = self.cache.get('resources_raw') if self.parse_it else None
cache_type_to_after_unknown_properties = self.cache.get('type_to_after_unknown_properties') if self.parse_it else None
if cache_mounted_resources and cache_raw_resources:
# print('Read from cache, instead of re-mounting.')
self.resources = cache_mounted_resources
self.resources_raw = cache_raw_resources
self.type_to_after_unknown_properties = cache_type_to_after_unknown_properties
else:
# print('Building cache for mounted resources at {}'.format(Defaults.cache_dir))
self._mount_references()
# metadata related calls
self._add_action_status()
self._add_module_call_source()
self.resources = recursive_jsonify(self.resources)
self.resources_raw = recursive_jsonify(self.resources_raw)
self.type_to_after_unknown_properties = recursive_jsonify(self.type_to_after_unknown_properties)
self.variables = recursive_jsonify(self.variables)
self.data = recursive_jsonify(self.data)
self.providers = recursive_jsonify(self.providers)
if self.parse_it:
self.cache.set('mounted_resources', self.resources)
self.cache.set('resources_raw', self.resources_raw)
self.cache.set('type_to_after_unknown_properties', self.type_to_after_unknown_properties)
self._distribute_providers()
for _, resource in self.resources.items():
self._expand_resource_tags(resource)
def _add_action_status(self):
'''
Adds Terraform's action status to each resource
'''
if 'resource_changes' not in self.raw:
return
for resource_change in self.raw['resource_changes']:
resource = resource_change['address']
if resource in self.resources:
self.resources[resource]['actions'] = resource_change['change']['actions']
def _add_module_call_source(self):
'''
Adds module call's source to module's resources as metadata
'''
for resource in self.resources.values():
# removes the for_each signature from addresses
# module.a["index_1"].b.c -> module.a.b.c
fixed_module_name = '.'.join([word.split('[')[0] for word in resource['address'].split('.')])
if 'source' in self.configuration['resources'].get(fixed_module_name, ''):
resource['source'] = self.configuration['resources'][fixed_module_name]['source']
def find_resources_by_type(self, resource_type, match=Match(case_sensitive=False)):
'''
Finds all resources matching with the resource_type
:param resource_type: String of resource type defined in terraform
:return: list of dict including resources
'''
resource_list = []
for resource_data in self.resources.values():
if resource_type == 'any' or (match.equals(resource_data['type'], resource_type) and resource_data['mode'] == 'managed'):
resource_list.append(resource_data)
return resource_list
def find_data_by_type(self, resource_type, match=Match(case_sensitive=False)):
'''
Finds all data matching with the resource_type
:param resource_type: String of resource type defined in terraform
:return: list of dict including resources
'''
resource_list = []
for resource_data in self.data.values():
if match.equals(resource_data['type'], resource_type):
resource_list.append(resource_data)
return resource_list
def get_providers_from_configuration(self, provider_type, match=Match(case_sensitive=False)):
'''
Returns all providers as a list for the given provider type
:param provider_type: String of a provider type like aws
:return: list of providers that has this type
'''
providers = []
for provider_alias, values in self.configuration['providers'].items():
if isinstance(values, dict) and match.equals(values.get('name'), provider_type):
providers.append(values)
return providers
def _expand_resource_tags(self, resource):
if isinstance((resource.get('values') or {}).get('tags'), list):
for tag in resource.get('values', {}).get('tags', {}):
if isinstance(tag, dict) and 'key' in tag and 'value' in tag:
tag[tag['key']] = tag['value']
return True
return False
def is_type(self, resource, mode):
if isinstance(resource, dict):
if 'mode' in resource:
return resource['mode'] == mode
return resource['address'].split('.')[0] == mode
return False
def process_module_calls(self, module_resource, parents_modules=None):
'''
This method will recursively process modules and extract resources from "module_resource" data
which is actually a data from self.configuration dict. We were returning the native resource name
before this method, but now we are returning proper address naming for the resource.
:param module_resource: The self.configuration part
:param parents_modules: internal usage for recursive functionality
:return: None
'''
if parents_modules is None:
parents_modules = []
resources = []
for k, v in module_resource.items():
# Set the naming correct (for cases like module.a.module.b.module.c...)
current_module_level = deepcopy(parents_modules)
current_module_level.append('module.{}'.format(k))
module_name = ".".join(current_module_level)
# Pull module's source to be later used in metadata
module_source = v.get('source', '')
# Register the resource (along with module naming)
if 'resources' in v.get('module', {}):
for resource in v['module']['resources']:
resource['address'] = '{}.{}'.format(module_name, resource['address'])
resource['source'] = module_source
resources.append(resource)
# Dive deeper, its not finished yet.
if 'module_calls' in v.get('module', {}):
resources.extend(self.process_module_calls(v['module']['module_calls'], current_module_level))
return resources
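# Illustrative note (names are hypothetical): a resource "aws_s3_bucket.logs" declared
# in module "b", which is itself called from module "a", ends up with the address
# "module.a.module.b.aws_s3_bucket.logs" after this expansion.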
def extract_resource_type_from_address(self, resource_address_string):
'''
Tries to get the resource type from the resource address
:param resource_address_string: String of the whole resource address
:return: String of the resource type if found, otherwise will return full address
Example:
"aws_s3_bucket.test" will return "aws_s3_bucket"
"module_a.module_b.module_c.aws_s3_bucket.test" will return "aws_s3_bucket"
"something_else" will return "something_else"
'''
if '.' in resource_address_string:
octets = resource_address_string.split('.')
if len(octets) > 1:
# Return the type as we found it properly
return octets[-2]
else:
# Return the whole address
return octets[0]
# Returning the whole address
return resource_address_string
def _fetch_resource_by_a_variable(self, module, variable):
target_module = get_most_child_module(module)
stripped_variable = variable.replace('var.', '')
var = self.raw['configuration'].get('root_module', {}).get('module_calls', {}).get(target_module, {}).get('expressions', {}).get(stripped_variable, {}).get('references', {})
return var
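# Illustrative usage sketch ('plan.out.json' is a hypothetical path, e.g. produced by
# `terraform show -json plan.out > plan.out.json`):
#
# parser = TerraformParser('plan.out.json')  # parses, mounts references, fills the cache
# buckets = parser.find_resources_by_type('aws_s3_bucket')
# aws_providers = parser.get_providers_from_configuration('aws')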
``` |
{
"source": "joaovaleriano/EcoNets-QBio2021",
"score": 3
} |
#### File: joaovaleriano/EcoNets-QBio2021/bipartite_random_graph.py
```python
import numpy as np
import networkx as nx
from networkx.algorithms import bipartite
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
def gen_random_net(n0, n1, coop_freq0, coop_freq1, n_keep, seed=None):
# n0, n1: number of nodes in partitions A and B
# coop_freq0, coop_freq1: initial cooperator frequency in each partition
# n_keep: total number of edges kept in the bipartite graph
# seed: seed for random number generation
# Create network
network = nx.complete_multipartite_graph(n0, n1)
# Array to store colormap indicating cooperators (blue) and defectors (red)
colormap = []
# Generate array with strategies for each node, randomly sorted
np.random.seed(seed)
strat0 = np.random.choice([0,1], n0, p=[coop_freq0, 1-coop_freq0])
strat1 = np.random.choice([0,1], n1, p=[coop_freq1, 1-coop_freq1])
# Loop over nodes
for i in range(n0):
if strat0[i] == 0: # Set node as a cooperator
network.nodes[i]["strat"] = 0
colormap.append("blue")
else: # Set node as a defector
network.nodes[i]["strat"] = 1
colormap.append("yellow")
# Initialize the fitness of each node
network.nodes[i]["fit"] = 0
# Set node positions
network.nodes[i]["pos"] = (0,-i)
for i in range(n0, n0+n1):
if strat1[i-n0] == 0: # Set node as a cooperator
network.nodes[i]["strat"] = 0
colormap.append("blue")
else: # Set node as a defector
network.nodes[i]["strat"] = 1
colormap.append("yellow")
# Initialize the fitness of each node
network.nodes[i]["fit"] = 0
# Set node positions
network.nodes[i]["pos"] = (1,n0-i)
save_edges = list(network.edges)
np.random.shuffle(save_edges)
network.remove_edges_from(list(network.edges))
B_nodes = [i for i in range(n0, n0+n1)]
for i in range(n0):
node = np.random.choice(B_nodes)
network.add_edge(i, node)
B_nodes.remove(node)
save_edges.remove((i,node))
network.add_edges_from(save_edges[:n_keep-n0])
# for i in range(n_remove):
# node = np.random.choice(len(network.edges))
# network.remove_edge(*list(network.edges)[node])
return network, colormap
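# Descriptive note: the construction above first guarantees every A-node exactly one
# distinct random B-partner (n0 edges), then restores n_keep - n0 further edges from
# the shuffled complete-bipartite edge list, so the final network has n_keep edges and
# no isolated A-node (this assumes n0 <= n1 and n_keep >= n0).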
# network, colormap = gen_random_net(10, 20, 0.5, 0.5, 90, None)
# nx.draw_networkx_nodes(network, nx.get_node_attributes(network, "pos"),
# node_color=colormap, node_size=100)
# nx.draw_networkx_edges(network, nx.get_node_attributes(network, "pos"))
# plt.show()
def plot_partition(network, colormap, n0, n1, p):
if p == 0:
G = bipartite.projected_graph(network, [i for i in range(n0)])
nx.draw(G, node_color=colormap[:n0], with_labels=True)
plt.title("A")
plt.show()
if p == 1:
G = bipartite.projected_graph(network, [i for i in range(n0, n0+n1)])
nx.draw(G, node_color=colormap[n0:n0+n1], with_labels=True)
plt.title("B")
plt.show()
# Calculate fitness matrix over the network
def calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1):
# network: the network object from the NetworkX package
# payoff_mat: payoff matrix for the game
# Loop over nodes
for i in range(n0):
network.nodes[i]["fit"] = 0 # Set fitness to zero initially
# Sum the contribution of each neighbor for the fitness of the focal node
for nb in network.neighbors(i):
# print(network.nodes[nb]["strat"])
network.nodes[i]["fit"] += payoff_mat0[network.nodes[i]["strat"],
network.nodes[nb]["strat"]]
# print("\n")
# Loop over nodes
for i in range(n0, n0+n1):
network.nodes[i]["fit"] = 0 # Set fitness to zero initially
# Sum the contribution of each neighbor for the fitness of the focal node
for nb in network.neighbors(i):
network.nodes[i]["fit"] += payoff_mat1[network.nodes[i]["strat"],
network.nodes[nb]["strat"]]
# No need to return anything, we're just editing the "fit" attribute of the network
# Evolution of the network by a time step
def evolve_strats(network, colormap, n0, n1, payoff_mat0, payoff_mat1):
# network: the network object from the NetworkX package
# colormap: colors of the nodes indicating cooperators and defectors
# payoff_mat: payoff matrix for the game
past_network = nx.Graph.copy(network)
past_colormap = [i for i in colormap]
# Colors of nodes for cooperators (blue) and defectors (red)
colors = ["blue", "yellow"]
A = bipartite.projected_graph(network, [i for i in range(n0)])
B = bipartite.projected_graph(network, [i for i in range(n0, n0+n1)])
coopA = 0
coopB = 0
# Loop over A nodes
for i in range(n0):
# Initialize lists to save fitnesses of cooperator and defector neighbors
coop_nb_fit = [-1]
defec_nb_fit = [-1]
# Check if focal node is cooperator or defector and add its fitness to
# the corresponding list
if A.nodes[i]["strat"] == 0:
coop_nb_fit.append(A.nodes[i]["fit"])
else:
defec_nb_fit.append(A.nodes[i]["fit"])
# Loop over neighbors, adding their fitnesses to the appropriate lists
for nb in A.neighbors(i):
if A.nodes[nb]["strat"] == 0:
coop_nb_fit.append(A.nodes[nb]["fit"])
else:
defec_nb_fit.append(A.nodes[nb]["fit"])
# Check if cooperators or defectors neighbors have higher fitness and
# update the focal node's strategy
if max(coop_nb_fit) > max(defec_nb_fit):
network.nodes[i]["strat"] = 0
colormap[i] = colors[0]
coopA += 1
elif max(coop_nb_fit) < max(defec_nb_fit):
network.nodes[i]["strat"] = 1
colormap[i] = colors[1]
# In case of a fitness tie between cooperators and defectors, sort the
# new strategy for the focal node
else:
n_defec_tie = defec_nb_fit.count(max(defec_nb_fit))
n_coop_tie = coop_nb_fit.count(max(coop_nb_fit))
tie_strats = [0]*n_coop_tie + [1]*n_defec_tie
sort_strat = np.random.choice(tie_strats)
network.nodes[i]["strat"] = sort_strat
colormap[i] = colors[sort_strat]
if sort_strat == 0:
coopA += 1
# Loop over B nodes
for i in range(n0, n0+n1):
# Initialize lists to save fitnesses of cooperator and defector neighbors
coop_nb_fit = [-1]
defec_nb_fit = [-1]
# Check if focal node is cooperator or defector and add its fitness to
# the corresponding list
if B.nodes[i]["strat"] == 0:
coop_nb_fit.append(B.nodes[i]["fit"])
else:
defec_nb_fit.append(B.nodes[i]["fit"])
# Loop over neighbors, adding their fitnesses to the appropriate lists
for nb in B.neighbors(i):
if B.nodes[nb]["strat"] == 0:
coop_nb_fit.append(B.nodes[nb]["fit"])
else:
defec_nb_fit.append(B.nodes[nb]["fit"])
# Check if cooperators or defectors neighbors have higher fitness and
# update the focal node's strategy
if max(coop_nb_fit) > max(defec_nb_fit):
network.nodes[i]["strat"] = 0
colormap[i] = colors[0]
coopB += 1
elif max(coop_nb_fit) < max(defec_nb_fit):
network.nodes[i]["strat"] = 1
colormap[i] = colors[1]
# In case of a fitness tie between cooperators and defectors, sort the
# new strategy for the focal node
else:
n_defec_tie = defec_nb_fit.count(max(defec_nb_fit))
n_coop_tie = coop_nb_fit.count(max(coop_nb_fit))
tie_strats = [0]*n_coop_tie + [1]*n_defec_tie
sort_strat = np.random.choice(tie_strats)
network.nodes[i]["strat"] = sort_strat
colormap[i] = colors[sort_strat]
if sort_strat == 0:
coopB += 1
# Calculate the new fitness matrix
calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1)
return coopA/n0, coopB/n1
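# Descriptive note: each node copies the strategy of the highest-fitness node among
# itself and its neighbours in the corresponding projected graph; when the best
# cooperator and best defector tie, the new strategy is drawn at random, weighted by
# how many tied nodes hold each strategy. Returns the cooperator fractions of A and B.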
# Show the time evolution of a network
def show_random_time_evol(n0, n1, init_coop_freq0, init_coop_freq1, n_remove, nt, b0, b1, seed=None):
# n0, n1: number of nodes in each partition
# init_coop_freq0, init_coop_freq1: initial cooperator frequency in each partition
# n_remove: forwarded to gen_random_net as its n_keep argument (total number of edges kept)
# nt: number of timesteps to run time evolution
# b0, b1: b parameters of the payoff matrices for partitions A and B
# seed: seed for random number generation
# Payoff matrix
payoff_mat0 = np.array([[1, 0],[b0, 0]])
payoff_mat1 = np.array([[1, 0],[b1, 0]])
# Initialize network and calculate the fitness of its nodes
# network, colormap = gen_ring_net(n0, n1, init_coop_freq0, init_coop_freq1, k, seed)
network, colormap = gen_random_net(n0, n1, init_coop_freq0, init_coop_freq1, n_remove, seed)
calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1)
# Get node positions
node_pos = nx.get_node_attributes(network, "pos")
# Draw the initial network
nx.draw(network, node_pos, node_size=50, node_color=colormap)#, with_labels=True)
# plt.savefig(f"small_world_movie/small_world{0:04d}.png", dpi=300)
plt.show()
# Time evolution of the network
for i in range(1, nt):
evolve_strats(network, colormap, n0, n1, payoff_mat0, payoff_mat1) # Evolve the network by a timestep
# Plot the network
nx.draw(network, node_pos, node_size=50, node_color=colormap)#, with_labels=True)
plt.title(f"{i}")
# plt.savefig(f"bipartite_movie/small_world{i:04d}.png", dpi=300)
plt.show()
# show_ring_time_evol(100, 100, 0.5, 0.5, 5, nt=100, b0=1.1, b1=1.1, seed=None)
##############################################################################
# Show the time evolution of a network
def show_random_time_evol_wparts(n0, n1, init_coop_freq0, init_coop_freq1, n_keep, nt, b0, b1, seed=None):
# n0, n1: number of nodes in each partition
# init_coop_freq0, init_coop_freq1: initial cooperator frequency in each partition
# n_keep: total number of edges kept in the bipartite graph
# nt: number of timesteps to run time evolution
# b0, b1: b parameters of the payoff matrices for partitions A and B
# seed: seed for random number generation
# Payoff matrix
payoff_mat0 = np.array([[1, 0],[b0, 0]])
payoff_mat1 = np.array([[1, 0],[b1, 0]])
# Initialize network and calculate the fitness of its nodes
network, colormap = gen_random_net(n0, n0, init_coop_freq0, init_coop_freq1, n_keep, seed)
for i in range(n0):
network.nodes[i+n0]["strat"] = network.nodes[i]["strat"]
colormap[i+n0] = colormap[i]
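    # NOTE: this routine mirrors the two partitions (gen_random_net is called with n0
    # twice and node i+n0 copies the strategy of node i), so it implicitly assumes
    # n1 == n0.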
# for i in range(n0//6):
# network.nodes[i]["strat"] = 0
# colormap[i] = "blue"
# for i in range(n0//6, n0):
# network.nodes[i]["strat"] = 1
# colormap[i] = "yellow"
# for i in range(n0//3):
# network.nodes[i+n0]["strat"] = 0
# colormap[i+n0] = "blue"
# for i in range(n0//3, n0):
# network.nodes[i+n0]["strat"] = 1
# colormap[i+n0] = "yellow"
A = bipartite.projected_graph(network, [i for i in range(n0)])
B = bipartite.projected_graph(network, [i for i in range(n0, n0+n1)])
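    # bipartite.projected_graph keeps only the listed nodes and connects two of them
    # whenever they share at least one neighbour in the other partition; A and B are
    # used here purely to visualise each partition side by side.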
for i in range(n0):
A.nodes[i]["pos"] = (np.cos(i*2*np.pi/n0),
np.sin(i*2*np.pi/n0))
for i in range(n0, n0+n1):
B.nodes[i]["pos"] = (np.cos(i*2*np.pi/n1),
np.sin(i*2*np.pi/n1))
calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1)
# Get node positions
node_pos = nx.get_node_attributes(network, "pos")
# Draw the initial network
plt.subplots(1,3)
plt.subplot(1,3,1)
nx.draw(A, nx.get_node_attributes(A, "pos"), node_size=50, node_color=colormap[:n0])
plt.subplot(1,3,2)
nx.draw(network, node_pos, node_size=50, node_color=colormap)#, with_labels=True)
plt.subplot(1,3,3)
nx.draw(B, nx.get_node_attributes(B, "pos"), node_size=50, node_color=colormap[n0:])
plt.suptitle(f"{0}")
# plt.savefig(f"small_world_movie/small_world{i:04d}.png", dpi=300)
plt.show()
# Time evolution of the network
for i in range(1, nt):
evolve_strats(network, colormap, n0, n1, payoff_mat0, payoff_mat1) # Evolve the network by a timestep
# Plot the network
plt.subplots(1,3)
plt.subplot(1,3,1)
nx.draw(A, nx.get_node_attributes(A, "pos"), node_size=25, node_color=colormap[:n0])
plt.subplot(1,3,2)
nx.draw(network, node_pos, node_size=25, node_color=colormap)#, with_labels=True)
plt.subplot(1,3,3)
nx.draw(B, nx.get_node_attributes(B, "pos"), node_size=25, node_color=colormap[n0:])
plt.suptitle(f"{i}")
plt.savefig(f"bipartite_movie/random/nkeep[400]-b0[1.5]-b1[1.1]{i:04d}.png", dpi=300)
plt.show()
show_random_time_evol_wparts(100, 100, 0.9, 0.9, 200, nt=100, b0=1.5, b1=1.1, seed=None)
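# Note: the call above writes one PNG per timestep into bipartite_movie/random/, so that
# directory must exist beforehand; comment the call out to import this module without
# running the full simulation.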
##############################################################################
# Generate Cooperator Frequency curves for different b values
def gen_coop_freq_evol(n0, n1, nt, b0, b1, seeds, init_coop_freq0, init_coop_freq1, n_keep):
    # n0, n1: number of nodes in each partition of the bipartite network
    # nt: number of timesteps to run time evolution
    # b0: b parameter of the payoff matrix of partition A
    # b1: array of b values for the payoff matrix of partition B
    # seeds: list of random number generator seeds
    # init_coop_freq0, init_coop_freq1: cooperator frequency in the initial condition of each partition
    # n_keep: passed to gen_random_net to set how many connections are kept when building the random network
# Array to store cooperator frequencies for all timesteps and b values
coop_freqs0 = np.zeros((nt, len(b1), len(seeds)))
coop_freqs1 = np.zeros((nt, len(b1), len(seeds)))
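    # Index convention: coop_freqs0[t, j, s] is the cooperator frequency of partition A
    # at timestep t, for the j-th value of b1 and the s-th seed (coop_freqs1 is the
    # analogous array for partition B).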
payoff_mat0 = np.array([[1., 0],[b0, 0]]) # Define the payoff matrix
# Loop over b values
for j in tqdm(range(len(b1))):
# Loop over different seeds
for s in tqdm(range(len(seeds))):
payoff_mat1 = np.array([[1., 0],[b1[j], 0]]) # Define the payoff matrix
# Set random number generator seed
np.random.seed(seeds[s])
# Initialize network and calculate its fitness matrix
network, colormap = gen_random_net(n0, n1, init_coop_freq0, init_coop_freq1,
n_keep, seed=seeds[s])
A = bipartite.projected_graph(network, [i for i in range(n0)])
B = bipartite.projected_graph(network, [i for i in range(n0, n0+n1)])
calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1)
coop_freqs0[0,j,s] = 1 - sum(nx.get_node_attributes(A, "strat").values()) / n0
coop_freqs1[0,j,s] = 1 - sum(nx.get_node_attributes(B, "strat").values()) / n1
# Time evolution of the network
for i in range(1, nt):
coop_freqs0[i,j,s], coop_freqs1[i,j,s] = evolve_strats(network, colormap, n0, n1, payoff_mat0, payoff_mat1) # Evolve the network by a timestep
return coop_freqs0, coop_freqs1
def plot_coop_freq_evol(coop_freqs0, coop_freqs1, b0, b1, title=None, save_files=False):
# Array with timesteps
timesteps = np.linspace(1, coop_freqs0.shape[0], coop_freqs0.shape[0])
avg_coop_freqs0 = np.mean(coop_freqs0, axis=2) # Average cooperator frequencies
std_coop_freqs0 = np.std(coop_freqs0, axis=2) # Standard deviation of cooperator frequencies
avg_coop_freqs1 = np.mean(coop_freqs1, axis=2) # Average cooperator frequencies
std_coop_freqs1 = np.std(coop_freqs1, axis=2) # Standard deviation of cooperator frequencies
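    # In the plots below the solid line is the mean over seeds and the shaded band spans
    # mean ± one standard deviation across seeds.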
# Set colors for plot
colors = plt.cm.viridis(np.linspace(0, 1, len(b1)))
# Plot cooperator frequency time evolution for different b values
plt.subplots(1, 2, figsize=(15,7))
for i in range(len(b1)):
plt.subplot(1, 2, 1)
plt.plot(timesteps, avg_coop_freqs0[:,i], color=colors[i], lw=3,
label=f"$b = {b1[i]:0.2f}$", alpha=1.) # Plot cooperator frequency over time
plt.fill_between(timesteps, avg_coop_freqs0[:,i]-std_coop_freqs0[:,i],
avg_coop_freqs0[:,i]+std_coop_freqs0[:,i], color=colors[i], alpha=0.3)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlim(0, coop_freqs0.shape[0])
plt.ylim(0, 1)
plt.xlabel("Time", fontsize=24)
plt.ylabel("Cooperator Frequency of A", fontsize=24)
plt.ylim(0, 1)
plt.title(title, fontsize=28)
plt.subplot(1, 2, 2)
plt.plot(timesteps, avg_coop_freqs1[:,i], color=colors[i], lw=3,
label=f"$b = {b1[i]:0.2f}$", alpha=1.) # Plot cooperator frequency over time
plt.fill_between(timesteps, avg_coop_freqs1[:,i]-std_coop_freqs1[:,i],
avg_coop_freqs1[:,i]+std_coop_freqs1[:,i], color=colors[i], alpha=0.3)
plt.legend(loc=(1.01, 0.1), fontsize=16) # Add legend
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlim(0, coop_freqs0.shape[0])
plt.ylim(0, 1)
plt.xlabel("Time", fontsize=24)
plt.ylabel("Cooperator Frequency of B", fontsize=24)
plt.ylim(0, 1)
plt.title(title, fontsize=28)
if save_files:
plt.savefig(f"bipartite_coop_freq_evol_error.pdf",
bbox_inches="tight")
else:
plt.show()
# b0 = 1.2
# # b1 = np.arange(1.1, 1.4, 0.1)
# b1 = [1.2]
# n0 = 100
# n1 = 100
# nt = 100
# seeds = [i for i in range(50)]
# init_coop_freq0 = 0.9
# init_coop_freq1 = 0.9
# n_keep = 400
# coop_freqs0, coop_freqs1 = gen_coop_freq_evol(n0, n1, nt, b0, b1, seeds,
# init_coop_freq0, init_coop_freq1, n_keep)
# plot_coop_freq_evol(coop_freqs0, coop_freqs1, b0, b1)
##############################################################################
# Generate final cooperator frequency for different b values
def gen_final_coop_freq(n0, n1, n_keep, nt, nt_save, b0, b1, init_coop_freq0=0.5, init_coop_freq1=0.5, seeds=[None]):
    # n0, n1: number of nodes in each partition of the bipartite network
    # n_keep: passed to gen_random_net to set how many connections are kept when building the random network
    # nt: number of timesteps to evolve before recording results
    # nt_save: number of timesteps to record for calculating statistics
    # b0: b parameter of the payoff matrix of partition A
    # b1: array of b values for the payoff matrix of partition B
    # init_coop_freq0, init_coop_freq1: frequency of cooperators in the initial condition of each partition
    # seeds: list of random number generator seeds
# Array to store cooperator frequency for different b values and different timesteps
coop_freqs0 = np.zeros((len(b1), len(seeds), nt_save))
coop_freqs1 = np.zeros((len(b1), len(seeds), nt_save))
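    # Index convention: coop_freqs*[j, s, i] is the cooperator frequency for the j-th b1
    # value, the s-th seed and the i-th recorded timestep after the nt-step transient.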
payoff_mat0 = np.array([[1., 0],[b0, 0]]) # Define the payoff matrix
# Loop over b values
for j in range(len(b1)):
for s in range(len(seeds)):
payoff_mat1 = np.array([[1., 0],[b1[j], 0]]) # Define the payoff matrix
network, colormap = gen_random_net(n0, n1, init_coop_freq0, init_coop_freq1, n_keep, seeds[s])
calc_fit_mat(network, n0, n1, payoff_mat0, payoff_mat1)
# Time evolution = Loop over timesteps
for i in range(1, nt):
evolve_strats(network, colormap, n0, n1, payoff_mat0, payoff_mat1) # Evolve the network by a timestep
print(f"\rb: {j+1}/{len(b1)}; time: {i+1}/{nt}", end="")
for i in range(nt_save):
coop_freqs0[j,s,i], coop_freqs1[j,s,i] = evolve_strats(network, colormap, n0, n1,
payoff_mat0, payoff_mat1) # Evolve the network by a timestep
print(f"\rb: {j+1}/{len(b1)}; time: {i+1}/{nt_save}", end="")
return coop_freqs0, coop_freqs1
# Plot statistics of final cooperator frequency for different b values
def plot_final_coop_freq(coop_freqs0, coop_freqs1, b0, b1, save_files=False):
    # coop_freqs0, coop_freqs1: cooperator frequencies of partitions A and B
    #                           |-> shape: (len(b1), len(seeds), nt_save)
    # b0: b parameter of the payoff matrix of partition A
    # b1: array of b values for the payoff matrix of partition B
    # save_files: whether or not to save the plot to a file
avg_coop_freqs0 = np.mean(coop_freqs0, axis=-1)
avg_coop_freqs1 = np.mean(coop_freqs1, axis=-1)
final_coop_freq_avg0 = np.mean(avg_coop_freqs0, axis=-1) # Average final cooperator frequencies
final_coop_freq_min0 = np.min(avg_coop_freqs0, axis=-1) # Minimum final cooperator frequencies
final_coop_freq_max0 = np.max(avg_coop_freqs0, axis=-1) # Maximum final cooperator frequencies
final_coop_freq_avg1 = np.mean(avg_coop_freqs1, axis=-1) # Average final cooperator frequencies
final_coop_freq_min1 = np.min(avg_coop_freqs1, axis=-1) # Minimum final cooperator frequencies
final_coop_freq_max1 = np.max(avg_coop_freqs1, axis=-1) # Maximum final cooperator frequencies
# Generate errorbars from minimum to maximum cooperator frequencies
errorbars0 = np.zeros((2, len(b1)))
errorbars1 = np.zeros((2, len(b1)))
for i in range(len(b1)):
errorbars0[:,i] = [final_coop_freq_avg0[i]-final_coop_freq_min0[i],
final_coop_freq_max0[i]-final_coop_freq_avg0[i]]
errorbars1[:,i] = [final_coop_freq_avg1[i]-final_coop_freq_min1[i],
final_coop_freq_max1[i]-final_coop_freq_avg1[i]]
# Set colors for plot
colors = plt.cm.viridis(np.linspace(0, 1, len(b1)))
# Plot final cooperator frequency for different b values
plt.figure(figsize=(10,7))
for j in range(len(b1)):
# Plot markers with errorbars
plt.errorbar(b1[j:j+1], final_coop_freq_avg0[j:j+1], errorbars0[:,j:j+1],
color=colors[j], marker="o", markersize=10, capsize=5,
label=f"$b = {b1[j]:0.2f}$")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("$b_{B}$", fontsize=24)
plt.ylabel("Final Cooperator Frequency", fontsize=24)
# plt.legend(loc=(1.01, 0.5), fontsize=16)
# Save plot to file or show it
if save_files:
plt.savefig("bipartite_final_coop_freq_vs_b.pdf", bbox_inches="tight")
plt.close()
else:
plt.show()
# b0 = 1.1
# b1 = np.linspace(1.1, 1.5, 2)
# seeds = [i for i in range(2)]
# coop_freqs0, coop_freqs1 = gen_final_coop_freq(n0=100, n1=100, n_keep=200, nt=80, nt_save=20, b0=b0, b1=b1,
# init_coop_freq0=0.5, init_coop_freq1=0.5, seeds=seeds)
# plot_final_coop_freq(coop_freqs0, coop_freqs1, b0, b1, save_files=False)
##############################################################################
# Generate final cooperator frequency for different b values
def gen_final_coop_freq_single_b(n0, n1, n_keep, nt, nt_save, b, init_coop_freq0=0.5, init_coop_freq1=0.5, seeds=[None]):
    # n0, n1: number of nodes in each partition of the bipartite network
    # n_keep: passed to gen_random_net to set how many connections are kept when building the random network
    # nt: number of timesteps to evolve before recording results
    # nt_save: number of timesteps to record for calculating statistics
    # b: array of b values for the payoff matrix (the same matrix is used for both partitions)
    # init_coop_freq0, init_coop_freq1: frequency of cooperators in the initial condition of each partition
    # seeds: list of random number generator seeds
# Array to store cooperator frequency for different b values and different timesteps
coop_freqs0 = np.zeros((len(b), len(seeds), nt_save))
coop_freqs1 = np.zeros((len(b), len(seeds), nt_save))
# Loop over b values
for j in range(len(b)):
for s in range(len(seeds)):
payoff_mat = np.array([[1., 0],[b[j], 0]]) # Define the payoff matrix
network, colormap = gen_random_net(n0, n1, init_coop_freq0, init_coop_freq1, n_keep, seeds[s])
calc_fit_mat(network, n0, n1, payoff_mat, payoff_mat)
# Time evolution = Loop over timesteps
for i in range(1, nt):
evolve_strats(network, colormap, n0, n1, payoff_mat, payoff_mat) # Evolve the network by a timestep
print(f"\rb: {j+1}/{len(b)}; time: {i+1}/{nt}", end="")
for i in range(nt_save):
coop_freqs0[j,s,i], coop_freqs1[j,s,i] = evolve_strats(network, colormap, n0, n1,
payoff_mat, payoff_mat) # Evolve the network by a timestep
print(f"\rb: {j+1}/{len(b)}; time: {i+1}/{nt_save}", end="")
return coop_freqs0, coop_freqs1
# Plot statistics of final cooperator frequency for different b values
def plot_final_coop_freq_single_b(coop_freqs0, coop_freqs1, b, save_files=False):
    # coop_freqs0, coop_freqs1: cooperator frequencies of partitions A and B
    #                           |-> shape: (len(b), len(seeds), nt_save)
    # b: array of b values considered for generating the cooperator frequencies
    # save_files: whether or not to save the plot to a file
avg_coop_freqs0 = np.mean(coop_freqs0, axis=-1)
avg_coop_freqs1 = np.mean(coop_freqs1, axis=-1)
final_coop_freq_avg0 = np.mean(avg_coop_freqs0, axis=-1) # Average final cooperator frequencies
# final_coop_freq_min0 = np.min(avg_coop_freqs0, axis=-1) # Minimum final cooperator frequencies
# final_coop_freq_max0 = np.max(avg_coop_freqs0, axis=-1) # Maximum final cooperator frequencies
final_coop_freq_avg1 = np.mean(avg_coop_freqs1, axis=-1) # Average final cooperator frequencies
# final_coop_freq_min1 = np.min(avg_coop_freqs1, axis=-1) # Minimum final cooperator frequencies
# final_coop_freq_max1 = np.max(avg_coop_freqs1, axis=-1) # Maximum final cooperator frequencies
    final_coop_freq_mean_std0 = np.std(avg_coop_freqs0, axis=-1)/avg_coop_freqs0.shape[-1]**0.5 # Standard error of the mean for partition A
    final_coop_freq_mean_std1 = np.std(avg_coop_freqs1, axis=-1)/avg_coop_freqs1.shape[-1]**0.5 # Standard error of the mean for partition B
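    # SEM = std / sqrt(number of seeds); these values are used as the symmetric error
    # bars in the plot below.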
# # Generate errorbars from minimum to maximum cooperator frequencies
# errorbars0 = np.zeros((2, len(b)))
# errorbars1 = np.zeros((2, len(b)))
# for i in range(len(b)):
# errorbars0[:,i] = [final_coop_freq_avg0[i]-final_coop_freq_min0[i],
# final_coop_freq_max0[i]-final_coop_freq_avg0[i]]
# errorbars1[:,i] = [final_coop_freq_avg1[i]-final_coop_freq_min1[i],
# final_coop_freq_max1[i]-final_coop_freq_avg1[i]]
# Set colors for plot
colors = plt.cm.viridis(np.linspace(0, 1, len(b)))
# Plot final cooperator frequency for different b values
plt.figure(figsize=(10,7))
for j in range(len(b)):
# Plot markers with errorbars
plt.errorbar(b[j:j+1], final_coop_freq_avg1[j:j+1], final_coop_freq_mean_std1[j],
color=colors[j], marker="o", markersize=10, capsize=5,
label=f"$b = {b[j]:0.2f}$")
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("$b$", fontsize=24)
plt.ylabel("Final Cooperator Frequency", fontsize=24)
# plt.legend(loc=(1.01, 0.5), fontsize=16)
# Save plot to file or show it
if save_files:
plt.savefig(f"bipartite_final_coop_freq_vs_b-b[{b[0]}..{b[-1]}].pdf", bbox_inches="tight")
plt.close()
else:
plt.show()
# b = np.arange(1.1, 1.55, 0.05)
# seeds = [i for i in range(200)]
# coop_freqs0, coop_freqs1 = gen_final_coop_freq_single_b(n0=100, n1=100, n_keep=400, nt=80, nt_save=20, b=b,
# init_coop_freq0=0.9, init_coop_freq1=0.9, seeds=seeds)
# plot_final_coop_freq_single_b(coop_freqs0, coop_freqs1, b, save_files=False)
``` |
{
"source": "Joao-vap/Voyager-based-Steganography",
"score": 3
} |
#### File: src/SBS/imagemessage.py
```python
import os
import numpy as np
import pydub as pd
from src.SBS.messenger import Messenger as messenger
from abc import ABC, abstractmethod
class ImageMessage(messenger, ABC):
""" Abstract methods """
def __init__(self, images_path=None):
super().__init__(images_path)
@abstractmethod
def __add__(self, other):
""" Add two messages
Args:
other (ImageMessage): message to be added
"""
pass
@abstractmethod
def __repr__(self):
""" Return string representation of message
"""
pass
def _getMessageFromFile(self, file) -> np.ndarray:
""" Return message from mp3 file
Args:
file (str): path of mp3 file
Returns:
np.ndarray: message from mp3 file
"""
        # Placeholder: extraction is not implemented for this class yet. The commented
        # lines show an audio-style (pydub) extraction this stub appears to be copied
        # from; for now return empty channels so the call is at least runnable (a plain
        # np.ndarray has no get_array_of_samples() method).
        # mp3 = pd.AudioSegment.from_mp3(f"{file}")
        # left, right = mp3.split_to_mono()[0], mp3.split_to_mono()[1]
        # return np.array(left.get_array_of_samples()), np.array(right.get_array_of_samples())
        left = np.array([], dtype=np.int16)
        right = np.array([], dtype=np.int16)
        return left, right
```
#### File: src/SBS/messenger.py
```python
import os
import numpy as np
import pydub as pd
from abc import ABC, abstractmethod
class Messenger(ABC):
""" Abstract methods """
def __init__(self, files_path=None):
if files_path is None:
self.message_left, self.message_right = np.array([]), np.array([])
else:
self.message_left, self.message_right = self._getMessage(files_path)
@abstractmethod
def __add__(self, other):
""" Add two messages
Args:
other (AudioMessage): message to be added
"""
pass
@abstractmethod
def __repr__(self):
""" Return string representation of message
"""
pass
def __len__(self) -> 'int':
""" Return length of message
Returns:
            int: length of the left channel of the message
"""
return self.message_left.shape[0]
    def __getitem__(self, key) -> 'tuple':
        """ Return item at index
        Args:
            key (int): index
        Returns:
            tuple: (left, right) samples at index
"""
return (self.message_left[key], self.message_right[key])
    def __iter__(self) -> 'zip':
        """ Return iterator over (left, right) sample pairs
        Returns:
            zip: iterator yielding one (left, right) tuple per sample
        """
        # zip pairs the two channels so `for left, right in messenger: ...` works
        self._pair_iter = zip(self.message_left, self.message_right)
        return self._pair_iter
    def __next__(self) -> 'tuple':
        """ Return next (left, right) pair of samples
        Returns:
            tuple: next (left, right) pair of samples
        """
        # np.ndarray objects are iterable but not iterators, so advance the paired
        # iterator created in __iter__ (creating it on first use if necessary)
        if not hasattr(self, '_pair_iter'):
            self._pair_iter = zip(self.message_left, self.message_right)
        return next(self._pair_iter)
def _getMessage(self, files_path) -> 'list[np.ndarray]':
""" Return message from files
Args:
files_path (str): path of files
Raises:
TypeError: if files_path is not str or list
Returns:
list[np.ndarray]: message from files, left and right channel
"""
# check if is string with path to directory
# or list of mp3 files
if isinstance(files_path, str):
message_left, message_rigth = self._getMessageFromDirectory(files_path)
elif isinstance(files_path, list):
message_left, message_rigth = self._getMessageFromFiles(files_path)
else:
raise TypeError("files_path must be string or list")
return self._concatenateMessages(message_left), self._concatenateMessages(message_rigth)
def _getMessageFromDirectory(self, files_path) -> 'list[np.ndarray]':
""" Return message from files in directory
Args:
files_path (str): path of files
Returns:
list[np.ndarray]: message from files
"""
# get all mp3 files in directory
files = self._getMp3FilesInDirectory(files_path)
# get all messages from mp3 files
message_left, message_rigth = self._getMessageFromFiles(files)
# concatenate all messages
return message_left, message_rigth
def _getMp3FilesInDirectory(self, files_path) -> list:
""" Return files in directory
Args:
files_path (str): path of files
Returns:
list: files in directory
"""
# get all mp3 files in directory
files = []
        for file in os.listdir(files_path):
            # add paths of mp3 files to the list (skip anything that is not an .mp3)
            if file.lower().endswith('.mp3'):
                files.append(files_path + '/' + file)
return files
def _getMessageFromFiles(self, files) -> list:
""" Return message from files
Args:
files (list): list of files
Returns:
list: message from files
"""
# get all messages from files
message_left, message_rigth = [], []
for file in files:
message_l, message_r = self._getMessageFromFile(file)
message_left.append(message_l)
message_rigth.append(message_r)
return message_left, message_rigth
@abstractmethod
def _getMessageFromFile(self, file) -> np.ndarray:
pass
def _concatenateMessages(self, messages) -> np.ndarray:
""" Return concatenated message
Args:
messages (list): list of messages
Returns:
np.ndarray: concatenated message
"""
# concatenate all messages
return np.concatenate(messages)
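# Usage sketch (illustrative; assumes a concrete subclass such as an AudioMessage that
# implements __add__, __repr__ and _getMessageFromFile):
#
#   msg = AudioMessage("path/to/mp3_directory")
#   print(len(msg))          # number of samples in the left channel
#   left0, right0 = msg[0]   # first sample of each channel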
``` |
{
"source": "joaovbmdias/brainless",
"score": 3
} |
#### File: src/api/calendars.py
```python
from flask import abort
from configuration import db
from models.calendar import Calendar, CalendarSchema
from constants import FAILURE
def create(user_id, account_id, body):
"""
This function creates a new calendar for a specific account
of a specific user based on the passed-in calendar data
:param user_id: user_id passed-in URL
:param account_id: account_id passed-in URL
    :param body: calendar to create, passed in the request body
:return: 201 on success, 409 on calendar already exists
"""
schema = CalendarSchema()
calendar = schema.load(body)
if calendar.create() == FAILURE:
abort(409, f'Calendar {calendar.guid} already exists for account {account_id}')
else:
return schema.dump(calendar), 201
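# Example request (illustrative; the exact route prefix depends on the OpenAPI/Connexion
# spec, which is not shown here):
#   POST /users/<user_id>/accounts/<account_id>/calendars
#   {"name": "Work", "guid": "abc-123", "account_id": 2, "brain_enabled": "Y"}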
def read(user_id, account_id, calendar_id):
"""
This function retrieves a calendar based on the provided information
:param user_id: user_id passed-in URL
:param account_id: account_id passed-in URL
:param calendar_id: calendar_id passed-in URL
    :return: 200 on success, 404 on calendar not found
"""
calendar = Calendar(id = calendar_id,
name = None,
guid = None,
account_id = None,
brain_enabled = None)
if calendar.read() == FAILURE:
abort(404, f'Calendar with id:{calendar_id} not found')
else:
schema = CalendarSchema()
return schema.dump(calendar), 200
def search(user_id, account_id):
"""
This function retrieves a list of calendars based on the provided information
:param user_id: user_id passed-in URL
:param account_id: account_id passed-in URL
:return: 200 on success, 404 on no calendars found
"""
calendars = Calendar.query.filter(Calendar.account_id == account_id).all()
    # query(...).all() returns a (possibly empty) list, never None
    if not calendars:
        abort(404, 'No calendars found')
schema = CalendarSchema(many=True)
return schema.dump(calendars), 200
def update(user_id, account_id, calendar_id, body):
"""
This function updates an account based on the provided information
:param user_id: user_id passed-in URL
:param account_id: account_id passed-in URL
:param calendar_id: calendar_id passed-in URL
:param body: payload information
:return: 200 on success, 404 on calendar not found
"""
schema = CalendarSchema()
calendar = schema.load(body)
if calendar.update() == FAILURE:
abort(404, f'Calendar {calendar_id} not found')
else:
return schema.dump(calendar), 200
def delete(user_id, account_id, calendar_id):
"""
This function deletes a calendar based on the provided information
:param user_id: user_id passed-in URL
:param account_id: account_id passed-in URL
:param calendar_id: calendar_id passed-in URL
:return: 200 on success, 404 on calendar not found
"""
calendar = Calendar(id = calendar_id,
name = None,
guid = None,
account_id = None,
brain_enabled = None)
if calendar.delete() == FAILURE:
abort(404, f'Calendar {calendar_id} not found')
else:
return "Calendar deleted", 200
```
#### File: src/models/project.py
```python
from datetime import datetime
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from configuration import db
from models.task import Task
from models.template import Template
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_, and_, CheckConstraint, UniqueConstraint
class Project(db.Model, Template):
""" Project class """
__tablename__ = 'project'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
guid = db.Column(db.String(50), nullable=False)
brain_enabled = db.Column(db.String(1), nullable=False, default='Y')
__created_timestamp = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
__edited_timestamp = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)
account_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
tasks = db.relationship('Task', backref='project', lazy=True, cascade="save-update, merge, delete")
__table_args__ = (
CheckConstraint('brain_enabled IN (\'Y\', \'N\')', name='brain_enabled_val'),
UniqueConstraint('guid', 'account_id', name='unique_guid'))
def __init__(self, name, guid, account_id, brain_enabled='Y', id=None):
self.name = name
self.guid = guid
self.account_id = account_id
self.brain_enabled = brain_enabled
self.id = id
def exists(self):
try:
existing_project = Project.query.filter(or_(Project.id == self.id,
and_(Project.guid == self.guid,
Project.account_id == self.account_id))
).one()
except NoResultFound:
return None
return existing_project
def synchronize(self, tasks):
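        # Flow of this method (as implemented below): upsert the project itself, then,
        # if the brain is enabled, upsert every task received from the provider, and
        # finally delete local tasks whose guid no longer appears remotely.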
if self.id is None:
self.create()
else:
self.update()
task_guids = []
        # brain_enabled is stored as 'Y'/'N', so compare explicitly (any non-empty
        # string, including 'N', would pass a bare truthiness check)
        if self.brain_enabled == 'Y':
if tasks is not None:
for ta in tasks:
local = Task(id = None,
name = ta['name'],
guid = ta['guid'],
priority = ta['priority'],
due_datetime = ta['due_datetime'],
project_id = self.id)
task = local.read()
if task is not None:
local.id = task.id
task = local
task.synchronize()
task_guids.append(ta['guid'])
for ta in Task.query.filter(and_(Task.project_id == self.id, ~Task.guid.in_(task_guids))).all():
db.session.delete(ta)
class ProjectSchema(SQLAlchemyAutoSchema):
""" ProjectSchema class """
class Meta:
""" Meta class classification """
model = Project
sqla_session = db.session
include_fk = True
include_relationships = True
load_instance = True
```
#### File: src/models/task.py
```python
from datetime import datetime
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from configuration import db
from models.label import Label
from models.template import Template
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_, and_, UniqueConstraint
# task and labels association table
task_labels = db.Table('task_labels',
db.metadata,
db.Column('task_id', db.ForeignKey('task.id'), primary_key=True),
db.Column('label_id', db.ForeignKey('label.id'), primary_key=True))
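# task_labels is a plain association table for the many-to-many relationship between
# Task and Label; it is wired up below through the `secondary` argument of db.relationship.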
class Task(db.Model, Template):
""" Task class """
__tablename__ = 'task'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120), nullable=False)
due_datetime = db.Column(db.DateTime, nullable=True)
priority = db.Column(db.Integer, nullable=True)
guid = db.Column(db.String(50), nullable=False)
__created_timestamp = db.Column(db.DateTime, default=datetime.utcnow)
__edited_timestamp = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'), nullable=False)
labels = db.relationship('Label', secondary=task_labels, backref='tasks')
    __table_args__ = (
        UniqueConstraint('guid', 'project_id', name='unique_guid'),)
def __init__(self, name, project_id, due_datetime, priority, guid, id=None):
self.name = name
self.project_id = project_id
self.due_datetime = due_datetime
self.priority = priority
self.guid = guid
self.id = id
def exists(self):
try:
existing_task = Task.query.filter(or_(Task.id == self.id,
and_(Task.guid == self.guid,
Task.project_id == self.project_id))).one()
except NoResultFound:
return None
return existing_task
class TaskSchema(SQLAlchemyAutoSchema):
""" TaskSchema class """
class Meta:
""" Meta class classification """
model = Task
sqla_session = db.session
include_fk = True
include_relationships = True
load_instance = True
```
#### File: src/models/user.py
```python
from datetime import datetime
from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
from configuration import db
from models.account import Account
from models.template import Template
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_
class User(db.Model, Template):
""" User class """
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(64), nullable=False)
first_name = db.Column(db.String(20), nullable=True)
last_name = db.Column(db.String(20), nullable=True)
__created_timestamp = db.Column(db.DateTime, default=datetime.utcnow)
__edited_timestamp = db.Column(db.DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
accounts = db.relationship('Account', backref='user', lazy=True)
def __init__(self, username, password, first_name, last_name, id=None):
self.username = username
self.password = password
self.first_name = first_name
self.last_name = last_name
self.id = id
def exists(self):
try:
existing_user = User.query.filter(or_(User.username == self.username, User.id == self.id)).one()
except NoResultFound:
return None
return existing_user
class UserSchema(SQLAlchemyAutoSchema):
""" UserSchema class """
class Meta:
""" Meta class classification """
model = User
sqla_session = db.session
include_fk = True
include_relationships = True
load_instance = True
```
#### File: src/providers/apple.py
```python
import constants as const
from pyicloud import PyiCloudService
from datetime import datetime, timedelta
def get_calendar_data(api):
apple_events = api.calendar.events(datetime.utcnow(),datetime.utcnow() + timedelta(days=const.SYNC_DAYS))
calendar_data = []
temp = {}
for event in apple_events:
event_data = {}
calendar_guid = event['pGuid']
calendar_name = ''
name = event['title']
start_data = event['startDate'][1:6]
start_datetime = datetime(start_data[0],start_data[1],start_data[2],start_data[3],start_data[4],0)
end_data = event['endDate'][1:6]
end_datetime = datetime(end_data[0],end_data[1],end_data[2],end_data[3],end_data[4],0)
guid = event['guid']
event_data = {"name": name, "start_datetime": start_datetime, "end_datetime": end_datetime, "guid": guid}
temp.setdefault(calendar_guid, []).append(event_data)
for cal, events in temp.items():
calendar_data.append({'name': '', 'guid': cal, 'events': events})
return calendar_data
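# Shape of the structure returned above:
#   [{"name": "", "guid": <calendar pGuid>,
#     "events": [{"name", "start_datetime", "end_datetime", "guid"}, ...]}, ...]
# The calendar name is left empty because the iCloud event payload only carries pGuid.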
def get_reminders_data(account):
return None
def connect(api):
import click
print("Two-step authentication required. Your trusted devices are:")
devices = api.trusted_devices
# for i, device in enumerate(devices):
# print(" %s: %s" % (i, device.get('deviceName',
# "SMS to %s" % device.get('phoneNumber'))))
device = 0#click.prompt('Which device would you like to use?', default=0)
device = devices[device]
if not api.send_verification_code(device):
print("Failed to send verification code")
code = click.prompt('Please enter validation code')
if not api.validate_verification_code(device, code):
print("Failed to verify verification code")
def keep_alive(client_id, client_secret):
api = PyiCloudService(client_id, client_secret)
api.authenticate()
def get_data(client_id, client_secret, account_type):
api = None
try:
api = PyiCloudService(client_id, client_secret)
if api.requires_2sa:
connect(api)
    except Exception:
print('Failed to login to user\'s apple account')
apple_data = {const.CALENDAR: None, const.TASK: None, const.LABEL: None}
if account_type == const.CALENDAR:
apple_data[const.CALENDAR] = get_calendar_data(api)
elif account_type == const.TASK:
apple_data[const.TASK] = get_reminders_data(api)
else:
apple_data[const.CALENDAR] = get_calendar_data(api)
apple_data[const.TASK] = get_reminders_data(api)
return apple_data
```
#### File: brainless/tests/test_3_project.py
```python
import unittest
import constants
from configuration import db
from models.project import Project
db.metadata.create_all(db.engine)
unittest.TestLoader.sortTestMethodsUsing = None
class TestProject(unittest.TestCase):
"""
Unit Testing Projects
"""
def test_1_1_create_project(self):
"""
Test that it can create a project
"""
project = Project(name = "Awesome Project",
guid = "arghdlaijsbfzsdjhgfoqeiugtoiweg",
account_id = 1,
brain_enabled = "Y")
result = project.create()
self.assertIsNotNone(project.id, "Project ID cannot be None")
self.assertGreater(project.id, 0, "Project ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_1_2_create_another_project(self):
"""
Test that it can create another project
"""
project = Project(name = "An even more awesome project",
guid = "adfhsdykluiglçoçõuryirtyuetutrh",
account_id = 1,
brain_enabled = "Y")
result = project.create()
self.assertIsNotNone(project.id, "Project ID cannot be None")
self.assertGreater(project.id, 0, "Project ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_1_3_create_yet_another_project(self):
"""
Test that it can create a third project
"""
project = Project(name = "Cool awesome Project",
guid = "sykt5ywtrshatrusns",
account_id = 1,
brain_enabled = "Y")
result = project.create()
self.assertIsNotNone(project.id, "Project ID cannot be None")
self.assertGreater(project.id, 0, "Project ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_2_1_create_same_project(self):
"""
Test that it cannot create an existing project
"""
project = Project(name = "Awesome Project",
guid = "arghdlaijsbfzsdjhgfoqeiugtoiweg",
account_id = 1,
brain_enabled = "Y")
result = project.create()
self.assertIsNone(project.id, "Project ID must be None")
self.assertEqual(result, constants.FAILURE)
def test_3_1_delete_existing_project(self):
"""
Test that it can delete an existing project
"""
project = Project(name = "Cool awesome Project",
guid = "sykt5ywtrshatrusns",
account_id = 1,
brain_enabled = "Y",
id = 3)
result = project.delete()
self.assertEqual(result, constants.SUCCESS)
def test_3_2_delete_unexisting_project(self):
"""
Test that it cannot delete an unexisting project
"""
project = Project(name = "Cool awesome Project",
guid = "fsdhjsdtkusygjsgdkuly",
account_id = 1,
brain_enabled = "Y",
id = 100000)
result = project.delete()
self.assertEqual(result, constants.FAILURE)
def test_4_1_read_one_existing_project(self):
"""
Test that it can read an existing project
"""
project = Project(name = None,
guid = None,
account_id = None,
brain_enabled = None,
id = 1)
result = project.read()
self.assertEqual(result, constants.SUCCESS)
def test_4_2_read_one_unexisting_project(self):
"""
Test that it cannot read an unexisting project
"""
project = Project(name = None,
guid = None,
account_id = None,
brain_enabled = None,
id = 100000)
result = project.read()
self.assertEqual(result, constants.FAILURE)
def test_5_1_update_existing_project(self):
"""
Test that it can update an existing project
"""
project = Project(name = "An even more awesome project UPDATED",
guid = "adfhsdykluiglçoçõuryirtyuetutrh",
account_id = 1,
brain_enabled = "Y",
id = 2)
result = project.update()
self.assertEqual(result, constants.SUCCESS)
def test_5_2_update_unexisting_project(self):
"""
Test that it cannot update an unexisting account
"""
project = Project(name = "A random project",
guid = "aehrtjsrhrtjsrtjr",
account_id = 1,
brain_enabled = "Y",
id = 10000000)
result = project.update()
self.assertEqual(result, constants.FAILURE)
def test_6_1_check_project_brain_val(self):
"""
Test that it cannot create a project with brain value not in (Y,N)
"""
project = Project(name = "Awesome Project Check",
guid = "arghdlaijsbfzsdjhgfoqeiugtoiweg",
account_id = 1,
brain_enabled = "X")
result = project.create()
self.assertEqual(result, constants.FAILURE)
if __name__ == '__main__':
db.metadata.create_all(db.engine)
unittest.TestLoader.sortTestMethodsUsing = None
unittest.main()
```
#### File: brainless/tests/test_7_task.py
```python
import unittest
import constants
from configuration import db
from models.task import Task
from datetime import datetime
db.metadata.create_all(db.engine)
unittest.TestLoader.sortTestMethodsUsing = None
class TestTask(unittest.TestCase):
"""
Unit Testing Task
"""
def test_1_1_create_task(self):
"""
Test that it can create a task
"""
task = Task(name = "Amazing task",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "asdvakjfhvaskfasvfkasvfkjasv")
result = task.create()
self.assertIsNotNone(task.id, "Task ID cannot be None")
self.assertGreater(task.id, 0, "Task ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_1_2_create_another_task(self):
"""
Test that it can create another task
"""
task = Task(name = "Another Amazing task",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "asdgflkjnadrkçgjbadkçgb")
result = task.create()
self.assertIsNotNone(task.id, "Task ID cannot be None")
self.assertGreater(task.id, 0, "Task ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_1_3_create_yet_another_task(self):
"""
Test that it can create a third task
"""
task = Task(name = "A Third Amazing task",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "aegpoiasenpgadrgpsdngao")
result = task.create()
self.assertIsNotNone(task.id, "Task ID cannot be None")
self.assertGreater(task.id, 0, "Task ID must be greater than 0")
self.assertEqual(result, constants.SUCCESS)
def test_2_1_create_same_task(self):
"""
Test that it cannot create an existing task
"""
task = Task(name = "<NAME>",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "asdvakjfhvaskfasvfkasvfkjasv")
result = task.create()
self.assertIsNone(task.id, "Task ID must be None")
self.assertEqual(result, constants.FAILURE)
def test_3_1_delete_existing_task(self):
"""
Test that it can delete an existing task
"""
task = Task(name = None,
project_id = None,
due_datetime = None,
priority = None,
guid = None,
id = 3)
result = task.delete()
self.assertEqual(result, constants.SUCCESS)
def test_3_2_delete_unexisting_task(self):
"""
Test that it cannot delete an unexisting task
"""
task = Task(name = None,
project_id = None,
due_datetime = None,
priority = None,
guid = None,
id = 1000000)
result = task.delete()
self.assertEqual(result, constants.FAILURE)
def test_4_1_read_one_existing_task(self):
"""
Test that it can read an existing task
"""
task = Task(name = None,
project_id = None,
due_datetime = None,
priority = None,
guid = None,
id = 1)
result = task.read()
self.assertEqual(result, constants.SUCCESS)
self.assertIsNotNone(task.id, "Task ID cannot be None")
self.assertGreater(task.id, 0, "Task ID must be greater than 0")
def test_4_2_read_one_unexisting_task(self):
"""
Test that it cannot read an unexisting task
"""
task = Task(name = None,
project_id = None,
due_datetime = None,
priority = None,
guid = None,
id = 1000000)
result = task.read()
self.assertEqual(result, constants.FAILURE)
def test_5_1_update_existing_task(self):
"""
Test that it can update an existing task
"""
task = Task(name = "Amazing task UPDATED",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "asdvakjfhvaskfasvfkasvfkjasv",
id = 1)
result = task.update()
self.assertEqual(result, constants.SUCCESS)
def test_5_2_update_unexisting_task(self):
"""
Test that it cannot update an unexisting account
"""
task = Task(name = "Amazingly Awkward task UPDATED",
project_id = 1,
due_datetime = datetime.strptime("2020-05-29 20:00", '%Y-%m-%d %H:%M'),
priority = 1,
guid = "weryhwrethçaekrsjngsrlthpri",
id = 10000)
result = task.update()
self.assertEqual(result, constants.FAILURE)
if __name__ == '__main__':
db.metadata.create_all(db.engine)
unittest.TestLoader.sortTestMethodsUsing = None
unittest.main()
``` |