{
"source": "JordanRushing/CCI-Exercises",
"score": 4
}
#### File: JordanRushing/CCI-Exercises/cci_1-3.py
```python
import sys
from collections import Counter
def check_perm(testString1, testString2):
"""
This function determines whether the two provided strings are permutations
of each other. Exits with a success message if they are; returns None otherwise.
"""
charFreq1 = Counter(testString1)
charFreq2 = Counter(testString2)
if charFreq1 == charFreq2:
sys.exit("\nSuccess - provided strings confirmed permutations\n")
testString1 = input("\nPlease enter the first string to be checked: ")
testString2 = input("\nPlease enter the second string to be checked: ")
check_perm(testString1, testString2)
print("\nThe provided strings are NOT permutations of each other\n")
```
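The script above reduces the permutation check to comparing character multisets. A minimal standalone sketch of the same idea (not part of the repository), without the `sys.exit` side effect:
```python
from collections import Counter

def is_permutation(a: str, b: str) -> bool:
    # Two strings are permutations of each other exactly when their
    # character-frequency counts are equal.
    return Counter(a) == Counter(b)

print(is_permutation("listen", "silent"))  # True
print(is_permutation("apple", "pale"))     # False
```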
{
"source": "JordanSamhi/BricksBreaker",
"score": 4
}
#### File: BricksBreaker/modele/Case.py
```python
class Case():
def __init__(self, x, y, couleur, grille):
self._x = x
self._y = y
self._couleur = couleur
self._grille = grille
self.GREY = "grey"
def getCouleur(self):
return self._couleur
def getCouleurOriginale(self):
if self._couleur[-1] == "3":
return self._couleur[:-1]+"2"
return self._couleur
def getX(self):
return self._x
def getY(self):
return self._y
def setX(self, x):
self._x = x
def setY(self, y):
self._y = y
def setCouleur(self, couleur):
self._couleur = couleur
def getNord(self):
if self.getY() > 0:
return self._grille[self._y-1][self._x]
return None
def getSud(self):
if self.getY() < len(self._grille) - 1:
return self._grille[self._y+1][self._x]
return None
def getEst(self):
if self.getX() < len(self._grille) - 1:
return self._grille[self._y][self._x+1]
return None
def getOuest(self):
if self.getX() > 0:
return self._grille[self._y][self._x-1]
return None
def getVoisins(self):
return [self.getNord(), self.getSud(), self.getEst(), self.getOuest()]
def getGrille(self):
return self._grille
def surbrillance(self):
self._couleur = self._couleur[:-1] + "2"
def couleurParDefaut(self):
self._couleur = self._couleur[:-1] + "3"
def detruire(self):
self.setCouleur(self.GREY)
def estDetruite(self):
return self._couleur == self.GREY
```
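A minimal sketch (not part of the repository) of how the neighbour accessors of the `Case` class above behave on a 2x2 grid; out-of-range neighbours come back as `None`:
```python
# Build a 2x2 grid of Case objects that all share the same grille reference.
grille = [[None, None], [None, None]]
for y in range(2):
    for x in range(2):
        grille[y][x] = Case(x, y, "red2", grille)

corner = grille[0][0]
print(corner.getNord())                            # None: no row above
print(corner.getSud() is grille[1][0])             # True
print(corner.getEst() is grille[0][1])             # True
print(corner.getOuest())                           # None: no column to the left
print([v is None for v in corner.getVoisins()])    # [True, False, False, True]
```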
#### File: modele/couleurs/ListeCouleurs.py
```python
import random
'''
This class defines the game's colours.
There are two lists, one for 10x10 grids and one for 20x20 grids,
and two methods that pick colours at random from these tuples.
'''
class ListeCouleurs():
def __init__(self):
self._listeCouleurs = ()
self._compteursCouleurs = {}
self.generateListesCouleurs()
'''
Generate the colour tuples
and reset the colour counters to 0.
'''
def generateListesCouleurs(self):
self._listeCouleurs = self.definitionCouleurs()
for elt in self._listeCouleurs:
self._compteursCouleurs[elt] = 0
'''
Pick a colour at random from our list
and increment its counter by 1.
If the counter reaches 25, remove the colour from the tuple.
'''
def getUneCouleur(self):
couleur = self._listeCouleurs[random.randrange(len(self._listeCouleurs))]
self._compteursCouleurs[couleur] += 1
if self._compteursCouleurs[couleur] == self.getLimiteCouleur():
self._listeCouleurs = [coul for coul in self._listeCouleurs if coul != couleur]
return couleur
def getCouleurCaseMorte(self):
return "grey"
```
#### File: modele/modes/ModeDeuxJoueurs.py
```python
from modele.modes.Mode import Mode
from modele.Joueur import Joueur
from controleur.actionsCases.ActionsCasesDeuxJoueurs import ActionsCasesDeuxJoueurs
import threading, time
class ModeDeuxJoueurs(Mode):
def __init__(self, app):
Mode.__init__(self, app)
self._adversaire= Joueur(False)
self._agentReseau = None
self._actionsCases = ActionsCasesDeuxJoueurs(self, self._application)
self._delai = None
def debut(self):
self._application.popUpReseau()
if self._agentReseau:
popup = self._application.popUpAttenteClient()
self.attendreClient(popup)
def attendreClient(self, popup):
''' Counter used to tell the server apart from the client '''
compteur = 0
def attenteClient(popup, compteur):
if self._agentReseau.echo() != 1:
compteur += 1
if not popup.estOuverte():
self._application.desactiverEvenements()
self._agentReseau.stop()
return
threading.Timer(0.2, lambda:attenteClient(popup, compteur)).start()
else:
# Connection OK
popup.fermerFenetre()
if compteur > 1:
# server side
self._application.popUpDelai()
self._agentReseau.envoyerDelai(self._delai)
self._application.popUpChoixTaille()
if self._partie:
self._agentReseau.envoyerGrille(self._partie.getGrilleEnListe())
self._application.genererScoreDeuxJoueurs()
self._application.initialiserGrille(self._partie.getGrilleEnListe())
self._application.updateDeuxJoueurs()
self._application.updateTour()
self._application.activerEvenements()
self.gererFinPartie()
else:
# client side
self._joueur.setTour(False)
secondes = 0
# wait for the game state to arrive
while not self._partie:
if secondes == 10:
break
time.sleep(1)
secondes += 1
if self._partie:
self._application.genererScoreDeuxJoueurs()
self._application.initialiserGrille(self._partie.getGrilleEnListe())
self._application.updateDeuxJoueurs()
self._application.updateTour()
attenteClient(popup, compteur)
def surbrillanceCases(self, event):
if self.getJoueur().getTour():
self._actionsCases.surbrillanceCases(event)
def desactiverSurbrillance(self, _):
if self.getJoueur().getTour():
self._actionsCases.desactiverSurbrillance()
def detruireCases(self, _):
self._actionsCases.detruireCases()
def gererFinPartie(self):
if self.isPartieFinie():
self._application.afficherMessageFinPartieDeuxJoueurs()
self._application.desactiverEvenements()
self._actionsCases.resetSemblables()
self._agentReseau.envoyerFinPartie()
def isPartieFinie(self):
for case in self._partie.getGrilleEnListe():
if not case.estDetruite():
if ((self.getJoueur().getNombreCouleurs() < self._partie.getNombreCouleurs() / 2
and (case.getCouleurOriginale() in self.getJoueur().getCouleurs()
or case.getCouleurOriginale() not in self.getAdversaire().getCouleurs()))
or
(self.getJoueur().getNombreCouleurs() == self._partie.getNombreCouleurs() / 2
and case.getCouleurOriginale() in self.getJoueur().getCouleurs())):
semblables = self._generateurSemblables.generer(case, [case])
if len(semblables) >= self.LIMITE_NOMBRE_CASES:
return False
return True
def changerTour(self):
self._agentReseau.changerTour()
def getAdversaire(self):
return self._adversaire
def setAgentReseau(self, agent):
self._agentReseau = agent
def getAgentReseau(self):
return self._agentReseau
def setDelai(self, delai):
self._delai = delai
def getDelai(self):
return self._delai
```
#### File: BricksBreaker/modele/Partie.py
```python
from modele.Joueur import Joueur
'''
A game is defined by a list of cells and a score.
'''
class Partie():
def __init__(self, grille):
self._grille = grille
self._score = 0
self._scorePotentiel = 0
self._casesModifiees = []
self._moi, self._adversaire = Joueur(True), Joueur(False)
def getMoi(self):
return self._moi
def getAdversaire(self):
return self._adversaire
def getNombreCouleurs(self):
return int(len(self._grille) / 2.5)
def getGrille(self):
return self._grille
def getScore(self):
return self._score
def getTailleGrille(self):
return len(self._grille)
def getScorePotentiel(self):
return self._scorePotentiel
def setScore(self, newScore):
self._score = newScore
def setScorePotentiel(self, newScorePotentiel):
self._scorePotentiel = newScorePotentiel
def ajouerScore(self, scoreEnPlus):
self._score += scoreEnPlus
def setGrille(self, grille):
self._grille = grille
def getCasesModifiees(self):
return self._casesModifiees
def setCasesModifiees(self, casesModifiees):
self._casesModifiees = casesModifiees
def getGrilleEnListe(self):
return self.transformerGrilleEnListe()
def transformerGrilleEnListe(self):
l = []
for i in self._grille:
for elt in i:
l.append(elt)
return l
def grillePeutDecalerAGauche(self):
for i in range(len(self._grille)):
case = self._grille[len(self._grille) - 1][i]
if case.estDetruite():
while case.getEst():
case = case.getEst()
if not case.estDetruite():
return True
return False
```
#### File: vue/popups/PopupAttenteClient.py
```python
import tkinter as tk
class PopupAttenteClient():
def __init__(self):
self._fenetre = tk.Toplevel()
self._fenetre.grab_set()
self._fenetre.resizable(width=False, height=False)
self._fenetre.title('Attente client')
self._message = "Attente d'un second joueur"
tk.Label(self._fenetre, text=self._message).pack()
self._fenetre.protocol("WM_DELETE_WINDOW", self.disableFermeture)
tk.Button(self._fenetre, text='Quitter', command=self.fermerFenetre).pack()
def getToplevel(self):
return self._fenetre
def disableFermeture(self):
pass
def fermerFenetre(self):
if self._fenetre:
self._fenetre.grab_release()
self._fenetre.destroy()
self._fenetre = None
def estOuverte(self):
if self._fenetre:
return True
return False
```
#### File: vue/popups/PopupReseau.py
```python
import tkinter as tk
from controleur.AgentReseau import AgentReseau
class PopupReseau():
def __init__(self, mode):
self._mode = mode
self._fenetre = tk.Toplevel()
self._fenetre.grab_set()
self._fenetre.resizable(width=False, height=False)
self._fenetre.title('Connexion')
self._message = "<adresse IP : port>"
tk.Label(self._fenetre, text=self._message).pack()
self._adresse = tk.Text(self._fenetre, height=1, width=30)
self._adresse.insert(1.0, "127.255.255.255:2010")
self._adresse.pack()
tk.Button(self._fenetre, text='Connexion', command=self.connexion).pack()
def connexion(self):
self._mode.setAgentReseau(AgentReseau(self._mode, self._adresse.get(1.0, tk.END).strip(), "AgentReseau"))  # Text.get() includes a trailing newline
self._fenetre.grab_release()
self._fenetre.destroy()
def getToplevel(self):
return self._fenetre
```
{
"source": "jordansatler/SNPtoAFS",
"score": 3
}
#### File: jordansatler/SNPtoAFS/SNPtoAFSready.py
```python
import sys
#dictionary of ambiguity codes
ambig = {"Y":["C", "T"], "R":["A", "G"], "W":["A", "T"], "S":["C", "G"],
"K":["G","T"], "M":["A", "C"]}
def duplicate(file):
"""duplicate all individuals so they are diploid"""
with open(file, 'r') as Infile:
d = []
for line in Infile:
if not line.startswith("#"):
d.append(line)
d.append(line)
return d
def header(SNP_list):
"""create a header for each SNP to record linkage patterns"""
getL = SNP_list[0]
getL = getL.strip().split()
locus = 1
L = [len(i) for i in getL[1:] if i != "_"]
head = ['']
for j in L:
name = "LocusNumber" + str(locus)
#write N number of times to list
for reps in range(j):
head.append(name)
locus += 1
return head
def phase(SNP_dup):
"""resolve SNPs that contain ambiguity codes"""
count = 2
phased = []
for line in SNP_dup:
line = line.strip().split()
if count % 2 == 0:
seq = [line[0] + "_a"]
else:
seq = [line[0] + "_b"]
for bp in line[1:]:
for nc in bp:
if nc in ambig:
if count % 2 == 0:
seq.append(ambig[nc][0])
else:
seq.append(ambig[nc][1])
#Skip loci that are invariant
elif nc == "_":
pass
else:
seq.append(nc)
phased.append(seq)
count += 1
return phased
def write_out(name, header, SNPs):
"""write the new SNP file"""
with open(name[:-5] + "_AFSready.txt", "w") as out:
for i in range(len(header)):
if i != len(header) - 1:
out.write(header[i] + "\t")
else:
out.write(header[i] + "\n")
for j in range(len(SNPs)):
for k in range(len(SNPs[j])):
if k != len(SNPs[j]) - 1:
out.write(SNPs[j][k] + "\t")
else:
out.write(SNPs[j][k] + "\n")
def Main():
dip = duplicate(sys.argv[1])
headL = header(dip)
ph = phase(dip)
write_out(sys.argv[1], headL, ph)
if __name__ == "__main__":
Main()
```
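The phasing step relies on the IUPAC ambiguity codes defined at the top of the script: each individual's row is duplicated, and at a heterozygous site the `_a` copy takes the first base of the code while the `_b` copy takes the second; invariant sites marked `_` are dropped. A small worked example (hypothetical input, assuming the functions above are in scope):
```python
# Two duplicated rows for one individual, with a heterozygous "Y" (= C/T)
# site and an invariant site marked "_".
rows = ["ind1\tAY_G\n", "ind1\tAY_G\n"]
print(phase(rows))
# [['ind1_a', 'A', 'C', 'G'], ['ind1_b', 'A', 'T', 'G']]
```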
{
"source": "JordanSchafer/web_scrapping_challenge",
"score": 3
}
#### File: JordanSchafer/web_scrapping_challenge/flask_app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
# Create an instance of Flask
app = Flask(__name__)
# Set up the connection to MongoDB
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# Create routes and render HTML templates
@app.route("/")
def index():
mars_dict = mongo.db.mars_dict.find_one()
return render_template("index.html", mars=mars_dict)
@app.route("/scrape")
def scrape():
mars_dict = mongo.db.mars_dict
mars_data = scrape_mars.scrape()
mars_dict.update({}, mars_data, upsert=True)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
```
{
"source": "jordan-schneider/google-birthdays",
"score": 3
}
#### File: jordan-schneider/google-birthdays/birthday.py
```python
import os.path
import pickle
import subprocess
import arrow # type: ignore
import fire # type: ignore
from google.auth.transport.requests import Request # type: ignore
from google_auth_oauthlib.flow import InstalledAppFlow # type: ignore
from googleapiclient.discovery import build # type: ignore
# If modifying these scopes, delete the file token.pickle.
SCOPES = ["https://www.googleapis.com/auth/calendar.readonly"]
def auth():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists("token.pickle"):
with open("token.pickle", "rb") as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file("credentials.json", SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open("token.pickle", "wb") as token:
pickle.dump(creds, token)
return creds
def send_reminder(event, period: str) -> None:
person = event["summary"][: len("'s birthday") + 1]
birthday = event["start"].get("dateTime", event["start"].get("date"))
print(f"Sending message to {person} for {birthday}")
subject = f"It's {person}'s birthday {period}, {birthday}."
if period == "today":
content = "You should call them!"
elif period == "tomorrow":
content = "Send them a late gift!"
elif period == "next week":
content = "Send them a gift!"
elif period == "next month":
content = "Figure out what to give them!"
else:
raise ValueError(
f'Invalid period "{period}". Must be "today", "tomorrow", "next week", or "next month".'
)
dst = "jordan.jack.<EMAIL>"
subprocess.run(f'echo "{content}" | mail -s "{subject}" {dst}', shell=True, check=True)
def main():
creds = auth()
service = build("calendar", "v3", credentials=creds)
now = arrow.utcnow()
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
events_result = (
service.events()
.list(
calendarId="addressbook#[email protected].<EMAIL>.com",
timeMin=today,
timeMax=today.shift(weeks=4, days=1),
maxResults=100,
singleEvents=True,
orderBy="startTime",
)
.execute()
)
events = events_result.get("items", [])
for event in events:
# print(event)
date = arrow.get(event["start"].get("date"))
if arrow.get(date) == today:
send_reminder(event, "today")
elif date == today.shift(days=1):
send_reminder(event, "tomorrow")
elif date == today.shift(weeks=1):
send_reminder(event, "next week")
elif date == today.shift(weeks=4):
send_reminder(event, "next month")
if __name__ == "__main__":
fire.Fire(main)
```
{
"source": "jordan-schneider/procgen",
"score": 2
}
#### File: procgen/procgen/env_test.py
```python
import numpy as np
import pytest
from .env import ENV_NAMES
from procgen import ProcgenGym3Env
@pytest.mark.parametrize("env_name", ["coinrun", "starpilot"])
def test_seeding(env_name):
num_envs = 1
def make_env(level_num):
venv = ProcgenGym3Env(
num=num_envs, env_name=env_name, num_levels=1, start_level=level_num
)
return venv
env1 = make_env(0)
env2 = make_env(0)
env3 = make_env(1)
env1.act(np.zeros(num_envs))
env2.act(np.zeros(num_envs))
env3.act(np.zeros(num_envs))
_, obs1, _ = env1.observe()
_, obs2, _ = env2.observe()
_, obs3, _ = env3.observe()
assert np.array_equal(obs1["rgb"], obs2["rgb"])
assert not np.array_equal(obs1["rgb"], obs3["rgb"])
@pytest.mark.parametrize("env_name", ["coinrun", "starpilot"])
def test_determinism(env_name):
def collect_observations():
rng = np.random.RandomState(0)
env = ProcgenGym3Env(num=2, env_name=env_name, rand_seed=23)
_, obs, _ = env.observe()
obses = [obs["rgb"]]
for _ in range(128):
env.act(
rng.randint(
low=0, high=env.ac_space.eltype.n, size=(env.num,), dtype=np.int32
)
)
_, obs, _ = env.observe()
obses.append(obs["rgb"])
return np.array(obses)
obs1 = collect_observations()
obs2 = collect_observations()
assert np.array_equal(obs1, obs2)
@pytest.mark.parametrize("env_name", ENV_NAMES)
@pytest.mark.parametrize("num_envs", [1, 2, 16])
def test_multi_speed(env_name, num_envs, benchmark):
env = ProcgenGym3Env(num=num_envs, env_name=env_name)
actions = np.zeros([env.num])
def rollout(max_steps):
step_count = 0
while step_count < max_steps:
env.act(actions)
env.observe()
step_count += 1
benchmark(lambda: rollout(1000))
```
{
"source": "jordan-schneider/value-alignment-verification",
"score": 2
}
#### File: jordan-schneider/value-alignment-verification/check_elicit_output.py
```python
import logging
import pickle
from pathlib import Path
import fire # type: ignore
import numpy as np
from driver.legacy.models import Driver
from active.simulation_utils import assert_normals, orient_normals
from utils import assert_reward
def make_normals(input_features: np.ndarray) -> np.ndarray:
normals = input_features[:, 0] - input_features[:, 1]
assert_normals(normals, False, input_features.shape[2])
return normals
def make_input_features(inputs: np.ndarray, sim) -> np.ndarray:
input_features = np.empty((inputs.shape[0], 2, sim.num_of_features))
for i, (a, b) in enumerate(inputs):
sim.feed(a)
input_features[i, 0] = sim.get_features()
sim.feed(b)
input_features[i, 1] = sim.get_features()
return input_features
def assert_input_feature_consistency(inputs: np.ndarray, input_features: np.ndarray, sim) -> None:
recreated_input_features = make_input_features(inputs, sim)
matches = recreated_input_features == input_features
if not np.all(matches):
bad_indices = np.logical_not(matches)
bad_inputs = inputs[bad_indices]
bad_input_features = input_features[bad_indices]
expected_bad_outputs = recreated_input_features[bad_indices]
logging.error("Some input features don't match the recreated inputs.")
logging.error(f"The following inputs are bad:\n{bad_inputs}")
logging.error(f"The recorded features for these inputs are:\n{bad_input_features}")
logging.error(f"The recreated input_features are:\n{expected_bad_outputs}")
logging.error(f"The bad indices are {np.where(bad_indices)}")
assert np.all(matches)
def assert_normal_consistency(input_features: np.ndarray, normals: np.ndarray) -> None:
assert np.all(make_normals(input_features) == normals)
def assert_true_reward_consistency(oriented_normals: np.ndarray, true_reward: np.ndarray) -> None:
gt_value_diff = oriented_normals @ true_reward
pref_correct = gt_value_diff >= 0
if not np.all(pref_correct):
pref_incorrect = np.logical_not(pref_correct)
bad_normals = oriented_normals[pref_incorrect]
bad_values = gt_value_diff[pref_incorrect]
logging.error("Some preferences backwards relative to gt reward.")
logging.error(f"The following normals are bad:\n{bad_normals}")
logging.error(f"The value difference for these normals were:\n{bad_values}")
logging.error(f"The ground truth reward is {true_reward}")
logging.error(f"The bad normal indices are {np.where(pref_incorrect)}")
assert np.all(pref_correct)
def main(datadir: Path) -> None:
logging.basicConfig(level="INFO")
datadir = Path(datadir)
flags = pickle.load(open(datadir / "flags.pkl", "rb"))
use_equiv = False
sim = Driver()
n_reward_features = sim.num_of_features
inputs = np.load(datadir / "inputs.npy")
n_questions = inputs.shape[0]
assert inputs.shape[1] == 2
input_features = np.load(datadir / "input_features.npy")
n_questions = input_features.shape[0]
assert input_features.shape == (n_questions, 2, n_reward_features), input_features.shape
assert_input_feature_consistency(inputs, input_features, sim)
normals = np.load(datadir / "normals.npy")
logging.info(f"There are {normals.shape[0]} questions")
assert_normals(normals, use_equiv, n_reward_features)
assert_normal_consistency(input_features, normals)
preferences = np.load(datadir / "preferences.npy")
assert preferences.shape == (n_questions,)
assert np.all((preferences == 1) | (preferences == -1))
oriented_normals = orient_normals(normals, preferences)
if (datadir / "true_reward.npy").exists():
true_reward = np.load(datadir / "true_reward.npy")
assert_reward(true_reward, use_equiv, n_reward_features)
logging.info(f"true_reward={true_reward}")
assert_true_reward_consistency(oriented_normals, true_reward)
if (datadir / "mean_reward.npy").exists():
mean_reward = np.load(datadir / "mean_reward.npy")
logging.info(f"mean_reward={mean_reward}")
assert_reward(mean_reward, use_equiv, n_reward_features)
mean_accuracy = np.mean(oriented_normals @ mean_reward > 0)
logging.info(f"Accuracy of mean reward function is {mean_accuracy}")
if __name__ == "__main__":
fire.Fire(main)
```
#### File: jordan-schneider/value-alignment-verification/run_tests.py
```python
import logging
import pickle as pkl
from functools import partial
from itertools import product
from pathlib import Path
from typing import (
Dict,
Generator,
List,
Literal,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import argh # type: ignore
import numpy as np
import tensorflow as tf # type: ignore
from driver.gym_env.legacy_env import LegacyEnv
from gym.spaces import flatten # type: ignore
from search import GeometricSearch, TestRewardSearch
tf.config.set_visible_devices([], "GPU") # Car simulation stuff is faster on cpu
from argh import arg
from driver.legacy.models import Driver
from gym.core import Env # type: ignore
from joblib import Parallel, delayed # type: ignore
from sklearn.metrics import confusion_matrix # type: ignore
from active.simulation_utils import TrajOptimizer, assert_normals, make_normals, orient_normals
from equiv_utils import add_equiv_constraints, remove_equiv
from random_baseline import make_random_questions
from testing_factory import TestFactory
from utils import (
assert_nonempty,
assert_reward,
assert_rewards,
get_mean_reward,
load,
make_gaussian_rewards,
parse_replications,
rollout,
setup_logging,
shape_compat,
)
Experiment = Tuple[float, Optional[float], int]
input_features_name = Path("input_features.npy")
normals_name = Path("normals.npy")
preferences_name = Path("preferences.npy")
true_reward_name = Path("true_reward.npy")
flags_name = Path("flags.pkl")
use_equiv = False
# Top level functions callable from fire
@arg("--epsilons", nargs="+", type=float)
def premake_test_rewards(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
true_reward_name: Path = Path("true_reward.npy"),
datadir: Path = Path(),
outdir: Path = Path(),
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
):
""" Finds test rewards for each experiment. """
outdir.mkdir(parents=True, exist_ok=True)
# TODO(joschnei): I'm making some dangerous logging decisions. Do I want to append to logs, or
# give logs unique names? I really need to pick at least one.
setup_logging(verbosity, log_path=outdir / "log.txt")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
premake_test_rewards(
epsilons=epsilons,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
true_reward_name=true_reward_name,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
n_cpus=n_cpus,
overwrite=overwrite,
verbosity=verbosity,
)
logging.info(f"Done with replication {replication}")
exit()
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, 4)
with Parallel(n_jobs=n_cpus) as parallel:
make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite,
)
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def simulated(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
human_samples: List[int] = [1],
n_reward_samples: int = 1000,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
traj_opt: bool = False,
datadir: Path = Path(),
outdir: Path = Path(),
deltas: List[Optional[float]] = [None],
use_mean_reward: bool = False,
use_random_test_questions: bool = False,
n_random_test_questions: Optional[int] = None,
use_cheating_questions: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
use_true_epsilon: bool = False,
legacy_test_rewards: bool = False,
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite_test_rewards: bool = False,
overwrite_results: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> None:
""" Evaluates alignment test generated by ground-truth rewards. """
logging.basicConfig(level=verbosity, format="%(levelname)s:%(asctime)s:%(message)s")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
logging.info(f"Starting replication {replication}")
simulated(
epsilons=epsilons,
deltas=deltas,
n_rewards=n_rewards,
human_samples=human_samples,
n_reward_samples=n_reward_samples,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
use_mean_reward=use_mean_reward,
use_random_test_questions=use_random_test_questions,
use_cheating_questions=use_cheating_questions,
n_random_test_questions=n_random_test_questions,
skip_remove_duplicates=skip_remove_duplicates,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
legacy_test_rewards=legacy_test_rewards,
n_cpus=n_cpus,
overwrite_test_rewards=overwrite_test_rewards,
overwrite_results=overwrite_results,
verbosity=verbosity,
)
exit()
logging.info(f"Using {n_cpus} cpus.")
parallel = Parallel(n_jobs=n_cpus)
outdir.mkdir(parents=True, exist_ok=True)
if n_random_test_questions is not None:
# Argh defaults to parsing an argument as a string if it is optional
n_random_test_questions = int(n_random_test_questions)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
env = Driver()
n_reward_features = env.num_of_features
logging.info("Loading elicitation results")
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, n_reward_features)
if use_equiv:
true_reward = np.append(true_reward, [1])
else:
assert not np.any(elicited_preferences == 0)
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_reward_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
true_reward=true_reward,
)
logging.info(
f"""Filtering settings:
# reward samples={n_reward_samples},
use mean reward={use_mean_reward},
skip duplicates={skip_remove_duplicates}
skip noise={True}
skip epsilon={skip_epsilon_filtering}
skip redundancy={skip_redundancy_filtering}
use true epsilon={use_true_epsilon}
"""
)
confusion_path, test_path = make_outnames(
outdir,
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
)
confusions: Dict[Experiment, np.ndarray] = load(confusion_path, overwrite_results, default={})
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite_results, default={})
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite_results, experiments=set(minimal_tests.keys())
)
if use_random_test_questions:
logging.info("Making random test")
logging.info(f"True reward: {true_reward}")
normals, preferences, input_features = make_random_test(
n_random_test_questions,
elicited_input_features,
elicited_preferences,
reward_iterations=flags["reward_iterations"],
query_type=query_type,
equiv_size=flags["equiv_size"],
sim=env,
use_equiv=use_equiv,
)
good_indices = (true_reward @ normals.T) > 0
logging.info(f"{np.mean(good_indices)*100:2f}% of new test questions agree with gt reward.")
if use_cheating_questions:
logging.info(f"Selecting only questions consistent with gt reward")
normals = normals[good_indices]
preferences = preferences[good_indices]
input_features = input_features[good_indices]
assert_normals(normals, use_equiv)
else:
max_n = max(human_samples)
preferences = elicited_preferences[:max_n]
input_features = elicited_input_features[:max_n]
logging.debug(f"elicited_normals={elicited_normals[:10]}")
normals = orient_normals(
elicited_normals[:max_n], preferences, use_equiv, n_reward_features
)
logging.debug(f"normals={normals[:10]}")
assert np.all(true_reward @ normals.T >= 0)
if not legacy_test_rewards:
test_rewards = make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
traj_opt=traj_opt,
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite_test_rewards,
)
else:
test_rewards = legacy_make_test_rewards(1000, n_rewards, true_reward, epsilons, use_equiv)
for indices, confusion, experiment in parallel(
delayed(run_gt_experiment)(
normals=normals,
test_rewards=test_rewards[epsilon][0],
test_reward_alignment=test_rewards[epsilon][1],
epsilon=epsilon,
delta=delta,
use_equiv=use_equiv,
n_human_samples=n,
factory=factory,
input_features=input_features,
preferences=preferences,
outdir=outdir,
verbosity=verbosity,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
confusions[experiment] = confusion
pkl.dump(confusions, open(confusion_path, "wb"))
pkl.dump(minimal_tests, open(test_path, "wb"))
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def human(
epsilons: List[float] = [0.0],
deltas: List[float] = [0.05],
n_rewards: int = 10000,
human_samples: List[int] = [1],
n_model_samples: int = 1000,
input_features_name: Path = Path("input_features.npy"),
normals_name: Path = Path("normals.npy"),
preferences_name: Path = Path("preferences.npy"),
flags_name: Path = Path("flags.pkl"),
datadir: Path = Path("questions"),
outdir: Path = Path("questions"),
rewards_path: Optional[Path] = None,
use_mean_reward: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
n_cpus: int = 1,
overwrite: bool = False,
):
""" Evaluates alignment test elicited from a human. """
outdir.mkdir(parents=True, exist_ok=True)
parallel = Parallel(n_jobs=n_cpus)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
sim = Driver()
n_reward_features = sim.num_of_features
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
assert elicited_preferences.shape[0] > 0
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_model_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
)
test_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="indices",
)
test_results_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="test_results",
)
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite)
results: Dict[Experiment, np.ndarray] = load(test_results_path, overwrite)
test_rewards = (
np.load(open(rewards_path, "rb"))
if rewards_path is not None
else make_gaussian_rewards(n_rewards, use_equiv)
)
np.save(outdir / "test_rewards.npy", test_rewards)
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite, experiments=set(minimal_tests.keys())
)
for indices, result, experiment in parallel(
delayed(run_human_experiment)(
test_rewards,
elicited_normals,
elicited_input_features,
elicited_preferences,
epsilon,
delta,
n,
factory,
use_equiv,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
results[experiment] = result
pkl.dump(minimal_tests, open(test_path, "wb"))
pkl.dump(results, open(test_results_path, "wb"))
def compare_test_labels(
test_rewards_path: Path,
true_reward_path: Path,
traj_opt: bool = False,
elicitation: bool = False,
replications: Optional[str] = None,
normals_path: Optional[Path] = None,
):
if replications is not None:
raise NotImplementedError("Replications not yet implemented")
starting_tests: Dict[float, Tuple[np.ndarray, np.ndarray]] = pkl.load(
open(test_rewards_path, "rb")
)
assert not (traj_opt == elicitation), "Provided labels must come from exactly one source"
class Test(NamedTuple):
rewards: np.ndarray
q_labels: np.ndarray
elicitation_labels: np.ndarray
test_rewards: Dict[float, Test] = {}
true_reward = np.load(true_reward_path)
if traj_opt:
normals = np.load(normals_path)
for epsilon, (rewards, q_labels) in starting_tests.items():
normals = normals[true_reward @ normals.T > epsilon]
elicitation_labels = run_test(normals, rewards, use_equiv=False)
test_rewards[epsilon] = Test(
rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
)
elif elicitation:
parallel = Parallel(n_jobs=-4)
env = LegacyEnv(reward=true_reward, random_start=True)
traj_optimizer = TrajOptimizer(10)
for epsilon, (rewards, elicitation_labels) in starting_tests.items():
q_labels = rewards_aligned(
traj_optimizer=traj_optimizer,
env=env,
true_reward=true_reward,
test_rewards=rewards,
epsilon=epsilon,
parallel=parallel,
)
test_rewards[epsilon] = Test(
rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
)
total_agree = 0
total_rewards = 0
for epsilon, test in test_rewards.items():
total_agree += np.sum(test.q_labels == test.elicitation_labels)
total_rewards += len(test.rewards)
print(
f"Critic and superset labels agree on {total_agree / total_rewards * 100 :.1f}% of rewards"
)
# Test reward generation
def make_test_rewards(
epsilons: Sequence[float],
true_reward: np.ndarray,
n_rewards: int,
outdir: Path,
parallel: Parallel,
n_test_states: Optional[int] = None,
traj_opt: bool = False,
max_attempts: int = 10,
n_gt_test_questions: Optional[int] = None,
use_equiv: bool = False,
overwrite: bool = False,
) -> Dict[float, Tuple[np.ndarray, np.ndarray]]:
""" Makes test rewards sets for every epsilon and saves them to a file. """
traj_optimizer = (
TrajOptimizer(n_planner_iters=100, optim=tf.keras.optimizers.Adam(0.2))
if traj_opt
else None
)
reward_path = outdir / "test_rewards.pkl"
test_rewards: Dict[float, Tuple[np.ndarray, np.ndarray]] = load(
reward_path, overwrite=overwrite
)
if test_rewards is None:
test_rewards = {}
else:
logging.info(f"Loading test rewards from {reward_path}")
new_epsilons = set(epsilons) - test_rewards.keys()
if len(new_epsilons) > 0:
logging.info(f"Creating new test rewards for epsilons: {new_epsilons}")
if (n_test_states is not None and n_test_states > 1) or len(new_epsilons) == 1:
# Parallelize internally
test_rewards.update(
{
epsilon: find_reward_boundary(
true_reward=true_reward,
traj_optimizer=traj_optimizer,
n_rewards=n_rewards,
use_equiv=use_equiv,
epsilon=epsilon,
n_test_states=n_test_states,
max_attempts=max_attempts,
outdir=outdir,
n_gt_test_questions=n_gt_test_questions,
overwrite=overwrite,
parallel=parallel,
)[:2]
for epsilon in new_epsilons
}
)
else:
for rewards, alignment, epsilon in parallel(
delayed(find_reward_boundary)(
true_reward=true_reward,
traj_optimizer=traj_optimizer,
n_rewards=n_rewards,
use_equiv=use_equiv,
epsilon=epsilon,
n_test_states=n_test_states,
max_attempts=max_attempts,
n_gt_test_questions=n_gt_test_questions,
outdir=outdir,
overwrite=overwrite,
parallel=None,
)
for epsilon in new_epsilons
):
test_rewards[epsilon] = (rewards, alignment)
logging.info(f"Writing generated test rewards to {reward_path}")
pkl.dump(test_rewards, open(reward_path, "wb"))
return test_rewards
def find_reward_boundary(
true_reward: np.ndarray,
traj_optimizer: Optional[TrajOptimizer],
n_rewards: int,
use_equiv: bool,
epsilon: float,
max_attempts: int,
outdir: Path,
parallel: Parallel,
n_test_states: Optional[int] = None,
n_gt_test_questions: Optional[int] = None,
overwrite: bool = False,
) -> Tuple[np.ndarray, np.ndarray, float]:
""" Finds a ballanced set of test rewards according to a critic and epsilon. """
env = LegacyEnv(reward=true_reward)
# Don't parallelize here if we're only testing at one state
logging.debug(f"# test states={n_test_states}")
parallel = None if n_test_states is None or n_test_states <= 1 else parallel
new_rewards = partial(
make_gaussian_rewards, n_rewards=n_rewards, use_equiv=use_equiv, mean=true_reward
)
get_alignment = partial(
rewards_aligned,
traj_optimizer=traj_optimizer,
env=env,
true_reward=true_reward,
epsilon=epsilon,
parallel=parallel,
n_test_states=n_test_states,
n_questions=n_gt_test_questions,
)
search = TestRewardSearch.load(epsilon=epsilon, path=outdir / "search.pkl", overwrite=overwrite)
if search is None:
search = TestRewardSearch(
epsilon,
cov_search=GeometricSearch(start=1.0),
max_attempts=max_attempts,
outdir=outdir,
new_rewards=new_rewards,
get_alignment=get_alignment,
)
else:
search.new_rewards = new_rewards
search.get_alignment = get_alignment
best_test = search.run()
return best_test.rewards, best_test.alignment, epsilon
def rewards_aligned(
traj_optimizer: Optional[TrajOptimizer],
env: Env,
true_reward: np.ndarray,
test_rewards: np.ndarray,
epsilon: float,
parallel: Optional[Parallel] = None,
n_test_states: Optional[int] = None,
n_questions: int = 100000,
use_equiv: bool = False,
) -> np.ndarray:
""" Determines the epsilon-alignment of a set of test rewards relative to a critic and epsilon. """
# The trajectory-optimization test below can produce both false positives and false negatives.
# The ground-truth question test is prone to false positives, but a negative from it is always a true negative.
gt_test = make_gt_test_align(test_rewards, n_questions, true_reward, epsilon, use_equiv)
if traj_optimizer is not None:
traj_opt_alignment = make_traj_opt_align(
traj_optimizer, env, true_reward, test_rewards, epsilon, parallel, n_test_states
)
# Start with traj opt alignment, then mask out all of the rewards that failed the gt test
# x y z
# 0 0 0
# 0 1 0 don't trust y when it says something is aligned if you failed the traj opt
# 1 0 0 if y says it's misaligned, then it is
# 1 1 1
# This is just the & function
alignment = traj_opt_alignment & gt_test
n_masked = np.sum(traj_opt_alignment & np.logical_not(gt_test))
logging.info(
f"Trajectory optimization labelling produced at least {n_masked} false positives"
)
else:
alignment = gt_test
return alignment
def make_gt_test_align(
test_rewards: np.ndarray,
n_questions: int,
true_reward: np.ndarray,
epsilon: float,
use_equiv: bool = False,
) -> np.ndarray:
env = Driver()
trajs = make_random_questions(n_questions, env)
_, normals = make_normals(trajs, env, use_equiv)
value_diff = true_reward @ normals.T
eps_questions = np.abs(value_diff) > epsilon
normals = normals[eps_questions]
gt_pref = value_diff[eps_questions] > 0
normals = orient_normals(normals, gt_pref, use_equiv)
alignment = cast(np.ndarray, np.all(test_rewards @ normals.T > 0, axis=1))
assert alignment.shape == (
test_rewards.shape[0],
), f"alignment shape={alignment.shape} is not expected {test_rewards.shape[0]}"
return alignment
def make_traj_opt_align(
traj_optimizer: TrajOptimizer,
env: Env,
true_reward: np.ndarray,
test_rewards: np.ndarray,
epsilon: float,
parallel: Optional[Parallel] = None,
n_test_states: Optional[int] = None,
) -> np.ndarray:
state_shape = env.observation_space.sample().shape
action_shape = env.action_space.sample().shape
if n_test_states is not None:
raw_states = np.array(
[
flatten(env.observation_space, env.observation_space.sample())
for _ in range(n_test_states)
]
)
else:
n_test_states = 1
raw_states = np.array([env.state])
assert raw_states.shape == (n_test_states, *state_shape)
opt_plans = make_plans(
true_reward.reshape(1, 4),
raw_states,
traj_optimizer,
parallel,
action_shape,
memorize=True,
)
assert opt_plans.shape == (
1,
n_test_states,
50,
*action_shape,
), f"opt_plans shape={opt_plans.shape} is not expected {(1,n_test_states,50,*action_shape)}"
opt_values: np.ndarray = rollout_plans(env, opt_plans, raw_states)
plans = make_plans(test_rewards, raw_states, traj_optimizer, parallel, action_shape)
assert plans.shape == (
len(test_rewards),
n_test_states,
50,
*action_shape,
), f"plans shape={plans.shape} is not expected {(len(test_rewards),n_test_states,50,*action_shape)}"
values = rollout_plans(env, plans, raw_states)
assert values.shape == (
len(test_rewards),
n_test_states,
), f"Values shape={values.shape} is not expected {(len(test_rewards), n_test_states)}"
alignment = cast(np.ndarray, np.all(opt_values - values < epsilon, axis=1))
return alignment
def rollout_plans(env: LegacyEnv, plans: np.ndarray, states: np.ndarray):
returns = np.empty((plans.shape[0], plans.shape[1]))
assert len(returns.shape) == 2
assert len(plans.shape) == 4
for i in range(plans.shape[0]):
for j in range(plans.shape[1]):
returns[i, j] = rollout(plans[i, j], env, states[j])
return returns
def legacy_make_test_rewards(
n_questions: int,
n_rewards: int,
true_reward: np.ndarray,
epsilons: List[float],
use_equiv: bool,
) -> Dict[float, Tuple[np.ndarray, np.ndarray]]:
""" Generates n_rewards reward vectors and determines which are aligned. """
assert n_rewards > 0
assert_reward(true_reward, use_equiv)
trajs = make_random_questions(n_questions, Driver())
_, normals = make_normals(trajs, Driver(), use_equiv)
gt_pref = true_reward @ normals.T > 0
normals = orient_normals(normals, gt_pref, use_equiv)
assert_normals(normals, use_equiv)
n_reward_features = normals.shape[1]
test_rewards: Dict[float, Tuple[np.ndarray, np.ndarray]] = {}
for epsilon in epsilons:
assert epsilon >= 0.0
cov = 1.0
rewards = make_gaussian_rewards(n_rewards, use_equiv, mean=true_reward, cov=cov)
normals = normals[true_reward @ normals.T > epsilon]
ground_truth_alignment = cast(np.ndarray, np.all(rewards @ normals.T > 0, axis=1))
mean_agree = np.mean(ground_truth_alignment)
while mean_agree > 0.55 or mean_agree < 0.45:
if mean_agree > 0.55:
cov *= 1.1
else:
cov /= 1.1
if not np.isfinite(cov) or cov <= 0.0 or cov >= 100.0:
# TODO(joschnei): Break is a code smell
logging.warning(f"cov={cov}, using last good batch of rewards.")
break
rewards = make_gaussian_rewards(n_rewards, use_equiv, mean=true_reward, cov=cov)
normals = normals[true_reward @ normals.T > epsilon]
ground_truth_alignment = cast(np.ndarray, np.all(rewards @ normals.T > 0, axis=1))
mean_agree = np.mean(ground_truth_alignment)
assert ground_truth_alignment.shape == (n_rewards,)
assert rewards.shape == (n_rewards, n_reward_features)
test_rewards[epsilon] = (rewards, ground_truth_alignment)
return test_rewards
def make_plans(
rewards: np.ndarray,
states: np.ndarray,
optim: TrajOptimizer,
parallel: Optional[Parallel] = None,
action_shape: Tuple[int, ...] = (2,),
memorize: bool = False,
) -> np.ndarray:
assert shape_compat(
rewards, (-1, 4)
), f"rewards shape={rewards.shape} is wrong, expected (-1, 4)"
if parallel is not None:
input_batches = np.array_split(list(product(rewards, states)), parallel.n_jobs)
logging.debug("Branching")
return np.concatenate(
parallel(
delayed(align_worker)(
rewards=batch[:, 0],
states=batch[:, 1],
optim=optim,
action_shape=action_shape,
)
for batch in input_batches
)
).reshape(len(rewards), len(states), 50, *action_shape)
else:
plans = np.empty((len(rewards), len(states), 50, *action_shape))
for i, reward in enumerate(rewards):
assert reward.shape == (4,)
for j, state in enumerate(states):
traj, _ = optim.make_opt_traj(reward, state, memorize=memorize)
plans[i, j] = traj.reshape(-1, *action_shape)
return plans
def align_worker(
rewards: np.ndarray,
states: np.ndarray,
optim: TrajOptimizer,
action_shape: Tuple[int, ...] = (2,),
):
batch_size = rewards.shape[0]
assert states.shape[0] == batch_size
plans = np.empty((batch_size, 50, *action_shape))
for i, (reward, state) in enumerate(zip(rewards, states)):
traj, _ = optim.make_opt_traj(reward, state)
plans[i] = traj.reshape(-1, *action_shape)
return plans
# Simulated Experiment
def run_gt_experiment(
normals: np.ndarray,
test_rewards: np.ndarray,
test_reward_alignment: np.ndarray,
epsilon: float,
delta: Optional[float],
use_equiv: bool,
n_human_samples: int,
factory: TestFactory,
input_features: np.ndarray,
preferences: np.ndarray,
outdir: Path,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> Tuple[np.ndarray, np.ndarray, Experiment]:
""" Executes an alignment test on a set of test rewards and records the performance of the test."""
experiment = (epsilon, delta, n_human_samples)
logdir = outdir / "logs"
logdir.mkdir(parents=True, exist_ok=True)
logging.basicConfig(
filename=logdir / f"{epsilon}.{delta}.{n_human_samples}.log",
filemode="w",
level=verbosity,
force=True,
format="%(levelname)s:%(asctime)s:%(message)s",
)
logging.info(f"Working on epsilon={epsilon}, delta={delta}, n={n_human_samples}")
# TODO(joschnei): Really need to make this a fixed set common between comparisons.
filtered_normals = normals[:n_human_samples]
input_features = input_features[:n_human_samples]
preferences = preferences[:n_human_samples]
filtered_normals, indices = factory.filter_halfplanes(
inputs_features=input_features,
normals=filtered_normals,
epsilon=epsilon,
preferences=preferences,
delta=delta,
)
confusion = eval_test(
normals=filtered_normals,
rewards=test_rewards,
aligned=test_reward_alignment,
use_equiv=use_equiv,
)
assert confusion.shape == (2, 2)
return indices, confusion, experiment
def eval_test(
normals: np.ndarray, rewards: np.ndarray, aligned: np.ndarray, use_equiv: bool
) -> np.ndarray:
""" Evaluates an alignment test on a set of test rewards and reports confusion wrt ground truth. """
assert rewards.shape[0] == aligned.shape[0]
assert_rewards(rewards, use_equiv)
if normals.shape[0] > 0:
results = run_test(normals, rewards, use_equiv)
logging.info(
f"predicted true={np.sum(results)}, predicted false={results.shape[0] - np.sum(results)}"
)
return confusion_matrix(y_true=aligned, y_pred=results, labels=[False, True])
else:
return confusion_matrix(
y_true=aligned,
y_pred=np.ones(aligned.shape, dtype=bool),
labels=[False, True],
)
# Human Experiments
def run_human_experiment(
test_rewards: np.ndarray,
normals: np.ndarray,
input_features: np.ndarray,
preferences: np.ndarray,
epsilon: float,
delta: float,
n_human_samples: int,
factory: TestFactory,
use_equiv: bool,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> Tuple[np.ndarray, np.ndarray, Experiment]:
"""Distills a set of normals and preferences into a test using the factory, and runs that test on test_rewards
Args:
test_rewards (np.ndarray): Rewards to run test on
normals (np.ndarray): normal vector of halfplane constraints defining test questions
input_features (np.ndarray): reward features of trajectories in each question
preferences (np.ndarray): Human provided preference over trajectories
epsilon (float): Size of minimum value gap required for de-noising
delta (float): How much of the reward posterior must be over the value gap
n_human_samples (int): Number of preferences to prune down to
factory (TestFactory): Factory to produce test questions
use_equiv (bool): Allow equivalent preference labels?
verbosity (str): Logging verbosity
Returns:
Tuple[np.ndarray, np.ndarray, Experiment]: indices of the selected test questions, test results for each reward, and experimental hyperparameters
"""
logging.basicConfig(level=verbosity, format="%(levelname)s:%(asctime)s:%(message)s")
if n_human_samples == -1:
n_human_samples = normals.shape[0]
filtered_normals = normals[:n_human_samples]
filtered_normals, indices = factory.filter_halfplanes(
inputs_features=input_features,
normals=filtered_normals,
epsilon=epsilon,
preferences=preferences,
delta=delta,
)
experiment = (epsilon, delta, n_human_samples)
results = run_test(filtered_normals, test_rewards, use_equiv)
return indices, results, experiment
# Common test utils
def make_experiments(
epsilons: Sequence[float],
deltas: Sequence[Optional[float]],
n_human_samples: Sequence[int],
overwrite: bool,
experiments: Optional[Set[Experiment]] = None,
) -> Generator[Experiment, None, None]:
""" Yields new experiments (unless overwrite is speificed)"""
if overwrite:
# TODO(joschnei): This is stupid but I can't be bothered to cast an iterator to a generator.
for experiment in product(epsilons, deltas, n_human_samples):
yield experiment
else:
for experiment in product(epsilons, deltas, n_human_samples):
if experiments is None or not (experiment in experiments):
yield experiment
def make_random_test(
n_random_test_questions: Optional[int],
elicited_input_features: np.ndarray,
elicited_preferences: np.ndarray,
reward_iterations: int,
query_type: str,
equiv_size: float,
sim,
use_equiv: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Generates an alignment test of randomly generated questions answered according to the mean
posterior reward.
"""
if n_random_test_questions is None:
raise ValueError(
"Must supply n_random_test_questions if use_random_test_questions is true."
)
mean_reward = get_mean_reward(
elicited_input_features,
elicited_preferences,
reward_iterations,
query_type,
equiv_size,
)
logging.info(f"Mean posterior reward for use in random test: {mean_reward}")
inputs = make_random_questions(n_random_test_questions, sim)
input_features, normals = make_normals(inputs, sim, use_equiv)
preferences = normals @ mean_reward > 0
assert preferences.shape == (normals.shape[0],)
normals = orient_normals(normals, preferences)
return normals, preferences, input_features
def run_test(normals: np.ndarray, test_rewards: np.ndarray, use_equiv: bool) -> np.ndarray:
""" Returns the predicted alignment of the fake rewards by the normals. """
assert_normals(normals, use_equiv)
results = cast(np.ndarray, np.all(np.dot(test_rewards, normals.T) > 0, axis=1))
return results
# IO Utils
def make_outname(
skip_remove_duplicates: bool,
skip_noise_filtering: bool,
skip_epsilon_filtering: bool,
skip_redundancy_filtering: bool,
base: str = "out",
) -> str:
""" Constructs a file name for output files based on flags. """
outname = base
if skip_remove_duplicates:
outname += ".skip_duplicates"
if skip_noise_filtering:
outname += ".skip_noise"
if skip_epsilon_filtering:
outname += ".skip_epsilon"
if skip_redundancy_filtering:
outname += ".skip_lp"
outname += ".pkl"
return outname
def make_outnames(
outdir: Path,
skip_remove_duplicates: bool,
skip_noise_filtering: bool,
skip_epsilon_filtering: bool,
skip_redundancy_filtering: bool,
) -> Tuple[Path, Path]:
""" Constructs confusion and index output file names based on flags. """
confusion_path = outdir / make_outname(
skip_remove_duplicates,
skip_noise_filtering,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="confusion",
)
test_path = outdir / make_outname(
skip_remove_duplicates,
skip_noise_filtering,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="indices",
)
return confusion_path, test_path
def load_elicitation(
datadir: Path,
normals_name: Union[str, Path],
preferences_name: Union[str, Path],
input_features_name: Union[str, Path],
n_reward_features: int,
use_equiv: bool,
query_type: Optional[str] = None,
equiv_probability: Optional[float] = None,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Loads and postprocesses elicitation.py output"""
normals = np.load(datadir / normals_name)
preferences = np.load(datadir / preferences_name)
input_features = np.load(datadir / input_features_name)
if use_equiv:
assert equiv_probability is not None
normals = add_equiv_constraints(preferences, normals, equiv_prob=equiv_probability)
elif query_type == "weak":
preferences, normals, input_features = remove_equiv(
preferences,
normals,
input_features,
)
assert_normals(normals, False, n_reward_features)
assert_nonempty(normals, preferences, input_features)
return normals, preferences, input_features
if __name__ == "__main__":
argh.dispatch_commands([premake_test_rewards, simulated, human, compare_test_labels])
```
#### File: jordan-schneider/value-alignment-verification/search.py
```python
from __future__ import annotations
import logging
import pickle
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Final, Optional, cast
import numpy as np
from utils import load
class GeometricSearch:
"""Searches for a parameter with unknown bounds in the following way:
1. Search geometrically (*= base) in each direction until you've overshot the goal
2. Once you have established bounds, take the next value to be the geometric mean of the bounds.
"""
def __init__(self, start: float, base: float = 10.0) -> None:
self.value = start
self.base = base
self.min = start
self.max = start
def __call__(self, low: bool) -> float:
if not low:
self.min = min(self.min, self.value)
if self.value < self.max:
# If we already found a max we don't want to go above, pick a value between
# the current covariance and the max
self.value = np.sqrt(self.value * self.max)
else:
# Otherwise, grow geometrically
self.value *= self.base
else:
self.max = max(self.max, self.value)
if self.value > self.min:
self.value = np.sqrt(self.value * self.min)
else:
self.value /= self.base
return self.value
@dataclass
class Test:
rewards: np.ndarray
alignment: np.ndarray
mean_alignment: float
class TestRewardSearch:
def __init__(
self,
epsilon: float,
cov_search: GeometricSearch,
max_attempts: int,
outdir: Path,
new_rewards: Callable[[float], np.ndarray],
get_alignment: Callable[[np.ndarray], np.ndarray],
) -> None:
self.epsilon: Final[float] = epsilon
self.best_test: Optional[Test] = None
self.last_test: Optional[Test] = None
self.cov_search: Final[GeometricSearch] = cov_search
self.attempt = 0
self.max_attempts: Final[int] = max_attempts
self.outdir = outdir
self.new_rewards = new_rewards
self.get_alignment = get_alignment
def run(self) -> Test:
while not self.is_done():
self.attempt += 1
cov = (
self.cov_search(low=self.last_test.mean_alignment < 0.45)
if self.last_test is not None
else self.cov_search.value
)
if not np.isfinite(cov) or cov <= 0.0 or cov >= 100.0:
if self.best_test is not None:
logging.warning(
f"cov={cov}, using best try with mean_alignment={self.best_test.mean_alignment}."
)
else:
logging.warning(f"First cov={cov}, use inital covariance between 0 and 100.")
# TODO(joschnei): Break is a code smell
break
self.last_test = self.make_test(cov=cov)
logging.info(
f"attempt={self.attempt} of {self.max_attempts}, mean_alignment={self.last_test.mean_alignment}"
)
if self.best_test is None or np.abs(self.last_test.mean_alignment - 0.5) < np.abs(
self.best_test.mean_alignment - 0.5
):
self.best_test = self.last_test
pickle.dump(self, (self.outdir / "search.pkl").open("wb"))
logging.debug("Dumped search")
assert self.best_test is not None
if self.attempt == self.max_attempts:
logging.warning(
f"Ran out of attempts, using test with mean_alignment={self.best_test.mean_alignment}"
)
return self.best_test
def make_test(self, cov: float) -> Test:
test_rewards = self.new_rewards(cov=cov)
alignment = self.get_alignment(test_rewards=test_rewards)
mean_align = cast(float, np.mean(alignment))
return Test(rewards=test_rewards, alignment=alignment, mean_alignment=mean_align)
@staticmethod
def load(epsilon: float, path: Path, overwrite: bool = False) -> Optional[TestRewardSearch]:
search: TestRewardSearch = load(path, overwrite)
if search is None or search.epsilon != epsilon:
return None
return search
def is_done(self) -> bool:
if self.attempt == self.max_attempts:
return True
if (
self.best_test is not None
and self.best_test.mean_alignment > 0.45
and self.best_test.mean_alignment < 0.55
):
return True
return False
def __getstate__(self):
""" Remove function arguments from pickle. """
state = self.__dict__.copy()
del state["new_rewards"]
del state["get_alignment"]
return state
``` |
{
"source": "JordanSeay/CalorieCount",
"score": 3
} |
#### File: JordanSeay/CalorieCount/getFoodData.py
```python
import csv
import pandas as pd
from collections import Counter
from nltk.tokenize import RegexpTokenizer
import time
def getFoodIdDf(description, foodIdFilePath="foodData/input_food.csv"):
colList = ["sr_description", "fdc_id"]
df = pd.read_csv(foodIdFilePath, usecols=colList)
tokenizer = RegexpTokenizer(r'\w+')
inputDescriptionTokensCount = Counter(
tokenizer.tokenize(description.lower()))
maxMatches = 0
bestMatch = "No Match"
for _, row in df.iterrows():
descriptionTokensCount = Counter(tokenizer.tokenize(row["sr_description"].lower()))
matches = descriptionTokensCount & inputDescriptionTokensCount
if len(descriptionTokensCount) > 0:
numMatches = sum(matches.values()) / len(descriptionTokensCount)
if numMatches > maxMatches:
maxMatches = numMatches
bestMatch = row["fdc_id"]
return bestMatch
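# Illustrative example (hypothetical data): for the input description
# "grilled chicken breast", a row described as "Chicken, breast, grilled" shares
# all three of its tokens with the input and scores 3/3 = 1.0, beating a row
# like "Chicken, breast, raw" (2/3). Note that numMatches/maxMatches hold this
# fraction of the row's tokens found in the input, not a raw count.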
def getFoodIdCsv(description, foodIdFilePath="foodData/input_food.csv"):
openFoodIdFile = open(foodIdFilePath)
tokenizer = RegexpTokenizer(r'\w+')
inputDescriptionTokensCount = Counter(
tokenizer.tokenize(description.lower()))
maxMatches = 0
bestMatch = "No Match"
for row in csv.reader(openFoodIdFile):
descriptionTokensCount = Counter(tokenizer.tokenize(row[6].lower()))
matches = descriptionTokensCount & inputDescriptionTokensCount
if len(descriptionTokensCount) > 0:
numMatches = sum(matches.values()) / len(descriptionTokensCount)
if numMatches > maxMatches:
maxMatches = numMatches
bestMatch = row[1]
return bestMatch
def getNutrientAmount(foodId, nutrientId=1008, nutrientFilePath="foodData/food_nutrient.csv"):
df = pd.read_csv(nutrientFilePath, low_memory=False)
food_row = df.loc[(df['fdc_id'] == foodId) & (df['nutrient_id'] == nutrientId)]
nutrientAmt = food_row["amount"].iloc[0]
return nutrientAmt
def getNutrientAmount2(foodId, nutrientId='1008', nutrientFilePath="foodData/food_nutrient.csv"):
    # stream the CSV and close the handle when done; return None explicitly if
    # no row matches the requested food/nutrient pair
    with open(nutrientFilePath) as openNutrientFile:
        for row in csv.reader(openNutrientFile):
            if row[1] == foodId and row[2] == nutrientId:
                return float(row[3])
    return None
``` |
{
"source": "jordanshatford/youtube-to-mp3",
"score": 3
} |
#### File: backend/utils/helpers.py
```python
import enum
# NOTE: this needs to be synced with the frontend enum
class Status(str, enum.Enum):
WAITING = "WAITING"
DOWNLOADING = "DOWNLOADING"
PROCESSING = "PROCESSING"
DONE = "DONE"
ERROR = "ERROR"
UNDEFINED = "UNDEFINED"
def format_status_update(video_id: str, status: Status) -> dict:
return {"id": video_id, "status": status.value}
```
#### File: backend/utils/threads.py
```python
import os
import threading
import time
from typing import Callable
from youtube_dl import YoutubeDL
from .helpers import Status
from .models import AudioOptions
from .processors import FileProcessingComplete
class YoutubeDownloadThread(threading.Thread):
def __init__(
self,
id: str,
url: str,
options: AudioOptions,
output_directory: str,
status_update: Callable[[str, Status], None],
):
self._id = id
self._url = url
self._options = options
self._output_directory = output_directory
self._status_update = status_update
YOUTUBE_DL_OPTIONS = {
"format": "bestaudio/best",
"progress_hooks": [self.download_progress_hook],
"outtmpl": f"{self._output_directory}/{self._id}.%(ext)s",
"quiet": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": options.format,
"preferredquality": "192",
}
],
}
self._downloader = YoutubeDL(YOUTUBE_DL_OPTIONS)
self._downloader.add_post_processor(
FileProcessingComplete(
self._id, self._status_update, downloader=self._downloader
)
)
super(YoutubeDownloadThread, self).__init__(
group=None, target=None, name=None, daemon=True
)
def download_progress_hook(self, progress_info: dict) -> None:
if progress_info.get("status", None) == "finished":
self._status_update(self._id, Status.PROCESSING)
def get_file_location(self) -> str:
path = os.path.join(
self._output_directory, f"{self._id}.{self._options.format}"
)
return path
def remove(self) -> bool:
path = self.get_file_location()
if os.path.exists(path):
os.remove(path)
return True
return False
def run(self):
self._status_update(self._id, Status.DOWNLOADING)
try:
self._downloader.download([self._url])
except Exception:
self._status_update(self._id, Status.ERROR)
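# Illustrative usage (the argument values and the AudioOptions constructor are
# assumptions, not taken from this module):
#   thread = YoutubeDownloadThread(
#       id="abc123",
#       url="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
#       options=AudioOptions(format="mp3"),
#       output_directory="/tmp/downloads",
#       status_update=lambda vid, status: print(vid, status.value),
#   )
#   thread.start()  # run() reports DOWNLOADING, then PROCESSING once the download finishes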
class RepeatedTimer:
def __init__(self, interval: int, function: Callable, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.next_call = time.time()
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self.next_call += self.interval
self._timer = threading.Timer(self.next_call - time.time(), self._run)
self._timer.daemon = True
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
``` |
{
"source": "jordansilva/raspberry-f1-dashboard",
"score": 3
} |
#### File: f12019/utils/formatHelper.py
```python
import json
from .enums import Constants
def mapGear(gear, pitLimiter = False):
if pitLimiter:
return "P"
elif gear == -1:
return "R"
elif gear == 0:
return 'N'
elif gear > 0:
return gear
else:
return '-'
def mapRevLights(percent, drsAllowed = False, drsActive = False):
lights = [False] * 14
# green lights
lights[0] = drsActive or drsAllowed
lights[1] = drsActive == True
lights[2] = False
lights[3] = False
# red lights
lights[4] = percent >= 10
lights[5] = percent >= 20
lights[6] = percent >= 30
lights[7] = percent >= 40
lights[8] = percent >= 50
# blue lights
lights[9] = percent >= 60
lights[10] = percent >= 70
lights[11] = percent >= 80
lights[12] = percent >= 85
lights[13] = percent >= 92
return lights
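# Illustrative example: mapRevLights(75, drsAllowed=True) turns on the first
# green light (DRS available), all five red lights (thresholds 10-50 %) and the
# first two blue lights (60 % and 70 %), while the 80/85/92 % lights stay off.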
def deltaPreviousLap(driver):
if not driver or not driver.lapData or not driver.miniSectors or driver.lapData.currentLapNum == 1:
return float('inf')
lastLap = driver.lapData.currentLapNum - 1
currLap = driver.lapData.currentLapNum
currMiniSector = int(driver.lapData.lapDistance / Constants.MINISECTOR_GAP)
if lastLap not in driver.miniSectors or currMiniSector not in driver.miniSectors[lastLap] or currMiniSector not in driver.miniSectors[currLap]:
return float('inf')
lastTime = driver.miniSectors[lastLap][currMiniSector]
currTime = driver.miniSectors[currLap][currMiniSector]
return (currTime - lastTime) /1000.0
def deltaBetween(driver1, driver2):
if not driver1 or not driver2 or not driver1.lapData or not driver2.lapData:
return float('inf')
driver1Lap = driver1.lapData.currentLapNum
driver1Distance = driver1.lapData.lapDistance
driver2Lap = driver2.lapData.currentLapNum
driver2Distance = driver2.lapData.lapDistance
if driver1Lap == driver2Lap or (driver1Lap == driver2Lap+1 and driver1Distance < driver2Distance):
if driver1.miniSectors and driver2.miniSectors:
            # driver2 is the trailing car, so its lap distance sets the reference mini-sector
currMiniSector = int(driver2Distance / Constants.MINISECTOR_GAP)
            # compare both drivers' timestamps on driver2's current lap (the lap furthest behind)
if not driver1.miniSectors[driver2Lap] or currMiniSector not in driver1.miniSectors[driver2Lap]:
return float('inf')
driver1Time = driver1.miniSectors[driver2Lap][currMiniSector]
driver2Time = driver2.miniSectors[driver2Lap][currMiniSector]
diff = driver2Time - driver1Time
return diff / 1000.0
else:
return float('inf')
return 5000.0 + (driver1Lap - driver2Lap)
def formatDelta(delta):
if delta > 5000.0 and delta < 5100.0:
return "+ %d LAPS" % int(delta - 5000.0)
if delta == float('inf'):
return ""
if delta == 0:
return "+/-"
return abs(delta)
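# Note on the convention shared by deltaBetween() and formatDelta(): a plain
# float is a gap in seconds between cars on the same lap, while 5000.0 plus the
# lap difference marks a lapped car, so a value of 5002.0 is rendered "+ 2 LAPS"
# and float('inf') (no data yet) is rendered as an empty string.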
def formatLapTime(lapTime):
timeframe = divmod(lapTime, 60) # array with [minutes, seconds]
if timeframe[0] > 0:
return "%d:%06.3f" % timeframe
else:
return "%06.3f" % timeframe[1]
def formatFuelRemainingLaps(fuel):
if fuel > 0:
return "(+%.2f)" % fuel
else:
return "(-%.2f)" % fuel
```
#### File: f12020/domain/driver.py
```python
import time
class Driver():
MINISECTOR_GAP = 100 # mini sector each 100 meters
def __init__(self, participant):
self.participant = participant
self.motion = None
self.lapData = None
self.setup = None
self.telemetry = None
self.status = None
self.miniSectors = {}
self.fuelUsed = {}
def update_lap(self, lap):
self.lapData = lap
self.process_minisector(lap.currentLapNum, lap.lapDistance)
# self.processStint(lap.currentLapNum)
self.process_fuel(lap.currentLapNum)
def process_minisector(self, lapNum, lapDistance):
if (lapDistance <= 0 and lapNum <= 0):
return
minisectorId = int(lapDistance / self.MINISECTOR_GAP)
if lapNum not in self.miniSectors:
self.miniSectors[lapNum] = {0: time.time_ns() // 1000000}
elif minisectorId not in self.miniSectors[lapNum]:
self.miniSectors[lapNum][minisectorId] = time.time_ns() // 1000000
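    # Illustrative example (not part of the original class): with MINISECTOR_GAP
    # of 100 m, a lapDistance of 1234.5 falls into mini-sector 12, so the first
    # time the car is seen past the 1200 m mark on a lap, a millisecond wall-clock
    # timestamp is stored for that (lap, mini-sector) pair and never overwritten.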
def process_fuel(self, lapNum):
if self.status == None:
return
if lapNum-1 not in self.fuelUsed:
self.fuelUsed[lapNum - 1] = self.status.fuelInTank
```
#### File: raspberry-f1-dashboard/src/telemetry.py
```python
from .services import F12019Socket
from .services import F12020Socket
class Telemetry():
F1_VERSION = 2020
def __init__(self, ctx):
self.threads = []
self.ctx = ctx
def connect(self):
socket = self.initF1Socket(self.F1_VERSION)
self.threads.append(socket)
socket.start()
def onLapReady(self, data):
self.ctx.setContextProperty("lap", data)
def onCarReady(self, data):
self.ctx.setContextProperty("car", data)
def onCarStatusReady(self, data):
self.ctx.setContextProperty("carStatus", data)
def onRaceUpdate(self, data):
self.ctx.setContextProperty("raceStatus", data)
def initF1Socket(self, version):
if (version == 2019):
            socket = F12019Socket()
socket.lap.connect(self.onLapReady)
socket.car.connect(self.onCarReady)
socket.carStatus.connect(self.onCarStatusReady)
return socket
elif (version == 2020):
socket = F12020Socket()
socket.raceStatus.connect(self.onRaceUpdate)
return socket
else:
            return None
```
#### File: src/ui/ui.py
```python
from abc import ABC, abstractmethod
class UIObject(ABC):
@abstractmethod
def process(self, data):
pass
class UILog(UIObject):
def process(self, data):
print(data)
``` |
{
"source": "jordansilva/yelp-research",
"score": 2
} |
#### File: jordansilva/yelp-research/processor.py
```python
import os
import sys
import json
import datetime
import pprint
import math
import numpy as np
import ast
from analysis.parser import parser #parser
from util.reader import reader #loaddata
from algorithms.rbm import RBM #rbm
from sklearn.neural_network import BernoulliRBM
import matplotlib.pyplot as plt #mathplotlib
#tsne
from tsne import bh_sne
dataset = '/Users/jordansilva/Documents/Jordan/Mestrado/Lorien/code/output/vector.rbm'
def run(training_size = sys.maxsize):
print 'size of training sample: %d' % training_size
#load data
r = reader(dataset)
data, labels, data_full = r.load(size=training_size, progress=False)
print data[0]
return
#http://lvdmaaten.github.io/tsne/
#t-Distributed Stochastic Neighbor Embedding (t-SNE) is a (prize-winning) technique for dimensionality reduction that is particularly well suited for the visualization of high-dimensional datasets.
def t_sne(data, labels):
    arr = np.array(data, dtype=np.float64)
    x2 = bh_sne(arr)
    plt.scatter(x2[:, 0], x2[:, 1], c=labels)
    plt.show()
def t_sne_by_category(obj):
    p = parser()
    data_categories = {}
    label_categories = {}
    for d in obj:
        for c in p.categories_item(d):
            if c not in data_categories:
                data_categories[c] = []
                label_categories[c] = []
            data_categories[c].append(d[1:])
            label_categories[c].append('g' if d[0] == 1 else 'r')
    print len(data_categories)
    for c in data_categories:
        print '------------------------'
        print '%s (%d)' % (c, len(data_categories[c]))
        print '------------------------'
        if len(data_categories[c]) > 100:
            t_sne(data_categories[c], label_categories[c])
        else:
            print 'small dimensionality'
def bernoulli_rbm(data, labels):
    # hyper-parameters local to this helper; the original referenced undefined globals
    hidden_units = 5
    epochs_size = 10
    print '> running rbm'
    print 'visible units: %d' % len(data[0])
    print 'hidden units: %d' % hidden_units
    print 'epochs size: %d' % epochs_size
    print '-------------'
    # scikit-learn's BernoulliRBM is unsupervised: it is trained with fit() only
    # (labels are ignored) and has no separate train() method
    rbm = BernoulliRBM(batch_size=32, learning_rate=0.1, n_components=hidden_units, n_iter=epochs_size, random_state=0, verbose=True)
    rbm.fit(np.array(data))
    return rbm
def echen_rbm(data):
visible_units = 57
hidden_units = 10
epochs_size = 5000
print '> running rbm'
print 'visible units: %d' % visible_units
print 'hidden units: %d' % hidden_units
print 'epochs size: %d' % epochs_size
print '-------------'
rbm = RBM(num_visible = visible_units, num_hidden = hidden_units, learning_rate=0.1)
training_data = np.array(data)
rbm.train(training_data, epochs_size, True)
#print(rbm.weights)
# np.savetxt('test.out', r.weights)
# user = np.array([[0,0,0,1,1,0]])
#print rbm.run_visible(user)
# hidden_data = np.array([[0,1]]) # A matrix with a single row that contains the states of the hidden units. (We can also include more rows.)
# print(r.run_hidden(hidden_data)) # See what visible units are activated
if __name__ == '__main__':
print 'running processor...'
training_size = sys.maxsize# 10000
run(training_size)
print 'processor executed'
``` |
{
"source": "JordanSilverman/TrainFinder",
"score": 2
} |
#### File: suds/transport/http.py
```python
from suds.properties import Unskin
from suds.transport import *
import base64
from cookielib import CookieJar
import httplib
import socket
import sys
import urllib2
from urlparse import urlparse
from logging import getLogger
log = getLogger(__name__)
class HttpTransport(Transport):
"""
    Basic HTTP transport implemented using urllib2, that provides for
cookies & proxies but no authentication.
"""
def __init__(self, **kwargs):
"""
@param kwargs: Keyword arguments.
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
"""
Transport.__init__(self)
Unskin(self.options).update(kwargs)
self.cookiejar = CookieJar()
self.proxy = {}
self.urlopener = None
def open(self, request):
try:
url = self.__get_request_url(request)
log.debug('opening (%s)', url)
u2request = urllib2.Request(url)
self.proxy = self.options.proxy
return self.u2open(u2request)
except urllib2.HTTPError, e:
raise TransportError(str(e), e.code, e.fp)
def send(self, request):
result = None
url = self.__get_request_url(request)
msg = request.message
headers = request.headers
try:
u2request = urllib2.Request(url, msg, headers)
self.addcookies(u2request)
self.proxy = self.options.proxy
request.headers.update(u2request.headers)
log.debug('sending:\n%s', request)
fp = self.u2open(u2request)
self.getcookies(fp, u2request)
if sys.version_info < (3, 0):
headers = fp.headers.dict
else:
headers = fp.headers
result = Reply(httplib.OK, headers, fp.read())
log.debug('received:\n%s', result)
except urllib2.HTTPError, e:
if e.code in (httplib.ACCEPTED, httplib.NO_CONTENT):
result = None
else:
raise TransportError(e.msg, e.code, e.fp)
return result
def addcookies(self, u2request):
"""
Add cookies in the cookiejar to the request.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
"""
self.cookiejar.add_cookie_header(u2request)
def getcookies(self, fp, u2request):
"""
Add cookies in the request to the cookiejar.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
"""
self.cookiejar.extract_cookies(fp, u2request)
def u2open(self, u2request):
"""
Open a connection.
@param u2request: A urllib2 request.
@type u2request: urllib2.Request.
@return: The opened file-like urllib2 object.
@rtype: fp
"""
tm = self.options.timeout
url = self.u2opener()
if (sys.version_info < (3, 0)) and (self.u2ver() < 2.6):
socket.setdefaulttimeout(tm)
return url.open(u2request)
return url.open(u2request, timeout=tm)
def u2opener(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return urllib2.build_opener(*self.u2handlers())
return self.urlopener
def u2handlers(self):
"""
Get a collection of urllib handlers.
@return: A list of handlers to be installed in the opener.
@rtype: [Handler,...]
"""
handlers = []
handlers.append(urllib2.ProxyHandler(self.proxy))
return handlers
def u2ver(self):
"""
Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float
"""
try:
part = urllib2.__version__.split('.', 1)
return float('.'.join(part))
except Exception, e:
log.exception(e)
return 0
def __deepcopy__(self, memo={}):
clone = self.__class__()
p = Unskin(self.options)
cp = Unskin(clone.options)
cp.update(p)
return clone
@staticmethod
def __get_request_url(request):
"""
Returns the given request's URL, properly encoded for use with urllib.
URLs are allowed to be:
under Python 2.x: unicode strings, single-byte strings;
under Python 3.x: unicode strings.
In any case, they are allowed to contain ASCII characters only. We
raise a UnicodeError derived exception if they contain any non-ASCII
characters (UnicodeEncodeError or UnicodeDecodeError depending on
whether the URL was specified as a unicode or a single-byte string).
Python 3.x httplib.client implementation must be given a unicode string
and not a bytes object and the given string is internally converted to
a bytes object using an explicitly specified ASCII encoding.
Python 2.7 httplib implementation expects the URL passed to it to not
be a unicode string. If it is, then passing it to the underlying
httplib Request object will cause that object to forcefully convert all
of its data to unicode, assuming that data contains ASCII data only and
raising a UnicodeDecodeError exception if it does not (caused by simple
unicode + string concatenation).
Python 2.4 httplib implementation does not really care about this as it
does not use the internal optimization present in the Python 2.7
implementation causing all the requested data to be converted to
unicode.
"""
url = request.url
py2 = sys.version_info < (3, 0)
if py2 and isinstance(url, str):
encodedURL = url
decodedURL = url.decode("ascii")
else:
# On Python3, calling encode() on a bytes or a bytearray object
# raises an AttributeError exception.
assert py2 or not isinstance(url, bytes)
assert py2 or not isinstance(url, bytearray)
decodedURL = url
encodedURL = url.encode("ascii")
if py2:
return encodedURL # Python 2 urllib - single-byte URL string.
return decodedURL # Python 3 urllib - unicode URL string.
class HttpAuthenticated(HttpTransport):
"""
Provides basic HTTP authentication for servers that do not follow the
specified challenge/response model. Appends the I{Authorization} HTTP
header with base64 encoded credentials on every HTTP request.
"""
def open(self, request):
self.addcredentials(request)
return HttpTransport.open(self, request)
def send(self, request):
self.addcredentials(request)
return HttpTransport.send(self, request)
def addcredentials(self, request):
credentials = self.credentials()
if not (None in credentials):
credentials = ':'.join(credentials)
if sys.version_info < (3,0):
basic = 'Basic %s' % base64.b64encode(credentials)
else:
encodedBytes = base64.urlsafe_b64encode(credentials.encode())
encodedString = encodedBytes.decode()
basic = 'Basic %s' % encodedString
request.headers['Authorization'] = basic
def credentials(self):
return self.options.username, self.options.password
``` |
{
"source": "JordanSimba/dvc",
"score": 2
} |
#### File: dvc/fs/azure.py
```python
import logging
import os
import threading
from datetime import datetime, timedelta
from azure.identity import (
ChainedTokenCredential,
ClientSecretCredential,
DefaultAzureCredential,
)
from azure.identity._exceptions import CredentialUnavailableError
from funcy import cached_property, wrap_prop
from dvc.path_info import CloudURLInfo
from dvc.progress import Tqdm
from dvc.scheme import Schemes
from .base import BaseFileSystem
logger = logging.getLogger(__name__)
class ServicePrincipalConfigCredential:
def __init__(self, config):
self._credential = None
self._credential = self._create_client_secret_cred(config)
def _create_client_secret_cred(self, config):
client_id = config.get("client_id")
client_secret = config.get("client_secret")
tenant_id = config.get("tenant_id")
if not (client_id and client_secret and tenant_id):
return None
return ClientSecretCredential(
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
)
def get_token(self, *scopes, **kwargs):
if not self._credential:
            message = (
                "Dvc authentication unavailable. "
                "Some service principal auth data is missing from config."
            )
raise CredentialUnavailableError(message=message)
return self._credential.get_token(*scopes, **kwargs)
class AzureFileSystem(BaseFileSystem):
scheme = Schemes.AZURE
PATH_CLS = CloudURLInfo
REQUIRES = {
"azure-storage-blob": "azure.storage.blob",
"azure-identity": "azure.identity",
"knack": "knack",
}
PARAM_CHECKSUM = "etag"
DETAIL_FIELDS = frozenset(("etag", "size"))
COPY_POLL_SECONDS = 5
LIST_OBJECT_PAGE_SIZE = 5000
def __init__(self, repo, config):
super().__init__(repo, config)
self._account_url = None
url = config.get("url", "azure://")
self.path_info = self.PATH_CLS(url)
self.bucket = self.path_info.bucket
if not self.bucket:
container = self._az_config.get("storage", "container_name", None)
self.path_info = self.PATH_CLS(f"azure://{container}")
self.bucket = self.path_info.bucket
self._conn_str = config.get(
"connection_string"
) or self._az_config.get("storage", "connection_string", None)
if not self._conn_str:
name = config.get("storage_account") or self._az_config.get(
"storage", "account", None
)
self._account_url = f"https://{name}.blob.core.windows.net"
# Microsoft azure docs
# https://docs.microsoft.com/en-us/python/api/overview/azure/identity-readme?view=azure-python
self._credential = (
config.get("sas_token")
or config.get("storage_key")
or self._az_config.get("storage", "key", None)
or self._az_config.get("storage", "sas_token", None)
or ChainedTokenCredential(
ServicePrincipalConfigCredential(config),
DefaultAzureCredential(),
)
)
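        # Credential resolution order (summary of the chain built above): an
        # explicit sas_token or storage_key in the DVC remote config wins, then
        # the Azure CLI config's key/sas_token, and finally a chain of the
        # service-principal settings from the config followed by
        # DefaultAzureCredential (environment variables, managed identity,
        # az cli login, etc.).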
@cached_property
def _az_config(self):
# NOTE: ideally we would've used get_default_cli().config from
# azure.cli.core, but azure-cli-core has a lot of conflicts with other
# dependencies. So instead we are just use knack directly
from knack.config import CLIConfig
config_dir = os.getenv(
"AZURE_CONFIG_DIR", os.path.expanduser(os.path.join("~", ".azure"))
)
return CLIConfig(config_dir=config_dir, config_env_var_prefix="AZURE")
@wrap_prop(threading.Lock())
@cached_property
def blob_service(self):
# pylint: disable=no-name-in-module
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
)
from azure.storage.blob import BlobServiceClient
if self._conn_str:
logger.debug(f"Using connection string '{self._conn_str}'")
blob_service = BlobServiceClient.from_connection_string(
self._conn_str, credential=self._credential
)
else:
logger.debug(f"Using account url '{self._account_url}'")
blob_service = BlobServiceClient(
self._account_url, credential=self._credential
)
logger.debug(f"Container name {self.bucket}")
container_client = blob_service.get_container_client(self.bucket)
try: # verify that container exists
container_client.get_container_properties()
except ResourceNotFoundError:
container_client.create_container()
except HttpResponseError as exc:
# client may not have account-level privileges
if exc.status_code != 403:
raise
return blob_service
def get_etag(self, path_info):
blob_client = self.blob_service.get_blob_client(
path_info.bucket, path_info.path
)
etag = blob_client.get_blob_properties().etag
return etag.strip('"')
def _generate_download_url(self, path_info, expires=3600):
from azure.storage.blob import ( # pylint:disable=no-name-in-module
BlobSasPermissions,
generate_blob_sas,
)
expires_at = datetime.utcnow() + timedelta(seconds=expires)
blob_client = self.blob_service.get_blob_client(
path_info.bucket, path_info.path
)
sas_token = generate_blob_sas(
blob_client.account_name,
blob_client.container_name,
blob_client.blob_name,
account_key=blob_client.credential.account_key,
permission=BlobSasPermissions(read=True),
expiry=expires_at,
)
return blob_client.url + "?" + sas_token
def exists(self, path_info, use_dvcignore=True):
paths = self._list_paths(path_info.bucket, path_info.path)
return any(path_info.path == path for path in paths)
def _list_paths(self, bucket, prefix):
container_client = self.blob_service.get_container_client(bucket)
for blob in container_client.list_blobs(name_starts_with=prefix):
yield blob.name
def walk_files(self, path_info, **kwargs):
if not kwargs.pop("prefix", False):
path_info = path_info / ""
for fname in self._list_paths(
path_info.bucket, path_info.path, **kwargs
):
if fname.endswith("/"):
continue
yield path_info.replace(path=fname)
def ls(
self, path_info, detail=False, recursive=False
): # pylint: disable=arguments-differ
assert recursive
container_client = self.blob_service.get_container_client(
path_info.bucket
)
for blob in container_client.list_blobs(
name_starts_with=path_info.path
):
if detail:
yield {
"type": "file",
"name": blob.name,
"size": blob.size,
"etag": blob.etag.strip('"'),
}
else:
yield blob.name
def remove(self, path_info):
if path_info.scheme != self.scheme:
raise NotImplementedError
logger.debug(f"Removing {path_info}")
self.blob_service.get_blob_client(
path_info.bucket, path_info.path
).delete_blob()
def info(self, path_info):
blob_client = self.blob_service.get_blob_client(
path_info.bucket, path_info.path
)
properties = blob_client.get_blob_properties()
return {
"type": "file",
"size": properties.size,
"etag": properties.etag.strip('"'),
}
def _upload_fobj(self, fobj, to_info):
blob_client = self.blob_service.get_blob_client(
to_info.bucket, to_info.path
)
blob_client.upload_blob(fobj, overwrite=True)
def _upload(
self, from_file, to_info, name=None, no_progress_bar=False, **_kwargs
):
total = os.path.getsize(from_file)
with open(from_file, "rb") as fobj:
self.upload_fobj(
fobj,
to_info,
desc=name,
total=total,
no_progress_bar=no_progress_bar,
)
def _download(
self, from_info, to_file, name=None, no_progress_bar=False, **_kwargs
):
blob_client = self.blob_service.get_blob_client(
from_info.bucket, from_info.path
)
total = blob_client.get_blob_properties().size
stream = blob_client.download_blob()
with open(to_file, "wb") as fobj:
with Tqdm.wrapattr(
fobj, "write", desc=name, total=total, disable=no_progress_bar
) as wrapped:
stream.readinto(wrapped)
``` |
{
"source": "jordanskomer/hubitat-meross",
"score": 3
} |
#### File: jordanskomer/hubitat-meross/login.py
```python
import base64
import hashlib
import string
import random
import requests
import time
def rand_gen(size, chars=string.ascii_lowercase + string.digits):
return str(''.join(random.choice(chars) for _ in range(size)))
def msg_id(unix_time):
concat_string = '{}{}'.format(rand_gen(16), unix_time)
final_md5 = hashlib.md5(concat_string.encode('utf-8')).hexdigest()
return str(final_md5)
def get_unix_time():
current_time = int(time.time())
return current_time
def get_key(username, password, uts):
    nonce = rand_gen(16)
unix_time = uts
param = '{{"email":"{}","password":"{}"}}'.format(username, password)
encoded_param = base64.standard_b64encode(param.encode('utf8'))
concat_sign = '{}{}{}{}'.format('23x17ahWarFH6w29', unix_time, nonce, encoded_param.decode("utf-8"))
sign = hashlib.md5(concat_sign.encode('utf-8')).hexdigest()
headers = {
'content-type': 'application/x-www-form-urlencoded',
}
data = {
'params': encoded_param,
'sign': sign,
'timestamp': unix_time,
'nonce': nonce
}
response = requests.post('https://iot.meross.com/v1/Auth/login', headers=headers, data=data)
key = response.json()['data']['key']
userid = response.json()['data']['userid']
token = response.json()['data']['token']
return str(key), str(userid), str(token)
def signing_key(message_id, key, uts):
concat_string = '{}{}{}'.format(message_id, key, uts)
final_md5 = hashlib.md5(concat_string.encode('utf-8')).hexdigest()
return str(final_md5)
def login(username, password):
current = get_unix_time()
message_id = msg_id(current)
key, userid, token = get_key(username, password, current)
sign = signing_key(message_id,key, current)
print("{} {}".format("userId:", userid))
print("{} {}".format("key:", key))
print("{} {}".format("token:", token))
print("{} {}".format("messageId:", message_id))
print("{} {}".format("sign:", sign))
print("{} {}".format("timestamp:", current))
email = input("email: ")
password = input("password: ")
login(email,password)
``` |
{
"source": "Jordan-Stoddard/Blockchain-Deforked",
"score": 3
} |
#### File: Blockchain-Deforked/client_mining_p/miner.py
```python
import hashlib
import requests
import time
import sys
# TODO: Implement functionality to search for a proof
def valid_proof(last_proof, proof):
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:6] == "000000"
def proof_of_work(last_proof):
proof = 0
while valid_proof(last_proof, proof) is False:
proof += 1
return proof
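# Rough cost of the search above (illustrative): valid_proof() demands that the
# first six hex digits of sha256(f"{last_proof}{proof}") are zero, so on average
# about 16**6 (roughly 16.7 million) candidate proofs are hashed before one is found.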
if __name__ == '__main__':
# What node are we interacting with?
if len(sys.argv) > 1:
node = sys.argv[1]
else:
node = "http://localhost:5000"
coins_mined = 0
# Run forever until interrupted
while True:
print('Searching for next proof')
# TODO: Get the last proof from the server and look for a new one
        last_proof = requests.get(f'{node}/last_proof').json()['last_proof']
start = time.process_time()
proof = proof_of_work(last_proof)
# TODO: When found, POST it to the server {"proof": new_proof}
        miningResponse = requests.post(f'{node}/mine', json={'proof': proof}).json()
# TODO: If the server responds with 'New Block Forged'
if miningResponse['message'] == "New Block Forged":
# add 1 to the number of coins mined and print it.
coins_mined += 1
print(f'Total coins mined: {coins_mined}')
end = time.process_time()
print(f'Mining operation took: {end - start} seconds')
# Otherwise,
else:
# print the message from the server.
print(miningResponse['message'])
``` |
{
"source": "jordantcarlisle/DSPT6-Twitoff",
"score": 3
} |
#### File: DSPT6-Twitoff/twitoff/db_model.py
```python
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.BigInteger, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
follower_count = db.Column(db.Integer, nullable=False)
# Tweet ids are ordinal ints, so we can fetch most recent tweets
newest_tweet_id = db.Column(db.BigInteger, nullable=False)
def __repr__(self):
return '<User %r>' % self.username
class Tweet(db.Model):
id = db.Column(db.BigInteger, primary_key=True)
text = db.Column(db.Unicode(300))
embedding = db.Column(db.PickleType, nullable=False)
user_id = db.Column(db.BigInteger, db.ForeignKey('user.id'), nullable=False)
user = db.relationship('User', backref=db.backref('tweet', lazy=True))
def __repr__(self):
return '<Tweet %r>' % self.text
''' from twitoff.db_model import db, User, Tweet '''
``` |
{
"source": "jordantcarlisle/Lambdata-DSPT6-JTC",
"score": 2
} |
#### File: Lambdata-DSPT6-JTC/test/ds_utilities_test.py
```python
import unittest
from my_lambdata.ds_utilities import enlarge
class TestDsUtilities(unittest.TestCase):
def test_enlarge(self):
self.assertEqual(enlarge(3), 300)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "Jordan-Theriault/mriqc",
"score": 2
} |
#### File: mriqc/bin/mriqc_run.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import os.path as op
from multiprocessing import cpu_count
from .. import __version__
DEFAULT_MEM_GB = 8
def get_parser():
"""Build parser object"""
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from .. import DEFAULTS
parser = ArgumentParser(description='MRIQC: MRI Quality Control',
formatter_class=RawTextHelpFormatter)
# Arguments as specified by BIDS-Apps
# required, positional arguments
# IMPORTANT: they must go directly with the parser object
parser.add_argument('bids_dir', action='store',
help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('output_dir', action='store',
help='The directory where the output files '
'should be stored. If you are running group level analysis '
                        'this folder should be prepopulated with the results of the '
'participant level analysis.')
parser.add_argument('analysis_level', action='store', nargs='+',
help='Level of the analysis that will be performed. '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir.',
choices=['participant', 'group'])
# optional arguments
parser.add_argument('--version', action='version',
version='mriqc v{}'.format(__version__))
# BIDS selectors
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant_label', '--participant-label', action='store', nargs='+',
help='one or more participant identifiers (the sub- prefix can be '
'removed)')
g_bids.add_argument('--session-id', action='store', nargs='+', type=str,
help='select a specific session to be processed')
g_bids.add_argument('--run-id', action='store', type=str, nargs='+',
help='select a specific run to be processed')
g_bids.add_argument('--task-id', action='store', nargs='+', type=str,
help='select a specific task to be processed')
g_bids.add_argument('-m', '--modalities', action='store', nargs='*',
choices=['T1w', 'bold', 'T2w'], default=['T1w', 'bold', 'T2w'],
help='select one of the supported MRI types')
# Control instruments
g_outputs = parser.add_argument_group('Instrumental options')
g_outputs.add_argument('-w', '--work-dir', action='store',
default=op.join(os.getcwd(), 'work'))
g_outputs.add_argument('--report-dir', action='store')
g_outputs.add_argument('--verbose-reports', default=False, action='store_true')
g_outputs.add_argument('--write-graph', action='store_true', default=False,
help='Write workflow graph.')
g_outputs.add_argument('--dry-run', action='store_true', default=False,
help='Do not run the workflow.')
g_outputs.add_argument('--profile', action='store_true', default=False,
help='hook up the resource profiler callback to nipype')
g_outputs.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
g_outputs.add_argument('--no-sub', default=False, action='store_true',
help='Turn off submission of anonymized quality metrics '
'to MRIQC\'s metrics repository.')
g_outputs.add_argument('--email', action='store', default='', type=str,
help='Email address to include with quality metric submission.')
g_outputs.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
                           help="increases log verbosity for each occurrence, debug level is -vvv")
g_outputs.add_argument(
'--webapi-url', action='store', default='https://mriqc.nimh.nih.gov/api/v1', type=str,
help='IP address where the MRIQC WebAPI is listening')
g_outputs.add_argument(
'--webapi-port', action='store', type=int,
help='port where the MRIQC WebAPI is listening')
g_outputs.add_argument('--upload-strict', action='store_true', default=False,
                           help='upload will fail if upload is strict')
# General performance
g_perfm = parser.add_argument_group('Options to handle performance')
    g_perfm.add_argument('--n_procs', '--nprocs', '--n_cpus',
action='store', default=0, type=int, help='number of threads')
g_perfm.add_argument('--mem_gb', action='store', default=0, type=int,
help='available total memory')
g_perfm.add_argument('--testing', action='store_true', default=False,
help='use testing settings for a minimal footprint')
g_perfm.add_argument(
        '-f', '--float32', action='store_true', default=DEFAULTS['float32'],
        help="Cast the input data to float32 if it's represented in higher precision "
             "(saves space and improves performance)")
# Workflow settings
g_conf = parser.add_argument_group('Workflow configuration')
g_conf.add_argument('--ica', action='store_true', default=False,
                        help='Run ICA on the raw data and include the components '
'in the individual reports (slow but potentially very insightful)')
g_conf.add_argument('--hmc-afni', action='store_true', default=True,
                        help='Use AFNI 3dvolreg for head motion correction (HMC) - default')
g_conf.add_argument('--hmc-fsl', action='store_true', default=False,
help='Use FSL MCFLIRT instead of AFNI for head motion correction (HMC)')
g_conf.add_argument('--fft-spikes-detector', action='store_true', default=False,
help='Turn on FFT based spike detector (slow).')
g_conf.add_argument('--fd_thres', action='store', default=0.2,
type=float, help='motion threshold for FD computation')
# ANTs options
g_ants = parser.add_argument_group('Specific settings for ANTs')
g_ants.add_argument(
'--ants-nthreads', action='store', type=int, default=1,
help='number of threads that will be set in ANTs processes')
g_ants.add_argument(
'--ants-float', action='store_true', default=False,
help='use float number precision on ANTs computations')
g_ants.add_argument('--ants-settings', action='store',
help='path to JSON file with settings for ANTS')
# AFNI head motion correction settings
g_afni = parser.add_argument_group('Specific settings for AFNI')
g_afni.add_argument('--deoblique', action='store_true', default=False,
help='Deoblique the functional scans during head motion '
'correction preprocessing')
g_afni.add_argument('--despike', action='store_true', default=False,
help='Despike the functional scans during head motion correction '
'preprocessing')
g_afni.add_argument('--start-idx', action='store', type=int,
help='Initial volume in functional timeseries that should be '
'considered for preprocessing')
g_afni.add_argument('--stop-idx', action='store', type=int,
help='Final volume in functional timeseries that should be '
'considered for preprocessing')
g_afni.add_argument('--correct-slice-timing', action='store_true', default=False,
help='Perform slice timing correction')
return parser
def main():
"""Entry point"""
from nipype import config as ncfg, logging as nlog
from nipype.pipeline.engine import Workflow
from .. import logging
from ..utils.bids import collect_bids_data
from ..workflows.core import build_workflow
from ..utils.misc import check_folder
# Run parser
opts = get_parser().parse_args()
# Retrieve logging level
log_level = int(max(3 - opts.verbose_count, 0) * 10)
if opts.verbose_count > 1:
log_level = int(max(25 - 5 * opts.verbose_count, 1))
logging.getLogger().setLevel(log_level)
log = logging.getLogger('mriqc.cli')
# Build settings dict
bids_dir = op.abspath(opts.bids_dir)
# Number of processes
n_procs = opts.n_procs
settings = {
'bids_dir': bids_dir,
'write_graph': opts.write_graph,
'testing': opts.testing,
'hmc_afni': opts.hmc_afni,
'hmc_fsl': opts.hmc_fsl,
'fft_spikes_detector': opts.fft_spikes_detector,
'n_procs': n_procs,
'ants_nthreads': opts.ants_nthreads,
'ants_float': opts.ants_float,
'output_dir': op.abspath(opts.output_dir),
'work_dir': op.abspath(opts.work_dir),
'verbose_reports': opts.verbose_reports or opts.testing,
'float32': opts.float32,
'ica': opts.ica,
'no_sub': opts.no_sub,
'email': opts.email,
'fd_thres': opts.fd_thres,
'webapi_url': opts.webapi_url,
'webapi_port': opts.webapi_port,
'upload_strict': opts.upload_strict,
}
if opts.hmc_afni:
settings['deoblique'] = opts.deoblique
settings['despike'] = opts.despike
settings['correct_slice_timing'] = opts.correct_slice_timing
if opts.start_idx:
settings['start_idx'] = opts.start_idx
        if opts.stop_idx:
settings['stop_idx'] = opts.stop_idx
if opts.ants_settings:
settings['ants_settings'] = opts.ants_settings
log_dir = op.join(settings['output_dir'], 'logs')
analysis_levels = opts.analysis_level
if opts.participant_label is None:
analysis_levels.append('group')
analysis_levels = list(set(analysis_levels))
if len(analysis_levels) > 2:
raise RuntimeError('Error parsing analysis levels, got "%s"' % ', '.join(analysis_levels))
settings['report_dir'] = opts.report_dir
if not settings['report_dir']:
settings['report_dir'] = op.join(settings['output_dir'], 'reports')
check_folder(settings['output_dir'])
if 'participant' in analysis_levels:
check_folder(settings['work_dir'])
check_folder(log_dir)
check_folder(settings['report_dir'])
# Set nipype config
ncfg.update_config({
'logging': {'log_directory': log_dir, 'log_to_file': True},
'execution': {'crashdump_dir': log_dir, 'crashfile_format': 'txt',
'resource_monitor': opts.profile},
})
# Set nipype logging level
nlog.getLogger('workflow').setLevel(log_level)
nlog.getLogger('interface').setLevel(log_level)
nlog.getLogger('utils').setLevel(log_level)
plugin_settings = {'plugin': 'Linear'}
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as pfile:
plugin_settings = loadyml(pfile)
else:
# Setup multiprocessing
if settings['n_procs'] == 0:
settings['n_procs'] = cpu_count()
if settings['ants_nthreads'] == 0:
if settings['n_procs'] > 1:
# always leave one extra thread for non ANTs work,
# don't use more than 8 threads - the speed up is minimal
settings['ants_nthreads'] = min(settings['n_procs'] - 1, 8)
else:
settings['ants_nthreads'] = 1
if settings['n_procs'] > 1:
plugin_settings['plugin'] = 'MultiProc'
plugin_settings['plugin_args'] = {'n_procs': settings['n_procs']}
if opts.mem_gb:
plugin_settings['plugin_args']['memory_gb'] = opts.mem_gb
# Process data types
modalities = opts.modalities
# Set up participant level
if 'participant' in analysis_levels:
log.info('Participant level started. Checking BIDS dataset...')
dataset = collect_bids_data(
settings['bids_dir'],
modalities=modalities,
participant_label=opts.participant_label,
session=opts.session_id,
run=opts.run_id,
task=opts.task_id,
)
log.info(
'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
__version__, ', '.join(analysis_levels), opts.participant_label, settings)
workflow = Workflow(name='workflow_enumerator')
workflow.base_dir = settings['work_dir']
wf_list = []
for mod in modalities:
if not dataset[mod]:
log.warning('No %s scans were found in %s', mod, settings['bids_dir'])
continue
wf_list.append(build_workflow(dataset[mod], mod, settings=settings))
if wf_list:
workflow.add_nodes(wf_list)
if not opts.dry_run:
# Warn about submitting measures BEFORE
if not settings['no_sub']:
log.warning(
'Anonymized quality metrics will be submitted'
' to MRIQC\'s metrics repository.'
' Use --no-sub to disable submission.')
# run MRIQC
workflow.run(**plugin_settings)
# Warn about submitting measures AFTER
if not settings['no_sub']:
log.warning(
                    'Anonymized quality metrics have been submitted'
' to MRIQC\'s metrics repository.'
' Use --no-sub to disable submission.')
else:
msg = 'Error reading BIDS directory ({}), or the dataset is not ' \
'BIDS-compliant.'
if opts.participant_label or opts.session_id or opts.run_id or opts.task_id:
msg = 'The combination of supplied labels'
if opts.participant_label is not None:
msg += ' (--participant_label {})'.format(" ".join(opts.participant_label))
if opts.session_id is not None:
msg += ' (--session-id {})'.format(" ".join(opts.session_id))
if opts.run_id is not None:
msg += ' (--run-id {})'.format(" ".join(opts.run_id))
if opts.task_id is not None:
msg += ' (--task-id {})'.format(" ".join(opts.task_id))
msg += ' did not result in matches within the BIDS directory ({}).'
raise RuntimeError(msg.format(settings['bids_dir']))
log.info('Participant level finished successfully.')
# Set up group level
if 'group' in analysis_levels:
from ..reports import group_html
from ..utils.misc import generate_csv # , generate_pred
log.info('Group level started...')
log.info(
'Running MRIQC-%s (analysis_levels=[%s], participant_label=%s)\n\tSettings=%s',
__version__, ', '.join(analysis_levels), opts.participant_label, settings)
reports_dir = check_folder(op.join(settings['output_dir'], 'reports'))
derivatives_dir = op.join(settings['output_dir'], 'derivatives')
n_group_reports = 0
for mod in modalities:
dataframe, out_csv = generate_csv(derivatives_dir,
settings['output_dir'], mod)
# If there are no iqm.json files, nothing to do.
if dataframe is None:
log.warning(
'No IQM-JSON files were found for the %s data type in %s. The group-level '
'report was not generated.', mod, derivatives_dir)
continue
log.info('Summary CSV table for the %s data generated (%s)', mod, out_csv)
# out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
# if out_pred is not None:
# log.info('Predicted QA CSV table for the %s data generated (%s)',
# mod, out_pred)
out_html = op.join(reports_dir, mod + '_group.html')
group_html(out_csv, mod,
csv_failed=op.join(settings['output_dir'], 'failed_' + mod + '.csv'),
out_file=out_html)
log.info('Group-%s report generated (%s)', mod, out_html)
n_group_reports += 1
if n_group_reports == 0:
raise Exception("No data found. No group level reports were generated.")
log.info('Group level finished successfully.')
if __name__ == '__main__':
main()
```
#### File: mriqc/reports/utils.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
def iqms2html(indict, table_id):
"""Converts a dictionary into an HTML table"""
columns = sorted(unfold_columns(indict))
if not columns:
return None
depth = max([len(col) for col in columns])
result_str = '<table id="%s">\n' % table_id
td = '<td{1}>{0}</td>'.format
for line in columns:
result_str += '<tr>'
ncols = len(line)
for i, col in enumerate(line):
colspan = 0
colstring = ''
if (depth - ncols) > 0 and i == ncols - 2:
colspan = (depth - ncols) + 1
colstring = ' colspan=%d' % colspan
result_str += td(col, colstring)
result_str += '</tr>\n'
result_str += '</table>\n'
return result_str
def unfold_columns(indict, prefix=None):
"""Converts an input dict with flattened keys to an array of columns"""
if prefix is None:
prefix = []
keys = sorted(set(list(indict.keys())))
data = []
subdict = {}
for key in keys:
col = key.split('_', 1)
if len(col) == 1:
value = indict[col[0]]
data.append(prefix + [col[0], value])
else:
if subdict.get(col[0]) is None:
subdict[col[0]] = {}
subdict[col[0]][col[1]] = indict[key]
if subdict:
for skey in sorted(list(subdict.keys())):
sskeys = list(subdict[skey].keys())
if len(sskeys) == 1:
value = subdict[skey][sskeys[0]]
newkey = '_'.join([skey] + sskeys)
data.append(prefix + [newkey, value])
else:
data += unfold_columns(
subdict[skey], prefix=prefix + [skey])
return data
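# Illustrative example (hypothetical IQM keys): unfold_columns({'efc': 0.52,
# 'snr_gm': 11.4, 'snr_total': 9.1}) returns [['efc', 0.52], ['snr', 'gm', 11.4],
# ['snr', 'total', 9.1]] -- keys sharing the prefix before the first underscore
# are grouped into nested columns, which iqms2html() then renders with colspans.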
def read_report_snippet(in_file):
"""Add a snippet into the report"""
import os.path as op
import re
from io import open # pylint: disable=W0622
is_svg = (op.splitext(op.basename(in_file))[1] == '.svg')
with open(in_file) as thisfile:
if not is_svg:
return thisfile.read()
svg_tag_line = 0
content = thisfile.read().split('\n')
corrected = []
for i, line in enumerate(content):
if "<svg " in line:
line = re.sub(' height="[0-9.]+[a-z]*"', '', line)
line = re.sub(' width="[0-9.]+[a-z]*"', '', line)
if svg_tag_line == 0:
svg_tag_line = i
corrected.append(line)
return '\n'.join(corrected[svg_tag_line:])
# def check_reports(dataset, settings, save_failed=True):
# """Check if reports have been created"""
# import os.path as op
# import pandas as pd
# from mriqc.utils.misc import BIDS_COMP, BIDS_EXPR
# supported_components = list(BIDS_COMP.keys())
# expr = re.compile(BIDS_EXPR)
# reports_missed = False
# missing = {}
# for mod, files in list(dataset.items()):
# missing[mod] = []
# qctype = 'anatomical' if mod == 't1w' else 'functional'
# for fname in files:
# m = expr.search(op.basename(fname)).groupdict()
# components = [m.get(key) for key in supported_components if m.get(key)]
# components.insert(0, qctype)
# report_fname = op.join(
# settings['report_dir'], '_'.join(components) + '_report.html')
# if not op.isfile(report_fname):
# missing[mod].append(
# {key: m.get(key) for key in supported_components if m.get(key)})
# mod_missing = missing[mod]
# if mod_missing:
# reports_missed = True
# if mod_missing and save_failed:
# out_file = op.join(settings['output_dir'], 'failed_%s.csv' % qctype)
# miss_cols = list(set(supported_components) & set(list(mod_missing[0].keys())))
# dframe = pd.DataFrame.from_dict(mod_missing).sort_values(
# by=miss_cols)
# dframe[miss_cols].to_csv(out_file, index=False)
# return reports_missed
``` |
{
"source": "jordantrc/domain-fudgery",
"score": 3
} |
#### File: jordantrc/domain-fudgery/fudge-domain.py
```python
import argparse
import itertools
import os
import sys
from dns import name
from dns import message
CHARACTER_LOOK_ALIKES = {
'a': ['d'],
'A': ['4'],
'b': ['1o', 'lo'],
'B': ['8'],
'd': ['ol', 'o1'],
'E': ['3'],
'i': ['1', 'l'],
'I': ['1', 'l'],
'l': ['1', 'i'],
'm': ['rn'],
'o': ['0'],
'O': ['0'],
'Q': ['O'],
's': ['5'],
'S': ['5'],
'T': ['7'],
'w': ['vv'],
'W': ['VV'],
'z': ['2'],
'Z': ['2'],
'0': ['O'],
'1': ['l'],
'2': ['Z'],
'4': ['A'],
'5': ['S'],
'7': ['T'],
'8': ['B']
}
# original TLDs, does not include restricted-use
# TLDs .edu, .gov, .mil, .int
TLDS_ORIGINAL = ['.com', '.net', '.org']
# country code TLDs
TLDS_COUNTRY_CODE = [
'.ac','.ad','.ae','.af','.ag','.ai','.al','.am','.ao','.aq','.ar','.as','.at','.au','.aw','.ax',
'.az','.ba','.bb','.bd','.be','.bf','.bg','.bh','.bi','.bj','.bm','.bn','.bo','.bq','.br','.bs',
'.bt','.bw','.by','.bz','.ca','.cc','.cd','.cf','.cg','.ch','.ci','.ck','.cl','.cm','.cn','.co',
'.cr','.cu','.cv','.cw','.cx','.cy','.cz','.de','.dj','.dk','.dm','.do','.dz','.ec','.ee','.eg',
'.eh','.er','.es','.et','.eu','.fi','.fj','.fk','.fm','.fo','.fr','.ga','.gd','.ge','.gf','.gg',
'.gh','.gi','.gl','.gm','.gn','.gp','.gq','.gr','.gs','.gt','.gu','.gw','.gy','.hk','.hm','.hn',
'.hr','.ht','.hu','.id','.ie','.il','.im','.in','.io','.iq','.ir','.is','.it','.je','.jm','.jo',
'.jp','.ke','.kg','.kh','.ki','.km','.kn','.kp','.kr','.kw','.ky','.kz','.la','.lb','.lc','.li',
'.lk','.lr','.ls','.lt','.lu','.lv','.ly','.ma','.mc','.md','.me','.mg','.mh','.mk','.ml','.mm',
'.mn','.mo','.mp','.mq','.mr','.ms','.mt','.mu','.mv','.mw','.mx','.my','.mz','.na','.nc','.ne',
'.nf','.ng','.ni','.nl','.no','.np','.nr','.nu','.nz','.om','.pa','.pe','.pf','.pg','.ph','.pk',
'.pl','.pm','.pn','.pr','.ps','.pt','.pw','.py','.qa','.re','.ro','.rs','.ru','.rw','.sa','.sb',
'.sc','.sd','.se','.sg','.sh','.si','.sk','.sl','.sm','.sn','.so','.sr','.ss','.st','.su','.sv',
'.sx','.sy','.sz','.tc','.td','.tf','.tg','.th','.tj','.tk','.tl','.tm','.tn','.to','.tr','.tt',
'.tv','.tw','.tz','.ua','.ug','.uk','.us','.uy','.uz','.va','.vc','.ve','.vg','.vi','.vn','.vu',
'.wf','.ws','.ye','.yt','.za','.zm'
]
# country codes with restricted second level domains (individuals or companies can
# only register third level domains)
TLDS_COUNTRY_CODE_RESTRICTED_LVL2 = [
'.au','.bn','.bt','.cy','.et','.fk','.gh','.gn','.gu','.jm','.ke','.kh','.kp','.kw','.lb','.lr',
'.ls','.mm','.mq','.mt','.mz','.ni','.np','.pa','.pg','.py','.qa','.sb','.sv','.sz','.th','.tz',
'.ve','.ye'
]
# the second level domains for those domains above that can be used
# for third level domains
TLDS_COUNTRY_CODE_UNRESTRICTED_LVL2 = [
'.com.au','.net.au','.org.au','.asn.au','.id.au','.com.bn','.edu.bn','.net.bn','.org.bn','.bt',
'.com.bt','.edu.bt','.net.bt','.org.bt','.ac.cy','.net.cy','.org.cy','.pro.cy','.name.cy',
'.ekloges.cy','.tm.cy','.ltd.cy','.biz.cy','.press.cy','.parliament.cy','.com.cy',
'.centralbank.cy','.com.et','.org.et','.edu.et','.net.et','.name.et','.co.fk','.org.fk',
'.ac.fk','.nom.fk','.net.fk','.com.gh','.edu.gh','.com.gn','.ac.gn','.org.gn','.net.gn',
'.com.gu','.net.gu','.org.gu','.edu.gu','.com.jm','.net.jm','.org.jm','.edu.jm','.co.ke',
'.or.ke','.ne.ke','.go.ke','.ac.ke','.sc.ke','.me.ke','.mobi.ke','.info.ke','.per.kh','.com.kh',
'.edu.kh','.net.kh','.org.kh','.aca.kp','.com.kp','.edu.kp','.law.kp','.org.kp','.rep.kp',
'.net.kp','.sca.kp','.com.kw','.ind.kw','.net.kw','.org.kw','.emb.kw','.edu.kw','.com.lb',
'.edu.lb','.net.lb','.org.lb','.com.lr','.edu.lr','.org.lr','.net.lr','.ac.ls','.co.ls',
'.net.ls','.nul.ls','.org.ls','.sc.ls','.net.mm','.com.mm','.edu.mm','.org.mm','.edu.mt',
'.com.mt','.net.mt','.org.mt','.co.mz','.net.mz','.org.mz','.ac.mz','.edu.mz','.gob.ni',
'.co.ni','.com.ni','.ac.ni','.edu.ni','.org.ni','.nom.ni','.net.ni','.edu.np','.com.np',
'.org.np','.net.np','.aero.np','.asia.np','.biz.np','.coop.np','.info.np','.jobs.np','.mobi.np',
'.museum.np','.name.np','.pro.np','.services.np','.travel.np','.net.pa','.com.pa','.ac.pa',
'.sld.pa','.edu.pa','.org.pa','.abo.pa','.ing.pa','.med.pa','.nom.pa','.com.pg','.net.pg',
'.ac.pg','.org.pg','.com.py','.coop.py','.edu.py','.org.py','.net.py','.una.py','.com.qa',
'.edu.qa','.sch.qa','.net.qa','.org.qa','.com.sb','.net.sb','.edu.sv','.com.sv','.org.sv',
'.red.sv','.co.sz','.ac.sz','.org.sz','.ac.th','.co.th','.or.th','.net.th','.in.th','.co.tz',
'.ac.tz','.or.tz','.ne.tz','.hotel.tz','.mobi.tz','.tv.tz','.info.tz','.me.tz','.arts.ve',
'.co.ve','.com.ve','.info.ve','.net.ve','.org.ve','.radio.ve','.web.ve','.com.ye','.co.ye',
'.ltd.ye','.me.ye','.net.ye','.org.ye','.plc.ye'
]
def replacement_combinations(indices):
"""returns a list of all possible replacement combinations for count
instances of a character in a string"""
result = []
for i in range(1, len(indices) + 1):
for c in itertools.combinations(indices, i):
result.append(c)
return result
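# Illustrative example: replacement_combinations([0, 2]) returns
# [(0,), (2,), (0, 2)] -- every non-empty subset of the index positions, so each
# subset becomes one candidate substitution pattern in permutate_domain() below.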
def permutate_domain(domain, character, replacements):
    """returns all permutations of character replacements"""
    new_domains = []
    indices = [ i for i, ltr in enumerate(domain) if ltr == character ]
    combinations = replacement_combinations(indices)
    for c in combinations:
        # try every combination of replacement choices for the selected indices
        for choices in itertools.product(replacements, repeat=len(c)):
            new_domain = domain
            # apply substitutions right-to-left so multi-character replacements
            # (e.g. 'm' -> 'rn') do not shift the indices still to be replaced
            for i, r in sorted(zip(c, choices), reverse=True):
                new_domain = new_domain[:i] + r + new_domain[i + 1:]
            new_domains.append(new_domain)
    return new_domains
def domain_permutations(domain, orig_tld, country_code_tlds=False, original_tlds=False, custom_tlds=[]):
"""returns a list of domains to check"""
result = []
domains = [domain, domain.upper()]
# character replacement
for c in CHARACTER_LOOK_ALIKES.keys():
for d in domains:
count = d.count(c)
if count > 0:
permutated_domains = permutate_domain(d, c, CHARACTER_LOOK_ALIKES[c])
for p in permutated_domains:
print(p + orig_tld)
def main():
"""Main function."""
parser = argparse.ArgumentParser(description="Finds fudged domains.")
parser.add_argument("--country-code-tlds", action='store_true', dest="country_code_tld", help="look for unregistered country code TLDs")
parser.add_argument("--original-tlds", action='store_true', dest="original_tld", help="look for unregistered original TLDs")
parser.add_argument("--custom-tlds", dest="custom_tld", help="look for custom list of TLDs")
parser.add_argument("--no-whois", action='store_true', dest="no_whois", help="disable whois queries")
parser.add_argument("--file", dest="file", help="file containing DNS names to load")
parser.add_argument("--no-active", action='store_true', dest="no_active", help="disable active checks")
parser.add_argument("domain", nargs='*', help="domain to fudge")
args = parser.parse_args()
# ensure at least one domain was provided
if not args.file and not args.domain:
print("[-] must provide a domain as argument or a file containing domains")
sys.exit(1)
domains = []
if args.file:
if os.path.isfile(args.file):
with open(args.file, "r") as fd:
domains = fd.readlines()
else:
print("[-] file not found or permission denied")
sys.exit(1)
    if args.domain:
        domains.extend(args.domain)
# for each domain, determine TLDs for domain
for d in domains:
        domain_parts = d.strip().split(".")
domain = domain_parts[0]
tld = "." + ".".join(domain_parts[1:])
domain_permutations(domain, tld)
if __name__ == "__main__":
main()
``` |
{
"source": "jordantrc/port_scanners",
"score": 3
} |
#### File: port_scanners/reporting/masscan_summary_report.py
```python
import csv
import ipaddress
import os
import sys
def get_matching_index(data, target, open_tcp, open_udp):
"""returns the index in data that matches all three variables"""
index = None
for i, d in enumerate(data):
if d['target'] == target and d['open_tcp'] == open_tcp and d['open_udp'] == open_udp:
index = i
break
return index
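# Illustrative example (hypothetical scans): if scan_a.csv and scan_b.csv both
# report 192.168.1.0/24 with open_tcp "22,443" and no open UDP ports, the two
# rows collapse into one summary entry whose 'sources' column lists both files,
# because rows are matched on the (target, open_tcp, open_udp) triple.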
def main():
"""main function"""
if len(sys.argv) != 3:
print("Usage: masscan_summary_report.py <directory> <output file>")
sys.exit(1)
directory = sys.argv[1]
output_file = sys.argv[2]
if not os.path.isdir(directory):
print("[-] path provided is not a directory")
sys.exit(1)
print("[*] summarizing %s" % directory)
# data = [{'target': network, 'open_tcp': ports, 'open_udp': ports, 'sources': [source1, source2, ...]},
# ]
data = []
for f in os.listdir(directory):
if f.endswith(".csv"):
path = os.path.join(directory, f)
print('[*] processing %s' % path)
with open(path, 'r', newline="") as csv_fd:
reader = csv.reader(csv_fd, dialect='excel')
header = next(reader)
for row in reader:
target = row[0]
source = row[1]
open_tcp = row[2]
open_udp = row[3]
index = get_matching_index(data, target, open_tcp, open_udp)
if index is not None:
data[index]['sources'].append(source)
else:
data.append({
'target': target,
'open_tcp': open_tcp,
'open_udp': open_udp,
'sources': [source]
})
# data cleanup
for d in data:
sources = "; ".join(d['sources'])
d['sources'] = sources
d['network_id'] = int(ipaddress.IPv4Network(d['target']).network_address)
# write the summary report
field_names = ['network_id', 'target', 'sources', 'open_tcp', 'open_udp']
with open(output_file, 'w', newline="") as csv_fd:
writer = csv.DictWriter(csv_fd, fieldnames=field_names)
writer.writeheader()
for d in data:
writer.writerow(d)
if __name__ == "__main__":
main()
```
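A small illustration (not part of the repository) of the merge key used by `get_matching_index` above: rows are combined only when the target and both port strings match exactly.
```python
# Hypothetical rows in the shape built by main().
data = [{'target': '10.0.0.0/24', 'open_tcp': '22,443', 'open_udp': '',
         'sources': ['scan1.csv']}]

print(get_matching_index(data, '10.0.0.0/24', '22,443', ''))  # 0 (exact match)
print(get_matching_index(data, '10.0.0.0/24', '22', ''))      # None (port list differs)
```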
#### File: jordantrc/port_scanners/verify_and_report.py
```python
import argparse
import csv
import ipaddress
import multiprocessing
import os
import subprocess
import sys
def host_output(output_directory, proto, port, host):
"""Write a host to an output file."""
output_file = os.path.join(output_directory, "%s_%s.txt" % (proto, port))
# write the host to the output file
with open(output_file, 'a') as host_fd:
host_fd.write("%s\n" % host)
def parse_scan_file(scan_file, output_directory, exclude_ports, verboseprint):
"""Parses the initial scan file."""
with open(scan_file, 'r') as scan_fd:
for i, line in enumerate(scan_fd):
# read the header line to determine the file type
if i == 0:
if "#masscan" in line:
file_type = "masscan"
elif "# Nmap" in line:
file_type = "nmap"
else:
assert False, "file type unknown"
verboseprint("[*] file type is %s" % file_type)
port_info = parse_line(line, file_type, verboseprint) # returns [[state, proto, port, host, banner],]
for p in port_info:
if p[0] == "open" and p[2] not in exclude_ports:
host_output(output_directory, p[1], p[2], p[3])
def produce_report(output_directory, report_file, verboseprint):
"""Generates the report."""
verboseprint("[*] entering produce_report function")
file_list = os.listdir(output_directory)
service_detection_files = [os.path.join(output_directory, x) for x in file_list if "service_detection" in x]
with open(report_file, 'w') as csv_fd:
csvwriter = csv.writer(csv_fd, dialect='excel')
csvwriter.writerow(['host', 'protocol', 'port', 'state', 'service_info'])
for s in service_detection_files:
with open(s, 'r') as s_fd:
for line in s_fd.readlines():
                    port_info = parse_line(line, "nmap", verboseprint)
                    # parse_line returns a list of [state, proto, port, host, banner] records
                    for entry in port_info:
                        #print("line = %s, port_info = %s" % (line, port_info))
                        csvwriter.writerow([entry[3], entry[1], entry[2], entry[0], entry[4]])
def parse_line(line, file_type, verboseprint):
"""Parse a scan file line, returns state, proto, port, host, banner."""
result = []
if line[0] == "#":
return result
if file_type == "masscan":
state, proto, port, host, _ = line.split()
result = ["open", proto, port, host, ""]
elif file_type == "nmap":
# Ignore these lines:
# Host: 10.1.1.1 () Status: Up
if "Status:" not in line:
# Host: 10.1.1.1 () Ports: 21/filtered/tcp//ftp///, 80/open/tcp//http///,
# 53/open|filtered/udp//domain///, 137/open/udp//netbios-ns/// Ignored State: filtered (195)
# Ports: 25/open/tcp//smtp//Microsoft Exchange smtpd/
host_info, port_info = line.split("Ports:")
host = host_info.strip().split(' ')[1]
# get the port information
port_info = port_info.strip()
if "Ignored State" in port_info:
port_info, _ = port_info.split('Ignored State:')
port_info = port_info.strip()
port_list = port_info.split(',')
port_list = [ x.strip() for x in port_list ]
for p in port_list:
try:
port, state, proto, _, _, _, banner, _ = p.split('/')
result.append([state, proto, port, host, banner])
except ValueError as err:
print("[-] Error occurred: %s" % str(err))
print("[-] offending line: %s" % p)
verboseprint("[*] parse line result: %s" % result)
return result
def probe_service(args):
"""Probes all hosts given the host file, which has the naming convention <protocol>_<port>.txt"""
pps = args[0]
host_file = args[1]
host_file_name = os.path.basename(host_file)
host_file_dir = os.path.dirname(host_file)
protocol, port = host_file_name.split(".")[0].split("_")
#print("host_file_name = %s, protocol = %s, port = %s" % (host_file_name, protocol, port))
if protocol in ["tcp", "udp"]:
output_file = os.path.join(host_file_dir, "service_detection_%s_%s.gnmap" % (protocol, port))
# determine the scan type and run it
if protocol == "tcp":
scan_type = "" # nmap defaults to a TCP scan
else:
scan_type = "-sU"
nmap_command = "nmap %s -Pn -p%s --max-rate %s -sV -iL %s -oG %s" % (scan_type, port, pps, host_file, output_file)
print("[*] initiating service detection for %s/%s" % (protocol.upper(), port))
print(nmap_command)
nmap_command = nmap_command.split()
result = subprocess.run(nmap_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if len(result.stderr) > 0:
print("[-] ERROR in nmap command %s" % nmap_command)
print("[-] %s" % result.stderr.decode('ascii'))
def main():
verbose = False
parser = argparse.ArgumentParser("verifies and reports on a masscan file")
parser.add_argument("-x", "--exclude", required=False, help="ports to exclude, comma-sparated")
parser.add_argument("-v", "--verbose", action="store_true", required=False, help="provide verbose output")
parser.add_argument("scan_file", nargs=1, help="masscan file to use for verification and reporting")
parser.add_argument("num_scans", nargs=1, help="number of scans to run concurrently")
parser.add_argument("max_pps", nargs=1, help="maximum packets per second across all scans")
args = parser.parse_args()
# required arguments
scan_file = args.scan_file[0]
assert os.path.isfile(scan_file), "scan file %s does not exist" % scan_file
# get directory of scan file
scan_file_dir = os.path.dirname(scan_file)
scan_file_base = os.path.splitext(os.path.basename(scan_file))[0]
if scan_file_dir == "":
scan_file_dir = "."
output_directory = os.path.join(scan_file_dir, scan_file_base)
report_output_file = os.path.join(output_directory, scan_file_base + ".csv")
assert not os.path.isdir(output_directory), "output directory %s already exists" % output_directory
num_scans = int(args.num_scans[0])
max_pps = int(args.max_pps[0])
pps_per_scan = max(1, int(max_pps / num_scans))
# options
exclude_ports = []
if args.exclude is not None:
exclude_ports = args.exclude.split(",")
exclude_ports = [x.strip() for x in exclude_ports]
    if args.verbose:
        verbose = True
        print("[*] enabling verbose output")
verboseprint = print if verbose else lambda *a, **k: None
# make the directory
verboseprint("[*] creating directory")
os.mkdir(output_directory, 0o755)
parse_scan_file(scan_file, output_directory, exclude_ports, verboseprint)
# output_directory is now full of files named protocol_port number.txt
host_files = os.listdir(output_directory)
host_files = [[pps_per_scan, os.path.join(output_directory, x)] for x in host_files]
with multiprocessing.Pool(processes=num_scans) as pool:
pool.map(probe_service, host_files)
produce_report(output_directory, report_output_file, verboseprint)
if __name__ == "__main__":
main()
``` |
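A rough sketch (not from the repository) of what `parse_line` above returns for the two input formats; the sample lines and the no-op `quiet` printer are made up for illustration. Note that the masscan branch returns a single flat record while the nmap branch returns a list of records.
```python
quiet = lambda *a, **k: None  # stand-in for verboseprint

masscan_line = "open tcp 443 203.0.113.10 1617200000"
print(parse_line(masscan_line, "masscan", quiet))
# ['open', 'tcp', '443', '203.0.113.10', '']

nmap_line = ("Host: 10.1.1.1 ()\tPorts: 21/filtered/tcp//ftp///, "
             "80/open/tcp//http///")
print(parse_line(nmap_line, "nmap", quiet))
# [['filtered', 'tcp', '21', '10.1.1.1', ''],
#  ['open', 'tcp', '80', '10.1.1.1', '']]
```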
{
"source": "jordantrc/shodan-parser",
"score": 3
} |
#### File: jordantrc/shodan-parser/shodan_parser.py
```python
import argparse
import csv
import ipaddress
import json
import os
port_fields = [
'ip',
'ip_str',
'transport',
'port',
'product',
'hostnames',
'cpe',
'timestamp'
]
http_fields = [
'host',
'title',
'server',
'robots',
'location',
'waf'
]
vulnerability_fields = [
'ip_str',
'transport',
'port',
'cve',
'verified',
'cvss',
'summary'
]
def write_csv(file, data, fields):
with open(file, 'w', newline='', encoding="utf-8") as csv_fd:
csvwriter = csv.DictWriter(csv_fd, fieldnames=fields, dialect='excel')
data = [x for x in data if x is not None] # remove null values
csvwriter.writeheader()
for d in data:
csvwriter.writerow(d)
def main():
parser = argparse.ArgumentParser("Shodan Parser")
parser.add_argument("input_file", nargs=1, help="Shodan output source file")
parser.add_argument("output_base", nargs=1, help="Output file base name")
args = parser.parse_args()
input_file = args.input_file[0]
output_base = args.output_base[0]
print("[*] input file = {}".format(input_file))
print("[*] output base name = {}".format(output_base))
with open(input_file, 'r') as json_fd:
json_lines = json_fd.readlines()
ports = []
http = []
vulns = []
addresses = set()
for l in json_lines:
json_obj = json.loads(l)
# port information
port_data_dict = {}
for f in port_fields:
try:
if type(json_obj[f]) != list:
port_data_dict[f] = json_obj[f]
else:
values = json_obj[f]
port_data_dict[f] = ','.join(values)
except:
continue
ports.append(port_data_dict)
# http information
http_data_dict = None
if 'http' in json_obj.keys():
http_data_dict = {}
for f in http_fields:
try:
if type(json_obj['http'][f]) != list:
http_data_dict[f] = json_obj['http'][f]
else:
values = json_obj['http'][f]
http_data_dict[f] = ','.join(values)
except:
continue
http.append(http_data_dict)
vuln_data_dict = None
if 'vulns' in json_obj.keys():
for v in json_obj['vulns'].keys():
vuln_data_dict = {}
try:
for f in vulnerability_fields:
vuln_data_dict[f] = port_data_dict[f]
except:
pass
vuln_data_dict['cve'] = v
vuln_data_dict['verified'] = json_obj['vulns'][v]['verified']
vuln_data_dict['cvss'] = json_obj['vulns'][v]['cvss']
vuln_data_dict['summary'] = json_obj['vulns'][v]['summary']
# print("DEBUG vuln_data_dict = {}".format(vuln_data_dict))
vulns.append(vuln_data_dict)
# create the list of addresses
for p in ports:
addresses.add(p['ip_str'])
# print a summary of what was found
print("Parsed {} IP addresses, {} port entries, {} http entries, {} vulnerabilities".format(len(list(addresses)), len(ports), len(http), len(vulns)))
# output
port_output_file = "{}_ports.csv".format(output_base)
http_output_file = "{}_http.csv".format(output_base)
vuln_output_file = "{}_vulns.csv".format(output_base)
addr_output_file = "{}_ips.txt".format(output_base)
write_csv(port_output_file, ports, port_fields)
write_csv(http_output_file, http, http_fields)
write_csv(vuln_output_file, vulns, vulnerability_fields)
with open(addr_output_file, 'w') as ip_fd:
for a in sorted(list(addresses)):
ip_fd.write(a + "\n")
if __name__ == "__main__":
main()
``` |
{
"source": "jordanvance/xlcalculator",
"score": 3
} |
#### File: xlcalculator/tests/testing.py
```python
import os
import unittest
from dataclasses import dataclass
from decimal import Decimal, ROUND_UP, ROUND_DOWN
from xlcalculator import model, evaluator
RESOURCE_DIR = os.path.join(os.path.dirname(__file__), 'resources')
def get_resource(filename):
return os.path.join(RESOURCE_DIR, filename)
@dataclass
class f_token:
tvalue: str
ttype: str
tsubtype: str
@classmethod
def from_token(cls, token):
return cls(token.tvalue, token.ttype, token.tsubtype)
def __repr__(self):
return "<{} tvalue: {} ttype: {} tsubtype: {}>".format(
self.__class__.__name__, self.tvalue, self.ttype, self.tsubtype)
def __str__(self):
return self.__repr__()
class XlCalculatorTestCase(unittest.TestCase):
def assertEqualRounded(self, lhs, rhs, rounding_precision=None):
if rounding_precision is None:
lhs_split = str(lhs).split('.')
rhs_split = str(rhs).split('.')
if len(lhs_split) > 1:
len_lhs_after_decimal = len(lhs_split[1])
else:
len_lhs_after_decimal = None
if len(rhs_split) > 1:
len_rhs_after_decimal = len(rhs_split[1])
else:
len_rhs_after_decimal = None
if len_lhs_after_decimal is None or len_rhs_after_decimal is None:
return self.assertEqual(round(lhs), round(rhs))
rounding_precision = min(
len_lhs_after_decimal, len_rhs_after_decimal)
precision_mask = "{0:." + str(rounding_precision - 1) + "f}1"
precision = precision_mask.format(0.0)
if lhs > rhs:
lhs_value = Decimal(lhs).quantize(Decimal(
precision), rounding=ROUND_DOWN)
rhs_value = Decimal(rhs).quantize(
Decimal(precision), rounding=ROUND_UP)
else:
lhs_value = Decimal(rhs).quantize(
Decimal(precision), rounding=ROUND_UP)
rhs_value = Decimal(lhs).quantize(
Decimal(precision), rounding=ROUND_DOWN)
return self.assertEqual(lhs_value, rhs_value)
def assertEqualTruncated(self, lhs, rhs, truncating_places=None):
lhs_before_dec, lhs_after_dec = str(lhs).split('.')
rhs_before_dec, rhs_after_dec = str(rhs).split('.')
if truncating_places is None:
truncating_places = min(
len(str(lhs).split('.')[1]), len(str(rhs).split('.')[1]))
if 'E' in lhs_after_dec:
lhs_value = float('.'.join((lhs_before_dec, lhs_after_dec)))
else:
lhs_value = float('.'.join((
lhs_before_dec, lhs_after_dec[0:truncating_places])))
        if 'E' in rhs_after_dec:
rhs_value = float('.'.join((rhs_before_dec, rhs_after_dec)))
else:
rhs_value = float('.'.join((
rhs_before_dec, rhs_after_dec[0:truncating_places])))
return self.assertAlmostEqual(lhs_value, rhs_value, truncating_places)
def assertASTNodesEqual(self, lhs, rhs):
lhs = [f_token.from_token(t) for t in lhs]
rhs = [f_token.from_token(t) for t in rhs]
return self.assertEqual(lhs, rhs)
class FunctionalTestCase(XlCalculatorTestCase):
filename = None
def setUp(self):
compiler = model.ModelCompiler()
self.model = compiler.read_and_parse_archive(
get_resource(self.filename))
self.evaluator = evaluator.Evaluator(self.model)
```
#### File: tests/xlfunctions_vs_excel/if_test.py
```python
from .. import testing
class CountIfTest(testing.FunctionalTestCase):
filename = "IF.xlsx"
def test_evaluation_ABCDE_1(self):
for col in "ABCDE":
cell = f'Sheet1!{col}1'
excel_value = self.evaluator.get_cell_value(cell)
value = self.evaluator.evaluate(cell)
self.assertEqual(excel_value, value)
```
#### File: xlcalculator/xlfunctions/logical.py
```python
from typing import Tuple
from . import xl, xlerrors, func_xltypes
@xl.register()
@xl.validate_args
def AND(
*logicals: Tuple[func_xltypes.XlExpr]
) -> func_xltypes.XlBoolean:
"""Determine if all conditions in a test are TRUE
https://support.office.com/en-us/article/
and-function-5f19b2e8-e1df-4408-897a-ce285a19e9d9
"""
if not logicals:
raise xlerrors.NullExcelError('logical1 is required')
    # Use delayed evaluation to minimize the number of values to evaluate.
for logical in logicals:
val = logical()
for item in xl.flatten([val]):
if func_xltypes.Blank.is_blank(item):
continue
if not bool(item):
return False
return True
@xl.register()
@xl.validate_args
def FALSE() -> func_xltypes.XlBoolean:
"""Returns the logical value FALSE.
https://support.office.com/en-us/article/
false-function-2d58dfa5-9c03-4259-bf8f-f0ae14346904
"""
return False
@xl.register()
@xl.validate_args
def OR(
*logicals: Tuple[func_xltypes.XlExpr]
) -> func_xltypes.XlBoolean:
"""Determine if any conditions in a test are TRUE.
https://support.office.com/en-us/article/
or-function-7d17ad14-8700-4281-b308-00b131e22af0
"""
if not logicals:
raise xlerrors.NullExcelError('logical1 is required')
    # Use delayed evaluation to minimize the number of values to evaluate.
for logical in logicals:
val = logical()
for item in xl.flatten([val]):
if func_xltypes.Blank.is_blank(item):
continue
if bool(item):
return True
return False
@xl.register()
@xl.validate_args
def IF(
logical_test: func_xltypes.XlExpr,
value_if_true: func_xltypes.XlExpr = True,
value_if_false: func_xltypes.XlExpr = False
):
"""Return one value if a condition is true and another value if it's false.
https://support.office.com/en-us/article/
if-function-69aed7c9-4e8a-4755-a9bc-aa8bbff73be2
"""
# Use delayed evaluation to only evaluate the true or false value but not
# both.
return value_if_true() if logical_test() else value_if_false()
@xl.register()
@xl.validate_args
def NOT(logical: func_xltypes.XlExpr) -> func_xltypes.XlBoolean:
"""Return inverse of boolean representation of value.
https://support.microsoft.com/en-us/office/
not-function-9cfc6011-a054-40c7-a140-cd4ba2d87d77
"""
return not bool(logical())
@xl.register()
@xl.validate_args
def TRUE() -> func_xltypes.XlBoolean:
"""Returns the logical value TRUE.
https://support.office.com/en-us/article/
true-function-7652c6e3-8987-48d0-97cd-ef223246b3fb
"""
return True
``` |
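The "delayed evaluation" comments above rely on every logical argument being a zero-argument callable, so only the branch that is needed ever gets computed. The snippet below is a self-contained illustration of that idea only; it bypasses the `xl.register`/`validate_args` machinery and the `XlExpr` wrappers, which may coerce arguments differently.
```python
# Conceptual sketch of the lazy-argument contract, not the real xlcalculator API.
def lazy_if(test, if_true, if_false):
    return if_true() if test() else if_false()

print(lazy_if(lambda: 2 > 1,
              lambda: "taken",
              lambda: 1 / 0))  # prints "taken"; the false branch is never evaluated
```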
{
"source": "jordanvance/z3c.rml",
"score": 2
} |
#### File: z3c/rml/directive.py
```python
import logging
import zope.interface
import zope.schema
from lxml import etree
from z3c.rml import interfaces
from z3c.rml.attr import getManager
logging.raiseExceptions = False
logger = logging.getLogger("z3c.rml")
ABORT_ON_INVALID_DIRECTIVE = False
def DeprecatedDirective(iface, reason):
zope.interface.directlyProvides(iface, interfaces.IDeprecatedDirective)
iface.setTaggedValue('deprecatedReason', reason)
return iface
def getFileInfo(directive, element=None):
root = directive
while root.parent:
root = root.parent
if element is None:
element = directive.element
return '(file %s, line %i)' %(root.filename, element.sourceline)
@zope.interface.implementer(interfaces.IRMLDirective)
class RMLDirective:
signature = None
factories = {}
def __init__(self, element, parent):
self.element = element
self.parent = parent
def getAttributeValues(self, ignore=None, select=None, attrMapping=None,
includeMissing=False, valuesOnly=False):
"""See interfaces.IRMLDirective"""
manager = getManager(self)
cache = '{}.{}'.format(self.signature.__module__, self.signature.__name__)
if cache in manager.attributesCache:
fields = manager.attributesCache[cache]
else:
fields = []
for name, attr in zope.schema.getFieldsInOrder(self.signature):
fields.append((name, attr))
manager.attributesCache[cache] = fields
items = []
for name, attr in fields:
# Only add the attribute to the list, if it is supposed there
if ((ignore is None or name not in ignore) and
(select is None or name in select)):
# Get the value.
value = attr.bind(self).get()
# If no value was found for a required field, raise a value
# error
if attr.required and value is attr.missing_value:
raise ValueError(
'No value for required attribute "%s" '
'in directive "%s" %s.' % (
name, self.element.tag, getFileInfo(self)))
# Only add the entry if the value is not the missing value or
# missing values are requested to be included.
if value is not attr.missing_value or includeMissing:
items.append((name, value))
# Sort the items based on the section
if select is not None:
select = list(select)
items = sorted(items, key=lambda n: select.index(n[0]))
# If the attribute name does not match the internal API
# name, then convert the name to the internal one
if attrMapping:
items = [(attrMapping.get(name, name), value)
for name, value in items]
# Sometimes we only want the values without the names
if valuesOnly:
return [value for name, value in items]
return items
def processSubDirectives(self, select=None, ignore=None):
# Go through all children of the directive and try to process them.
for element in self.element.getchildren():
# Ignore all comments
if isinstance(element, etree._Comment):
continue
# Raise an error/log any unknown directive.
if element.tag not in self.factories:
msg = "Directive %r could not be processed and was " \
"ignored. %s" %(element.tag, getFileInfo(self, element))
# Record any tags/elements that could not be processed.
logger.warning(msg)
if ABORT_ON_INVALID_DIRECTIVE:
raise ValueError(msg)
continue
if select is not None and element.tag not in select:
continue
if ignore is not None and element.tag in ignore:
continue
directive = self.factories[element.tag](element, self)
directive.process()
def process(self):
self.processSubDirectives()
```
#### File: z3c/rml/page.py
```python
import io
from z3c.rml import attr, directive, interfaces
try:
import pikepdf
from pikepdf import Object
except ImportError:
# We don't want to require pikepdf, if you do not want to use the features
# in this module.
pikepdf = None
def mergePage(layerPage, mainPage, pdf, name) -> None:
contentsForName = pdf.copy_foreign(
pikepdf.Page(layerPage).as_form_xobject()
)
newContents = b'q\n %s Do\nQ\n' % (name.encode())
if not mainPage.Resources.get("/XObject"):
mainPage.Resources["/XObject"] = pikepdf.Dictionary({})
mainPage.Resources["/XObject"][name] = contentsForName
# Use the MediaBox from the merged page
mainPage.MediaBox = layerPage.MediaBox
mainPage.contents_add(
contents=pikepdf.Stream(pdf, newContents),
prepend=True
)
class MergePostProcessor:
def __init__(self):
self.operations = {}
def process(self, inputFile1):
input1 = pikepdf.open(inputFile1)
count = 0
for (num, page) in enumerate(input1.pages):
if num in self.operations:
for mergeFile, mergeNumber in self.operations[num]:
mergePdf = pikepdf.open(mergeFile)
toMerge = mergePdf.pages[mergeNumber]
name = f"/Fx{count}"
mergePage(toMerge, page, input1, name)
outputFile = io.BytesIO()
input1.save(outputFile)
return outputFile
class IMergePage(interfaces.IRMLDirectiveSignature):
"""Merges an existing PDF Page into the one to be generated."""
filename = attr.File(
title='File',
description=('Reference to the PDF file to extract the page from.'),
required=True)
page = attr.Integer(
title='Page Number',
        description='The page number of the PDF file that is used to merge.',
required=True)
class MergePage(directive.RMLDirective):
signature = IMergePage
def getProcessor(self):
manager = attr.getManager(self, interfaces.IPostProcessorManager)
procs = dict(manager.postProcessors)
if 'MERGE' not in procs:
proc = MergePostProcessor()
manager.postProcessors.append(('MERGE', proc))
return proc
return procs['MERGE']
def process(self):
if pikepdf is None:
raise Exception(
'pikepdf is not installed, so this feature is not available.')
inputFile, inPage = self.getAttributeValues(valuesOnly=True)
manager = attr.getManager(self, interfaces.ICanvasManager)
outPage = manager.canvas.getPageNumber()-1
proc = self.getProcessor()
pageOperations = proc.operations.setdefault(outPage, [])
pageOperations.append((inputFile, inPage))
class MergePageInPageTemplate(MergePage):
def process(self):
if pikepdf is None:
raise Exception(
'pikepdf is not installed, so this feature is not available.')
inputFile, inPage = self.getAttributeValues(valuesOnly=True)
onPage = self.parent.pt.onPage
def drawOnCanvas(canvas, doc):
onPage(canvas, doc)
outPage = canvas.getPageNumber()-1
proc = self.getProcessor()
pageOperations = proc.operations.setdefault(outPage, [])
pageOperations.append((inputFile, inPage))
self.parent.pt.onPage = drawOnCanvas
```
#### File: rml/tests/test_dtd.py
```python
import os
import unittest
import lxml.etree
from z3c.rml import dtd
class DTDTestCase(unittest.TestCase):
level = 2
def runTest(self):
path = os.path.join(os.path.dirname(dtd.__file__), 'rml.dtd')
# Write the file.
with open(path, 'w') as file:
file.write(dtd.generate())
# Ensure we produced a processable DTD.
try:
edtd = lxml.etree.DTD(path)
except:
raise
import pdb; pdb.set_trace()
def test_suite():
return unittest.TestSuite([DTDTestCase()])
``` |
{
"source": "jordanvrtanoski/block",
"score": 2
} |
#### File: jordanvrtanoski/block/setup.py
```python
import sys
import os
import shutil
import errno
from block_utils import (
PATH as DIR, __version__ as VERSION, __name__ as NAME,
__author__ as AUTHOR, __email__ as EMAIL, __description__ as DESCRIPTION,
__license__ as LICENSE
)
import logging
from distutils.ccompiler import new_compiler
from distutils.core import setup, Extension
from distutils import sysconfig
logging.basicConfig( level=logging.DEBUG )
LOGGER = logging.getLogger( 'SETUP' )
# Set platform constants.
PYVERSION = sys.version_info
PLATFORM = sys.platform
SUPPORTED_PLATFORMS = ['win32', 'linux', 'linux2', 'darwin']
# Define `block/block` and `block/block/build` paths.
BLOCK_DIR = os.path.join( DIR, 'block' )
BUILD_DIR = os.path.join( BLOCK_DIR, 'build' )
def clean():
"""
Clean build directory (removing the entire `BUILD` tree).
"""
# Check if `BUILD_DIR` exists.
if os.path.exists( BUILD_DIR ):
# Remove the entire `BUILD` tree.
shutil.rmtree( BUILD_DIR )
def build():
"""
Compile library into `BUILD` directory.
"""
# Clean build directory.
clean()
# Create directory `./block/build`.
try:
os.makedirs( BUILD_DIR )
except OSError as exception:
# Raise Error whether `./block` isn't a valid path.
if not exception.errno == errno.EEXIST:
raise
try:
# Create distutils C compiler.
CC = new_compiler()
# Pass include directory.
CC.add_include_dir( BLOCK_DIR )
CC.add_include_dir( sysconfig.get_python_inc() )
# Compile core.
objects = CC.compile([ os.path.join( BLOCK_DIR, 'core.c' ) ], output_dir=BUILD_DIR )
# Link shared libraries.
CC.link_shared_lib( objects, "core", output_dir=BUILD_DIR )
except Exception as e:
clean()
raise e
if 'install' in sys.argv or 'build' in sys.argv:
# If `install` or `build` are passed as parameters then
# compile the library.
build()
elif 'clean' in sys.argv:
# If `clean` is passed as parameter clean build – Delete
# `./block/build`.
clean()
# Open and store content of `README.md` into `README`.
README = 'README.md'
try:
with open( os.path.join( DIR, README ), 'r' ) as readme:
README = readme.read()
except IOError:
README = ''
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=README,
author=AUTHOR,
author_email=EMAIL,
license=LICENSE,
platforms=SUPPORTED_PLATFORMS,
)
``` |
{
"source": "JordanV/TempNetSVG",
"score": 3
} |
#### File: JordanV/TempNetSVG/main.py
```python
import sys
import pdb
import svgfig
import json
import os
import math
import random
def show_help():
print("Usage: main.py input_file [--silent] [--output=<out.svg>]" +
" [--order=order.txt]")
print("Input file is either a text file containing t u v," +
"or a JSON file where the following properties are available:")
print(" from")
print(" to")
print(" time")
print(" color: to be chosen in " +
"http://www.december.com/html/spec/colorsvg.html")
print("The orderFile contains a list of all nodes to display " +
"in the order of appearance in orderFile.")
def read_argv(argv):
for arg in sys.argv:
if "=" in arg:
content = arg.split("=")
arg_name = content[0].replace("--", "")
argv[arg_name] = content[1]
elif "--" in arg:
arg_name = arg.replace("--", "")
argv[arg_name] = True
def version():
sys.stderr.write("\tLinkStreamViz 1.0 -- <NAME> 2015\n")
class idGenerator:
"""generates id"""
def __init__(self):
self.lookUp = dict() # dict[Node] = id
self.idCount = 0
self.reverse = dict() # dict[id] = node
def impose(self, node, id_):
self.lookUp[node] = id_
self.reverse[id_] = node
def contains(self, element):
return element in self.lookUp
def get(self, element):
if element not in self.lookUp:
while self.idCount in self.reverse and self.reverse[self.idCount] != element:
self.idCount += 1
self.lookUp[element] = self.idCount
self.reverse[self.idCount] = element
return self.lookUp[element]
def size(self):
return len(self.lookUp)
class Link:
def __init__(self, t, u, v, color="black", direction=0, duration=0, duration_color="black"):
self.t = float(t)
self.u = int(min(u, v))
self.v = int(max(u, v))
self.color = color
self.direction = direction
self.duration = duration
self.duration_color = duration_color
@staticmethod
def from_dict(link):
obj = Link(link["time"],
link["from"],
link["to"])
obj.color = link.get("color", "black")
obj.direction = link.get("direction", 0)
obj.duration = float(link.get("duration", 0))
obj.duration_color = link.get("duration_color", "black")
return obj
class LinkStream:
def __init__(self, inputFile, orderFile=""):
self.links = []
self.max_time = 0
self.nodeID = idGenerator()
self.max_label_len = 0
        self.g = svgfig.SVG("g")
        self.nodes = []  # nodes in imposed display order (filled when an order file is given)
        self.ppux = 10  # pixels per unit time
if "json" in inputFile:
with open(inputFile, 'r') as inFile:
json_struct = json.loads(inFile.read())
for link_json in json_struct:
link = Link.from_dict(link_json)
self.addNode(link.u)
self.addNode(link.v)
if (link.t + link.duration) > self.max_time:
self.max_time = link.t + link.duration
self.links.append(link)
else:
with open(inputFile, 'r') as inFile:
for line in inFile:
contents = line.split(" ")
t = float(contents[0])
u = int(contents[1])
v = int(contents[2])
d = 0
if len(contents) > 3:
d = float(contents[3])
self.addNode(u)
self.addNode(v)
if t > self.max_time:
self.max_time = t
self.links.append(Link(t, u, v, duration=d))
if orderFile != "":
tmp_nodes = set()
with open(orderFile, 'r') as order:
for i, n in enumerate(order):
node = int(n)
tmp_nodes.add(node)
if self.nodeID.contains(node):
self.nodeID.impose(node, i)
self.nodes.append(node)
else:
print('The node', node, "is not present in the stream")
exit()
for node in self.nodeID.lookUp:
if node not in tmp_nodes:
print('The node', node, "is not present in", orderFile)
exit()
def addNode(self, node):
self.nodeID.get(node)
if self.max_label_len < len(str(node)):
self.max_label_len = len(str(node))
def evaluateOrder(self, order):
distance = 0
for link in self.links:
distance += abs(order[link.u]-order[link.v])
return distance
def findOrder(self):
cur_solution = self.nodeID.lookUp
cur_reverse = self.nodeID.reverse
dist = self.evaluateOrder(cur_solution)
sys.stderr.write("Order improved from "+str(dist))
        for _ in range(0, 10000):
i = random.randint(0, len(cur_solution) - 1)
j = random.randint(0, len(cur_solution) - 1)
cur_reverse[j], cur_reverse[i] = cur_reverse[i], cur_reverse[j]
cur_solution[cur_reverse[j]] = j
cur_solution[cur_reverse[i]] = i
tmp = self.evaluateOrder(cur_solution)
if tmp >= dist:
# re swap to go back.
cur_reverse[j], cur_reverse[i] = cur_reverse[i], cur_reverse[j]
cur_solution[cur_reverse[j]] = j
cur_solution[cur_reverse[i]] = i
else:
dist = tmp
self.nodeID.lookUp = cur_solution
new_order = "new_order.txt"
with open(new_order, "w") as out:
for node in self.nodeID.reverse:
out.write(str(self.nodeID.reverse[node]) + "\n")
sys.stderr.write(" to "+str(dist)+". Order saved in:"+new_order+"\n")
def addDuration(self, origin, duration, color, amplitude=1):
freq = 0.8 # angular frequency
duration = duration * self.ppux
self.g.append(svgfig.SVG("line",
stroke=color,
stroke_opacity=0.8,
stroke_width=1.1,
x1=origin["x"],
y1=origin["y"],
x2=origin["x"]+duration,
y2=origin["y"]))
def draw(self, outputFile):
self.findOrder()
offset = 1.5 * self.ppux
# Define dimensions
label_margin = 5 * self.max_label_len
origleft = label_margin + 1 * self.ppux
right_margin = self.ppux
width = origleft + self.ppux * math.ceil(self.max_time) + right_margin
svgfig._canvas_defaults["width"] = str(width) + 'px'
arrow_of_time_height = 5
height = 5 + 10 * int(self.nodeID.size() + 1) + arrow_of_time_height
svgfig._canvas_defaults["height"] = str(height) + 'px'
origtop = 10
################
# Draw background lines
for node in self.nodeID.lookUp:
horizonta_axe = self.ppux * self.nodeID.get(node) + origtop
self.g.append(svgfig.SVG("text", str(node),
x=str(label_margin),
y=horizonta_axe + 2,
fill="black", stroke_width=0,
text_anchor="end",
font_size="6"))
self.g.append(svgfig.SVG("line", stroke_dasharray="2,2",
stroke_width=0.5,
x1=str(origleft-5),
y1=horizonta_axe,
x2=width - right_margin,
y2=horizonta_axe))
# Add timearrow
self.g.append(svgfig.SVG("line",
stroke_width=0.5,
x1=self.ppux ,
y1=10*(self.nodeID.size()+1),
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("line", stroke_width=0.5,
x1=width-8,
y1=10*(self.nodeID.size()+1)-3,
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("line", stroke_width=0.5,
x1=width-8,
y1=10*(self.nodeID.size()+1)+3,
x2=width-5,
y2=10*(self.nodeID.size()+1)))
self.g.append(svgfig.SVG("text", str("Time"),
x=width-19,
y=10*(self.nodeID.size()+1)-3,
fill="black", stroke_width=0,
font_size="6"))
#
# Add time ticks
for i in range(0, int(math.ceil(self.max_time)+1), 5):
x_tick = i * self.ppux + origleft
self.g.append(svgfig.SVG("line",
stroke_width=0.5,
x1=str(x_tick),
y1=10*(self.nodeID.size()+1)-3,
x2=str(x_tick),
y2=10*(self.nodeID.size()+1)+3))
self.g.append(svgfig.SVG("text", str(i),
x=str(x_tick), y=10*(self.nodeID.size()+1)+7,
fill="black", stroke_width=0,
font_size="6"))
for link in self.links:
ts = link.t
node_1 = min(self.nodeID.get(link.u), self.nodeID.get(link.v))
node_2 = max(self.nodeID.get(link.u), self.nodeID.get(link.v))
offset = ts * self.ppux + origleft
y_node1 = 10 * node_1 + origtop
y_node2 = 10 * node_2 + origtop
# Add nodes
self.g.append(svgfig.SVG("circle",
cx=offset, cy=y_node1,
r=1, fill=link.color))
self.g.append(svgfig.SVG("circle",
cx=offset, cy=y_node2,
r=1, fill=link.color))
x = 0.2 * ((10 * node_2 - 10 * node_1) / math.tan(math.pi / 3)) + offset
y = (y_node1 + y_node2) / 2
param_d = "M" + str(offset) + "," + str(y_node1) +\
" C" + str(x) + "," + str(y) + " " + str(x) + "," + str(y) +\
" " + str(offset) + "," + str(y_node2)
self.g.append(svgfig.SVG("path", stroke=link.color,
d=param_d))
self.addDuration({"x": x, "y": (y_node1+y_node2)/2}, link.duration, link.duration_color)
# Save to svg file
viewBoxparam = "0 0 " + str(width) + " " + str(height)
svgfig.canvas(self.g, viewBox=viewBoxparam).save(outputFile)
if __name__ == '__main__':
if len(sys.argv) < 2 or "--help" in sys.argv or "-h" in sys.argv:
show_help()
sys.exit()
if "-v" in sys.argv or "--version" in sys.argv:
version()
exit()
argv = {"order": "", "silent": False}
read_argv(argv)
Links = LinkStream(sys.argv[1], argv["order"])
default_output = os.path.basename(sys.argv[1]).split(".")[0]+".svg"
argv["output"] = argv.get("output", default_output)
Links.draw(argv["output"])
if not argv["silent"]:
sys.stderr.write("Output generated to " + argv["output"] + ".\n")
``` |
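A hypothetical end-to-end run of the viewer above, using the plain-text `t u v [duration]` format described in `show_help`; the file name and values are made up.
```python
# Build a tiny link stream: time, node u, node v, optional duration.
links = "0 1 2\n2 1 3\n4 2 3 1.5\n"
with open("toy_stream.txt", "w") as f:
    f.write(links)

# Then render it from a shell:
#   python main.py toy_stream.txt --output=toy_stream.svg
```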
{
"source": "jordanwimb/kbpython",
"score": 2
} |
#### File: kbpython/class3/exercise2.py
```python
from snmp_helper import snmp_get_oid_v3, snmp_extract
import pygal
import time
#from sched import scheduler
input_oct = '1.3.6.1.2.1.2.2.1.10.5'
output_oct = '1.3.6.1.2.1.2.2.1.16.5'
input_ucast = '1.3.6.1.2.1.2.2.1.11.5'
output_ucast = '1.3.6.1.2.1.2.2.1.17.5'
fa4_in_octets = []
fa4_out_octets = []
fa4_in_packets = []
fa4_out_packets = []
fa4_in_bytes = []
fa4_out_bytes = []
def main():
a_user = 'pysnmp'
auth_key = 'galileo1'
encrypt_key = 'galileo1'
rtr_ip = '192.168.3.11'
snmp_user = (a_user, auth_key, encrypt_key)
pynet_rtr1 = (rtr_ip, 7961)
router = pynet_rtr1
fa4_in_octets,fa4_out_octets,fa4_in_packets,fa4_out_packets = get_intf_stats(router,snmp_user)
fa4_in_bytes = get_count_values(fa4_in_octets)
fa4_out_bytes = get_count_values(fa4_out_octets)
fa4_in_packets = get_count_values(fa4_in_packets)
fa4_out_packets = get_count_values(fa4_out_packets)
gen_graph(fa4_in_bytes,fa4_out_bytes,"bytes-graph","bytes")
gen_graph(fa4_in_packets,fa4_out_packets,"packets-graph","packets")
def get_intf_stats(device,user):
count = 0
global fa4_in_octets,fa4_out_octets,fa4_in_packets,fa4_out_packets
print("Gathering statistics. Check back in 1 hour.")
while count <= 12:
fa4_in_oct_count = int(snmp_extract(snmp_get_oid_v3(device,user,oid=input_oct)))
fa4_in_octets.append(fa4_in_oct_count)
fa4_out_oct_count = int(snmp_extract(snmp_get_oid_v3(device,user,oid=output_oct)))
fa4_out_octets.append(fa4_out_oct_count)
fa4_in_count = int(snmp_extract(snmp_get_oid_v3(device,user,oid=input_ucast)))
fa4_in_packets.append(fa4_in_count)
fa4_out_count = int(snmp_extract(snmp_get_oid_v3(device,user,oid=output_ucast)))
fa4_out_packets.append(fa4_out_count)
count +=1
time.sleep(300)
print("Done. Generating graph.")
return(fa4_in_octets,fa4_out_octets,fa4_in_packets,fa4_out_packets)
def get_count_values(value_list):
newlist = []
count = 1
count2 = 0
while count <= 12:
newlist.append(value_list[count] - value_list[count2])
count += 1
count2 += 1
return(newlist)
def gen_graph(input_stats,output_stats,filename,units):
line_chart = pygal.Line()
line_chart.title = 'Input/Output ' + units
line_chart.x_labels = ['5', '10', '15', '20', '25', '30', '35', '40', '45', '50', '55', '60']
line_chart.add('In ' + units, input_stats)
line_chart.add('Out ' + units, output_stats)
line_chart.render_to_file(filename + '.svg')
print("Graphing complete.")
if __name__ == "__main__":
main()
```
#### File: kbpython/class4/ex4.py
```python
from __future__ import print_function
import pexpect
import getpass
import time
def show_ip_int_brief(remote_conn):
remote_conn.sendline('show ip int brief')
remote_conn.expect('#')
print(remote_conn.before)
def set_logging(remote_conn):
remote_conn.sendline('config t')
remote_conn.expect('#')
remote_conn.sendline('logging buffered 20011')
remote_conn.expect('#')
print(remote_conn.before)
remote_conn.sendline('exit')
remote_conn.expect('#')
def verify_config(remote_conn):
remote_conn.sendline('sh run | i logging')
time.sleep(2)
remote_conn.expect('#')
print(remote_conn.before)
def main():
#Establish SSH connection
ip_address = '172.16.58.3'
user = 'pyclass'
    password = getpass.getpass()
port = 8022
passphrase = "<PASSWORD>"
remote_conn = pexpect.spawn('ssh -l {} {} -p {}'.format(user,ip_address,port))
remote_conn.timeout = 6
remote_conn.expect(":")
remote_conn.sendline()
time.sleep(1)
remote_conn.sendline()
time.sleep(1)
remote_conn.expect('ssword:')
remote_conn.sendline(password)
remote_conn.expect('#')
print(remote_conn.before)
#Send and verify commands
set_logging(remote_conn)
verify_config(remote_conn)
if __name__ == '__main__':
main()
```
#### File: kbpython/class4/ex5.py
```python
from __future__ import print_function
from netmiko import ConnectHandler
import getpass
import time
pynet_rtr2 = {
'device_type':'cisco_ios',
'ip':'192.168.3.11',
'username':'pyclass',
    'password':getpass.getpass(),
'port':8022
}
def config_mode(device):
#Enter config mode
device.config_mode()
time.sleep(2)
#Verify status
output = device.check_config_mode()
time.sleep(2)
if output == True:
print("Entered config mode.")
def main():
#Establish SSH connection
rtr2 = ConnectHandler(**pynet_rtr2)
#Enter config mode
config_mode(rtr2)
if __name__ == '__main__':
main()
```
#### File: kbpython/class8/ex4.py
```python
from __future__ import print_function
import django
from net_system.models import NetworkDevice, Credentials
devices = NetworkDevice.objects.all()
creds = Credentials.objects.all()
def main():
django.setup()
    dev_name = raw_input("Enter device to delete or 'q' to quit: ")
    while dev_name != 'q':
        dev = NetworkDevice.objects.get(device_name=dev_name)
        dev.delete()
        print("%s has been deleted." % dev_name)
        dev_name = raw_input("Enter device to delete or 'q' to quit: ")
    else:
        print("Quitting.")
if __name__ == '__main__':
main()
``` |
{
"source": "jordanxzz/Codes_for_Fed_Disagreement_Paper",
"score": 3
} |
#### File: Codes_for_Fed_Disagreement_Paper/Python Codes for Web Scrapping and Disagreement Measure/Transcript_Labelling.py
```python
import os
import sys, getopt
from collections import Counter, defaultdict
import re
from nltk.tokenize import word_tokenize
import pandas as pd
import numpy as np
import pickle
def remove_bad_characters(text):
return text.replace('‘',"'").replace('’',"'").replace('“','"').replace('”','"')\
.replace('!','!').replace('—',' ').replace('-',' ')
# return text
def refine_text_front(text_raw, pattern_head=None, pattern_tail=None):
try:
position_front, position_head = re.search(pattern_head, text_raw).span()
except:
position_front, position_head = 0,0
try:
position_tail = re.search(pattern_tail, text_raw).span()[0]
except:
position_tail = len(text_raw)
dirty_text = re.sub(r'([0-9]{1,2}/[0-9]{1,2}/[0-9]{2} [0-9]+|\[(Laughter|No response)\])', ' ', text_raw[position_head:position_tail])
text = remove_bad_characters(dirty_text)
front_matter = text_raw[0:position_front]
return text, front_matter
def label_transcripts(text_dir, transcript_names, df_labelled_transcripts):
os.chdir(text_dir)
for file_name in transcript_names:
file_name = file_name +".txt"
meeting_date = int(file_name[-23:-15])
with open (file_name, "r", encoding="utf-8") as f:
text_raw = f.read()
###Get rid of front and end matter###
pattern_head = re.compile(r'Transcript of Federal Open Market Committee Meeting of', re.IGNORECASE)
pattern_tail = re.compile(r'END OF MEETING')
text, front_matter = refine_text_front(text_raw, pattern_head, pattern_tail)
###Parsing the text###
speaker_list, content_list = [],[]
speaker_list.append('FRONT_MATTER')
content_list.append(front_matter)
pattern_speaker = re.compile(r'(MR\.|MS\.|MRS\.|CHAIRMAN|VICE CHAIRMAN) [A-Z]+\. ')
match_iterator = re.finditer(pattern_speaker, text)
first_match = next(match_iterator)
speech_end = first_match.span()[0]
speech_begin = first_match.span()[1]
speaker_list.append(text[speech_end:speech_begin])
for match in match_iterator:
speech_end = match.span()[0]
content_list.append(re.sub(r'(\n|/)', ' ', text[speech_begin:speech_end]))
speech_begin = match.span()[1]
speaker_list.append(text[speech_end:speech_begin])
else:
content_list.append(re.sub(r'(\n|/)', ' ', text[speech_begin:speech_end]))
###Store the parsed text###
labelled_transcript_df = pd.DataFrame({'Meeting_Date':[int(meeting_date)]*len(speaker_list),\
'Speaker':speaker_list, 'Content':content_list})
df_labelled_transcripts = df_labelled_transcripts.append(labelled_transcript_df)
return df_labelled_transcripts
if __name__ == "__main__":
data_dir = "C:/Users/xiazizhe.HKUPC2.006/Downloads"
text_dir = "C:/Users/xiazizhe.HKUPC2.006/Downloads"
os.chdir(data_dir)
transcript_names = pd.read_csv("FOMC_Links.csv", encoding='utf-8').Transcript_Name.tolist()
df_labelled_transcripts = pd.DataFrame({'Meeting_Date':[], 'Speaker':[], 'Content':[]})
df_labelled_transcripts = label_transcripts(text_dir, transcript_names, df_labelled_transcripts)
os.chdir(data_dir)
df_labelled_transcripts.to_csv("FOMC_Transcripts.txt", sep="|", index=False)
df_labelled_transcripts.to_csv("FOMC_Transcripts.csv", index=False)
df = pd.read_csv("FOMC_Transcripts.txt", sep="|", encoding='utf-8')
pickle.dump(df, open('FOMC_transcripts_df.pickle', 'wb'))
```
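A self-contained illustration of how the speaker regex used in `label_transcripts` carves a transcript into speaker labels and speech spans; the sample sentence is invented.
```python
import re

pattern_speaker = re.compile(r'(MR\.|MS\.|MRS\.|CHAIRMAN|VICE CHAIRMAN) [A-Z]+\. ')
sample = "CHAIRMAN VOLCKER. Good morning. MR. AXILROD. Thank you. "
for m in re.finditer(pattern_speaker, sample):
    print(m.group(0).strip(), m.span())
# CHAIRMAN VOLCKER. (0, 18)
# MR. AXILROD. (32, 45)
```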
#### File: Codes_for_Fed_Disagreement_Paper/Python Codes for Web Scrapping and Disagreement Measure/Web_Scrapping.py
```python
from collections import Counter, defaultdict
import re
from nltk.tokenize import word_tokenize
import pandas as pd
import numpy as np
import scipy
from decimal import getcontext
from decimal import Decimal as Dec
from bs4 import BeautifulSoup
import requests,re,codecs
import urllib3
def download_file(download_url, file_name):
http = urllib3.PoolManager()
response = http.request('GET', download_url)
with open(file_name, 'wb') as f:
f.write(response.data)
response.release_conn()
def refine_text(text_raw, pattern_head=None, pattern_tail=None):
try:
position_head = re.search(pattern_head, text_raw).span()[1]
except:
position_head = 0
try:
position_tail = re.search(pattern_tail, text_raw).span()[0]
except:
position_tail = len(text_raw)
text = text_raw[position_head:position_tail]
return text
def get_transcript(transcript_link, beige_book_date, meeting_date, year):
try:
file_name = "Transcript" + transcript_link[-23:]
download_file(transcript_link, file_name)
print('%d, %d Transcript finished' % (year, meeting_date))
except:
print("Transcript error %d" % meeting_date)
def get_old_beige_book_text(page_link, div_id, beige_book_date, year):
beige_book_page = requests.get(page_link)
beige_book_page_soup = BeautifulSoup(beige_book_page.content, 'html.parser')
try:
text_raw = beige_book_page_soup.find_all('table')[0].find_all('tr')[1].find_all('td')[2].text
except:
text = ""
print("Beige book error %d, %s" % (beige_book_date, div_id))
return text
pattern_head = re.compile(r'(is not a commentary on the views of Federal Reserve officials\.)', re.IGNORECASE)
pattern_tail = re.compile(r'(Return to top)', re.IGNORECASE)
text = refine_text(text_raw, pattern_head, pattern_tail)
return text
def get_new_beige_book_text(page_link, div_id, beige_book_date, year):
beige_book_page = requests.get(page_link)
beige_book_page_soup = BeautifulSoup(beige_book_page.content, 'html.parser')
    beige_book_page_content = beige_book_page_soup.find(id="leftText")
try:
text_raw = beige_book_page_content.find(id=div_id).text
except:
text = ""
print("Beige book error %d, %s" % (beige_book_date, div_id))
return text
pattern_head = re.compile(r'(is not a commentary on the views of Federal Reserve officials\.)', re.IGNORECASE)
pattern_tail = re.compile(r'(Return to top)', re.IGNORECASE)
text = refine_text(text_raw, pattern_head, pattern_tail)
return text
def get_beige_book(beige_book_link, df_beige_books, beige_book_date, meeting_date, year):
div_id_list = ['div_summary','div_boston','div_new_york','div_philadelphia',\
'div_cleveland','div_richmond','div_atlanta','div_chicago',\
'div_st_louis','div_minneapolis','div_kansas_city',\
'div_dallas','div_san_francisco']
beige_book_dict = {}
beige_book_dict['Year'], beige_book_dict['Meeting_Date'], beige_book_dict['Beige_Book_Date']\
= [year], [meeting_date], [beige_book_date]
if year < 2011:
beige_book_dict['div_summary'] = [get_old_beige_book_text(beige_book_link, 'div_summary', beige_book_date, year)]
for i in range(1,13):
district_link = beige_book_link[0:-11] + str(i) + '.htm'
beige_book_dict[div_id_list[i]] = [get_old_beige_book_text(district_link, div_id_list[i], beige_book_date, year)]
else:
for i in range(0,13):
beige_book_dict[div_id_list[i]] = [get_new_beige_book_text(beige_book_link, div_id_list[i], beige_book_date, year)]
beige_book_df = pd.DataFrame(beige_book_dict)
# df_beige_books = df_beige_books.append(beige_book_df)
print('%d, %d Beige Book finished' % (year, beige_book_date))
return df_beige_books.append(beige_book_df)
# signal printed in subfunctions
def get_statement(statement_link, df_statements, beige_book_date, meeting_date, year):
try:
statement_page = requests.get(statement_link)
statement_page_soup = BeautifulSoup(statement_page.content, 'html.parser')
if year < 2006:
text_raw = statement_page_soup.find_all('table')[-1].text
else:
text_raw = statement_page_soup.find(id="article").\
find(class_="col-xs-12 col-sm-8 col-md-8").text
        pattern_tail = re.compile(
            r'(Voting for the FOMC monetary policy action were|[0-9]{4} Monetary Policy)',
            re.IGNORECASE)
text = refine_text(text_raw, None, pattern_tail)
statement_df = pd.DataFrame({'Year':[year], 'Meeting_Date':[meeting_date],\
'Beige_Book_Date':[beige_book_date], 'Statement':[text]})
except:
statement_df = pd.DataFrame({'Year':[year], 'Meeting_Date':[meeting_date],\
'Beige_Book_Date':[beige_book_date], 'Statement':[""]})
print("Statement error %d" % meeting_date)
print('%d, %d Statement finished' % (year, meeting_date))
# df_statements = df_statements.append(statement_df)
return df_statements.append(statement_df)
def get_materials(df_links, df_beige_books, df_statements):
for row in df_links.itertuples():
year, meeting_date , beige_book_date, beige_book_link, statement_link, transcript_link\
= row.Year, row.Meeting_Date, row.Beige_Book_Date, row.Beige_Book_Link, row.Statement_Link, row.Transcript_Link
get_transcript(transcript_link, beige_book_date, meeting_date, year)
# df_beige_books = get_beige_book(beige_book_link, df_beige_books, beige_book_date, meeting_date, year)
df_statements = get_statement(statement_link, df_statements, beige_book_date, meeting_date, year)
return df_beige_books, df_statements
def get_links(year_start, year_end):
df_links = pd.DataFrame({'Year':[], 'Meeting_Date':[], 'Beige_Book_Date':[],\
'Beige_Book_Link':[], 'Statement_Link':[], 'Transcript_Link':[]})
for year in range(year_start, year_end+1):
year_link = "https://www.federalreserve.gov/monetarypolicy/fomchistorical" + str(year) +".htm"
###Yearly page search###
year_page = requests.get(year_link)
year_page_soup = BeautifulSoup(year_page.content, 'html.parser')
year_page_content = year_page_soup.find(id="article")
meeting_contents = year_page_content.find_all(class_="panel panel-default")
inyear_count = 0
for meeting_content in meeting_contents:
try:
beige_book_link = "https://www.federalreserve.gov" + \
meeting_content.find('a', href=re.compile(r'.+beigebook.+\.htm')).get('href')
beige_book_date = re.findall(r'[0-9]{6}', beige_book_link)[0]
except:
beige_book_link = ""
beige_book_date = ""
try:
statement_link = "https://www.federalreserve.gov" + \
meeting_content.find('a', text='Statement').get('href')
except:
statement_link = ""
try:
transcript_link = "https://www.federalreserve.gov" + \
meeting_content.find('a', href=re.compile(r'.+meeting\.pdf')).get('href')
meeting_date = transcript_link[-19:-11]
except:
meeting_date = ""
transcript_link = ""
link_df = pd.DataFrame({'Year':[year], 'Meeting_Date':[meeting_date], 'Beige_Book_Date':[beige_book_date],\
'Beige_Book_Link':[beige_book_link], 'Statement_Link':[statement_link], 'Transcript_Link':[transcript_link]})
df_links = df_links.append(link_df)
inyear_count += 1
print('finish %d' % inyear_count)
print("%d finished" % year)
return df_links
if __name__ == "__main__":
###Get the Links###
df_links = get_links(1970, 1992)
df_links.to_csv("FOMC_Links.csv", index=False)
###Do the scrapping###
df_beige_books = pd.DataFrame({'Year':[], 'Meeting_Date':[], 'Beige_Book_Date':[],\
'div_summary':[],'div_boston':[],\
'div_new_york':[],'div_philadelphia':[],\
'div_cleveland':[],'div_richmond':[],\
'div_atlanta':[],'div_chicago':[],\
'div_st_louis':[],'div_minneapolis':[],\
'div_kansas_city':[],'div_dallas':[],\
'div_san_francisco':[]})
df_statements = pd.DataFrame({'Year':[], 'Meeting_Date':[],\
'Beige_Book_Date':[], 'Statement':[]})
df_links = pd.read_csv("FOMC_Links.csv", encoding='utf-8')
df_beige_books, df_statements = get_materials(df_links, df_beige_books, df_statements)
df_beige_books.to_csv("FOMC_Beige_Books.csv", index=False)
df_statements.to_csv("FOMC_Statements.csv", index=False)
``` |
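A small, self-contained sketch of what `refine_text` above does: keep only the text between the head and tail markers. The patterns and sample string are simplified stand-ins for the real Beige Book markers.
```python
import re

head = re.compile(r'officials\.', re.IGNORECASE)
tail = re.compile(r'Return to top', re.IGNORECASE)
raw = "... Federal Reserve officials. District activity was mixed. Return to top"
print(refine_text(raw, head, tail))
# ' District activity was mixed. '
```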
{
"source": "jordanxzz/NYC_Taxi_Driver_DQN",
"score": 2
} |
#### File: jordanxzz/NYC_Taxi_Driver_DQN/deepqmodel.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
class DeepQNetwork(nn.Module):
def __init__(self, ALPHA):
# ALPHA is the learning rate of the deep q network
super(DeepQNetwork, self).__init__()
self.fc1 = nn.Linear(8, 32)
self.fc2 = nn.Linear(32, 64)
self.fc3 = nn.Linear(64, 16)
self.fc4 = nn.Linear(16, 3)
self.activate = nn.ReLU()
#self.optimizer = optim.SGD(self.parameters(), lr=self.ALPHA, momentum=0.9)
self.optimizer = optim.RMSprop(self.parameters(), lr=ALPHA)
self.loss = nn.SmoothL1Loss() #Huber loss
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.to(self.device)
def forward(self, observation):
actions = torch.DoubleTensor(observation).to(self.device)
actions = self.activate(self.fc1(actions))
actions = self.activate(self.fc2(actions))
actions = self.activate(self.fc3(actions))
actions = self.fc4(actions)
return actions
```
#### File: jordanxzz/NYC_Taxi_Driver_DQN/environment.py
```python
import pandas as pd
import numpy as np
import torch
import multiprocessing
import datahandler as dh
class NYCTaxiEnv(object):
def __init__(self, birthpoint=(165,1356), time=0, day=0, month=3,\
data_months=3, filters={'location':20,'time':900}, \
randomness={'reward':0.03, 'time':0.03, 'distance':0.03},
data_dir='./../input/', multiprocessing=False):
### action space
self._action_space = [0,1,2]
self._action_space_meanings = {0: 'accept the current offer',
1: 'reject the current offer and wait',
2: 'reject the current offer and back'}
### initial state
self._birthpoint = birthpoint ### birthpoint to be returned to
self._location = birthpoint ### location: tuples of binned long&la, default at time sq
self._time = time ### time: seconds of day passed, integer 0 - 86400-1
self._month = month ### month: month of year, 0-11
self._day = day ### day: day of week, 0-6
### ride offer generation params
self._data_month = data_months%12 ### number of months to collect data from
self._data_dir = data_dir ### store the data dir
self._filters = filters ### filters for ride selection, location (in binned int) & time distance (in seconds int)
self._randomness = randomness ### randomness float: add randomness in ride generation as std/total, set as 0 to exclude randomness
self._multiprocessing = multiprocessing
self._data_retrieved = False
def get_action_space(self):
return self._action_space
def get_action_space_meanings(self):
return self._action_space_meanings
# def traffic_at_time(self, time, times_, traffic):
# self._time = time
# times_.append(time)
# traffic.append(len(self.filter_rides()))
def get_status(self):
status = {'location':self._location,
'time':self._time,
'month':self._month,
'day':self._day}
return status
def collect_data(self):
'''
collect the data on request to save memory
'''
if not self._data_retrieved:
month = self._month
data_months = self._data_month
data_dir = self._data_dir
incremental_list = [0,1,-1,2,-2,3,-3,4,-4,5,-5,6]
month_list = [(month + i)%12 for i in incremental_list[0:data_months]]
dfs = {i: dh.read_df(data_dir+'/taxi_'+str(i)+'.csv') for i in month_list} ### need amendment
self._dfs = dfs
self._data_retrieved = True
else:
pass
def get_traffic_at_times(self):
'''
get the traffic (num of rides) at the current location throughout the day
'''
location = self._location
times = [i * 900 for i in range(96)]
traffic = []
times_ = []
def do(time):
self._time = time
times_.append(time)
traffic.append(len(self.filter_rides()))
if self._multiprocessing:
### multi process
p = multiprocessing.Pool(8)
p.map(do, times)
else:
### single process
for time in times:
do(time)
return pd.DataFrame({'time':times_, location:traffic}).sort_values(by=['time']).set_index('time')
def get_traffic_at_locations(self):
'''
get the traffic (num of rides) at the current time throughout the city
'''
time = self._time
locations = np.concatenate([df.origin.unique() for m,df in self._dfs.items()])
traffic = []
locations_ = []
def do(location):
self._location = location
locations_.append(location)
traffic.append(len(self.filter_rides()))
if self._multiprocessing:
### multi process
p = multiprocessing.Pool(8)
p.map(do, locations)
else:
### single process
            for location in locations:
                do(location)
return pd.DataFrame({'location':locations_, time:traffic}).sort_values(by=['location']).set_index('location')
def reset(self):
'''
call this to initialize the episode, and collect the data
i.e. randomize the date, set timer to zero, location to birthpoint
need to call this before any action
'''
self._location = self._birthpoint ### reset location back to the birthpoint
self._time = 0 ### reset time
self.collect_data() ### collect data
self.generate_rides()
return self.observe()
def filter_rides(self):
'''
filter all rides from the data (dfs) according to the filter
call this only after collecting the data
need to handle empty situation?? or be aware of that
'''
dfs = self._dfs
location = self._location
time = self._time
day = self._day
filters = self._filters
f_location = lambda x: abs(x[0]-location[0]) + abs(x[1]-location[1]) <= filters['location']
f_time = lambda x: np.logical_or((x - time).abs() <= filters['time'], (x - time).abs() >= 86400 - filters['time'])
f_day = lambda x: x == day
f = lambda df: df[np.logical_and.reduce((f_time(df.time), np.array([f_location(x) for x in df.origin],dtype=bool), f_day(df.day)))]
filtered_rides = pd.concat((f(df) for m,df in dfs.items()))
return filtered_rides
def generate_rides(self):
'''
generate 3 rides corresponds to three action and store them
ride {} keys: 'reward','trip_distance','trip_time_in_secs','passenger_count','destination'
not return anything but store the rides in self._rides
'''
### get params
randomness = self._randomness
filters = self._filters
### generate ride0: random ride from data
filtered_rides = self.filter_rides()
traffic = len(filtered_rides)
# ride_probability = traffic / (traffic + 1000)
ride_probability = min(1,traffic/(filters['time']/10 * filters['location']**2/1000))
rand = np.random.random()
if rand <= ride_probability and traffic != 0:
rands = np.random.normal(size=3)
sampled_ride = filtered_rides.sample()
# print(sampled_ride)
ride0 = {'reward':abs(sampled_ride.reward.iloc[0] * (1 + randomness['reward']*rands[0])),
'trip_distance':abs(sampled_ride.trip_distance.iloc[0] * (1 + randomness['distance']*rands[1])),
'trip_time_in_secs':int(abs(sampled_ride.trip_time_in_secs.iloc[0] * (1 + randomness['time']*rands[2]))),
'passenger_count':sampled_ride.passenger_count.iloc[0],
'destination':sampled_ride.destination.iloc[0]}
else:
ride0 = {'reward':0,
'trip_distance':0,
'trip_time_in_secs':60,
'passenger_count':1,
'destination':self._location}
### generate ride1: wait for another ride
ride1 = {'reward':0,
'trip_distance':0,
'trip_time_in_secs':60,
'passenger_count':1,
'destination':self._location}
### generate ride2: back to the birthpoint
rands = np.random.normal(size=2)
birthpoint = self._birthpoint
location = self._location
manhatten_distance = sum(abs(a-b) for a,b in zip(birthpoint, location))
trip_distance = 1.3 + 0.0041 * manhatten_distance ### from simple regression
trip_time_in_secs = 580 + 0.47 * manhatten_distance
ride2 = {'reward':0,
'trip_distance':abs(trip_distance * (1 + randomness['distance']*rands[0])),
'trip_time_in_secs':60 + int(abs(trip_time_in_secs * (1 + randomness['time']*rands[1]))),
'passenger_count':1,
'destination':birthpoint}
### store the rides
rides = {0:ride0, 1:ride1, 2:ride2}
self._rides = rides
def step(self, action):
'''
make a step and get the reward
update status, return new observation, reward, done
'''
ride = self._rides[action]
t_start = self._time
trip_time_in_secs = ride['trip_time_in_secs']
self._time = t_start + trip_time_in_secs
self._location = ride['destination']
reward = ride['reward']
done = self._time >= 86400
if done:
reward = reward * (86400 - t_start) / trip_time_in_secs
else:
self.generate_rides() ### no new rides are needed once the episode is done
observation = self.observe()
return observation, reward, done
def observe(self):
'''
get the current state of the agent and the ride offers
observation dict
'''
observation = {'status':self.get_status(),
'rides':self._rides}
return tensorify_obs(observation)
def tensorify_obs(obs):
'''
transform the observation input into tensors
IMPORTANT: omit the unobservable items such as trip time
Note that some features are not needed: the info of r1 and r2
'''
s,r0,r1,r2 = obs['status'],obs['rides'][0],obs['rides'][1],obs['rides'][2]
x = [s['location'][0], s['location'][1], s['time'], #s['month'], s['day'],
r0['trip_distance'],r0['passenger_count'],r0['destination'][0],r0['destination'][1],
# r1['trip_distance'],r1['passenger_count'],r1['destination'][0],r1['destination'][1],
# r2['trip_distance'],r2['passenger_count'],r2['destination'][0],r2['destination'][1]
r2['trip_distance']]
return torch.Tensor(x).double()
if __name__ == '__main__':
driver = NYCTaxiEnv()
observation = driver.reset()
df = driver.filter_rides()
print(len(df))
df.to_csv('filtered.csv')
``` |
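A minimal rollout sketch extending the `__main__` block above; it assumes the class name `NYCTaxiEnv` and the action meanings defined in `generate_rides` (0 = take the offered ride, 1 = wait, 2 = return to the birthpoint):
```python
# Hedged usage sketch: simulate one working day by always accepting the offered ride.
env = NYCTaxiEnv()
obs = env.reset()                    # tensorified status + ride offers
total_reward, done = 0.0, False
while not done:
    obs, reward, done = env.step(0)  # action 0: take the sampled ride
    total_reward += reward
print('total reward for the day:', total_reward)
```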
{
"source": "jordanyaker/drunken-data-quality",
"score": 3
} |
#### File: python/pyddq/exceptions.py
```python
class JavaClassNotFoundException(Exception):
"""
Raise if required Java class is not found by py4j
"""
def __init__(self, java_class):
Exception.__init__(self)
self.java_class = java_class
def __str__(self):
return "%s. Did you forget to add the jar to the class path?" % (
self.java_class
)
def __repr__(self):
return "%s: %s" % (self.__class__.__name__, self.java_class)
```
#### File: python/pyddq/streams.py
```python
class OutputStream(object):
_jvm = None
_jvm_obj = None
@property
def jvm_obj(self):
raise NotImplementedError
@property
def jvm(self):
if not self._jvm:
raise AttributeError("jvm is not yet set!")
else:
return self._jvm
@jvm.setter
def jvm(self, value):
"""
Sets JVMView used for creating java.io.OutputStream.
Cannot be changed after the assignment
Args:
value (py4j.java_gateway.JVMView)
"""
if self._jvm:
raise AttributeError("jvm is already set!")
else:
self._jvm = value
class FileOutputStream(OutputStream):
"""
A wrapper around java.io.FileOutputStream
Args:
descriptor (file): open file descriptor to write the output.
Supports sys.stdout and sys.stderr
"""
def __init__(self, descriptor):
if not isinstance(descriptor, file):
raise ValueError("Descriptor is not a file")
elif descriptor.closed:
raise ValueError("Descriptor is closed")
elif descriptor.mode == "r":
raise ValueError("Descriptor is opened for reading")
self.descriptor = descriptor
@property
def jvm_obj(self):
"""
Returns underlying instance of java.io.FileOutputStream.
Requires jvm attribute to be set
"""
if not self._jvm_obj:
stds = {
"<stdout>": self.jvm.System.out,
"<stderr>": self.jvm.System.err
}
if self.descriptor.name in stds:
self._jvm_obj = stds[self.descriptor.name]
else:
self._jvm_obj = self.jvm.java.io.FileOutputStream(
self.descriptor.name,
"a" in self.descriptor.mode
)
return self._jvm_obj
class ByteArrayOutputStream(OutputStream):
"""
A wrapper around java.io.ByteArrayOutputStream
"""
@property
def jvm_obj(self):
"""
Returns underlying instance of java.io.ByteArrayOutputStream.
Requires jvm attribute to be set
"""
if not self._jvm_obj:
self._jvm_obj = self.jvm.java.io.ByteArrayOutputStream()
return self._jvm_obj
def get_output(self):
return self.jvm_obj.toString().strip()
class PrintStream(object):
"""
A wrapper around java.io.PrintStream
"""
def __init__(self, jvm, output_stream):
self.output_stream = output_stream
self.output_stream.jvm = jvm
self.jvm_obj = jvm.java.io.PrintStream(output_stream.jvm_obj)
```
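A hedged usage sketch for the stream wrappers above; it assumes a live py4j JVM view (for example the one PySpark exposes as `df._sc._jvm`), since the wrappers only build real Java objects once `jvm` is set:
```python
# Sketch only: route a java.io.PrintStream into a ByteArrayOutputStream and read the result back.
baos = ByteArrayOutputStream()
printer = PrintStream(jvm, baos)       # `jvm` is an assumed py4j JVMView, e.g. spark_df._sc._jvm
printer.jvm_obj.println("hello from the JVM")
print(baos.get_output())               # -> "hello from the JVM"
```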
#### File: tests/integration/test_check.py
```python
import unittest
from uuid import UUID, uuid4
from pyspark import SparkContext, StorageLevel
from pyspark.sql import SQLContext
from pyddq.core import Check
class ConstructorTest(unittest.TestCase):
def setUp(self):
self.sc = SparkContext()
self.sql = SQLContext(self.sc)
self.df = self.sql.createDataFrame([(1, "a"), (1, None), (3, "c")])
def test_default_args(self):
check = Check(self.df)
self.assertEqual(check.name, "DataFrame[_1: bigint, _2: string]")
self.assertEqual(check.cacheMethod, None)
try:
UUID(check.id, version=4)
except ValueError:
self.fail("id is not a correct uuid4")
self.assertEqual(
check.jvmCheck.getClass().toString(),
"class de.frosner.ddq.core.Check"
)
def test_passed_args(self):
display_name = "display name"
id = "id"
cache_method = StorageLevel.DISK_ONLY
check = Check(self.df, display_name, cache_method, id)
# check wrapper
self.assertEqual(check.name, display_name)
self.assertEqual(check.id, id)
self.assertEqual(check.cacheMethod, cache_method)
# check jvm check
self.assertEqual(
check.jvmCheck.getClass().toString(),
"class de.frosner.ddq.core.Check"
)
self.assertEqual(check.jvmCheck.name(), check.name)
self.assertEqual(check.jvmCheck.id(), check.id)
jvm_cache_method = check.jvmCheck.cacheMethod().get()
self.assertEqual(
jvm_cache_method.useDisk(),
check.cacheMethod.useDisk
)
self.assertEqual(
jvm_cache_method.useMemory(),
check.cacheMethod.useMemory
)
self.assertEqual(
jvm_cache_method.useOffHeap(),
check.cacheMethod.useOffHeap
)
self.assertEqual(
jvm_cache_method.deserialized(),
check.cacheMethod.deserialized
)
self.assertEqual(
jvm_cache_method.replication(),
check.cacheMethod.replication
)
def tearDown(self):
self.sc.stop()
if __name__ == '__main__':
unittest.main()
```
#### File: tests/integration/test_reporters.py
```python
import unittest
from mock import Mock, patch
from pyspark import SparkContext, StorageLevel
from pyspark.sql import SQLContext
from pyddq.core import Check
from pyddq.reporters import ConsoleReporter, MarkdownReporter, ZeppelinReporter, EmailReporter
from pyddq.streams import ByteArrayOutputStream
class ConsoleReporterTest(unittest.TestCase):
def setUp(self):
self.sc = SparkContext()
self.sql = SQLContext(self.sc)
self.df = self.sql.createDataFrame([(1, "a"), (1, None), (3, "c")])
def tearDown(self):
self.sc.stop()
def test_output(self):
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
baos = ByteArrayOutputStream()
reporter = ConsoleReporter(baos)
check.run([reporter])
expected_output = """
\x1b[34mChecking [_1: bigint, _2: string]\x1b[0m
\x1b[34mIt has a total number of 2 columns and 3 rows.\x1b[0m
\x1b[31m- Column _1 is not a key (1 non-unique tuple).\x1b[0m
\x1b[32m- Columns _1, _2 are a key.\x1b[0m
""".strip()
self.assertEqual(baos.get_output(), expected_output)
class MarkdownReporterTest(unittest.TestCase):
def setUp(self):
self.sc = SparkContext()
self.sql = SQLContext(self.sc)
self.df = self.sql.createDataFrame([(1, "a"), (1, None), (3, "c")])
def tearDown(self):
self.sc.stop()
def test_output(self):
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
baos = ByteArrayOutputStream()
reporter = MarkdownReporter(baos)
check.run([reporter])
expected_output = """
**Checking [_1: bigint, _2: string]**
It has a total number of 2 columns and 3 rows.
- *FAILURE*: Column _1 is not a key (1 non-unique tuple).
- *SUCCESS*: Columns _1, _2 are a key.
""".strip()
self.assertEqual(baos.get_output(), expected_output)
class ZeppelinReporterTest(unittest.TestCase):
def setUp(self):
self.sc = SparkContext()
self.sql = SQLContext(self.sc)
self.df = self.sql.createDataFrame([(1, "a"), (1, None), (3, "c")])
def tearDown(self):
self.sc.stop()
def test_output(self):
with patch("pyddq.reporters.get_field") as get_field:
baos = ByteArrayOutputStream()
baos.jvm = self.df._sc._jvm
get_field.return_value = baos.jvm_obj
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
z = Mock()
reporter = ZeppelinReporter(z)
check.run([reporter])
expected_output = """
%html
</p>
<h4>Checking [_1: bigint, _2: string]</h4>
<h5>It has a total number of 2 columns and 3 rows.</h5>
<table>
<tr><td style="padding:3px">❌</td><td style="padding:3px">Column _1 is not a key (1 non-unique tuple).</td></tr>
<tr><td style="padding:3px">✅</td><td style="padding:3px">Columns _1, _2 are a key.</td></tr>
</table>
<p hidden>
""".strip()
self.assertEqual(baos.get_output(), expected_output)
class EmailReporterTest(unittest.TestCase):
def setUp(self):
self.sc = SparkContext()
self.sql = SQLContext(self.sc)
self.df = self.sql.createDataFrame([(1, "a"), (1, None), (3, "c")])
def tearDown(self):
self.sc.stop()
def test_default_arguments(self):
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
reporter = EmailReporter("<EMAIL>", {"<EMAIL>"})
check.run([reporter])
def test_passed_arguments(self):
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
smtpServer = "<EMAIL>"
to = {"<EMAIL>"}
cc = {"<EMAIL>"}
subjectPrefix = "my subject prefix: "
smtpPort = 9000
from_ = "test.ddq.io"
usernameAndPassword = ("username", "password")
reportOnlyOnFailure = True
accumulatedReport = True
reporter = EmailReporter(
smtpServer, to, cc, subjectPrefix, smtpPort, from_,
usernameAndPassword, reportOnlyOnFailure, accumulatedReport
)
check.run([reporter])
def test_accumulated_report(self):
check = Check(self.df).hasUniqueKey("_1").hasUniqueKey("_1", "_2")
reporter = EmailReporter("<EMAIL>", {"<EMAIL>"}, accumulatedReport=True)
check.run([reporter])
reporter.sendAccumulatedReport()
reporter.sendAccumulatedReport("111")
if __name__ == '__main__':
unittest.main()
```
#### File: tests/unit/test_streams.py
```python
import sys
import unittest
from mock import Mock
from pyddq.streams import FileOutputStream, ByteArrayOutputStream, OutputStream
class OutputStreamTest(unittest.TestCase):
def test_jvm_obj(self):
# check that AttributeError is raised
# when jvm is set more than once for the same instance
stream = OutputStream()
with self.assertRaises(AttributeError):
stream.jvm = 1
stream.jvm = 2
class FileOutputStreamTest(unittest.TestCase):
def test_constructor(self):
self.assertRaisesRegexp(ValueError, "Descriptor is not a file",
FileOutputStream, "not a file")
descriptor = Mock(spec=file, closed=True)
self.assertRaisesRegexp(ValueError, "Descriptor is closed",
FileOutputStream, descriptor)
descriptor = Mock(spec=file, closed=False, mode="r")
self.assertRaisesRegexp(ValueError, "Descriptor is opened for reading",
FileOutputStream, descriptor)
descriptor = Mock(spec=file, closed=False, mode="w")
stream = FileOutputStream(descriptor)
self.assertEqual(stream.descriptor, descriptor)
def test_jvm_obj(self):
jvm = Mock()
stdout = Mock(spec=file, mode="w", closed=False)
stdout.name="<stdout>"
fos = FileOutputStream(stdout)
# check that AttributeError is raised
# when jvm_obj is accessed before jvm is set
with self.assertRaises(AttributeError):
jvm_obj = fos.jvm_obj
# check that stdout mapping works fine
fos.jvm = jvm
jvm_obj = fos.jvm_obj
self.assertEqual(jvm_obj, jvm.System.out)
# check that file descriptor is converted to FileOutputStream
descriptor = Mock(spec=file, mode="w", closed=False)
jvmFileOutputStream = Mock()
jvm.java.io.FileOutputStream = jvmFileOutputStream
fos = FileOutputStream(descriptor)
fos.jvm = jvm
jvm_obj = fos.jvm_obj
self.assertEqual(jvm_obj, jvmFileOutputStream())
# check that on the second call FileOutputStream returns the same jvm_obj
jvm.java.io.FileOutputStream = Mock(
side_effect=[1, 2]
)
fos = FileOutputStream(descriptor)
fos.jvm = jvm
jvm_obj1 = fos.jvm_obj
jvm_obj2 = fos.jvm_obj
self.assertEqual(jvm_obj1, jvm_obj2)
class ByteArrayOutputStreamTest(unittest.TestCase):
def test_jvm_obj(self):
jvm = Mock()
baos = ByteArrayOutputStream()
with self.assertRaises(AttributeError):
jvm_obj = baos.jvm_obj
# check that on the second call ByteArrayOutputStream returns the same jvm_obj
jvm.java.io.ByteArrayOutputStream = Mock(
side_effect=[1, 2]
)
baos.jvm = jvm
jvm_obj1 = baos.jvm_obj
jvm_obj2 = baos.jvm_obj
self.assertEqual(jvm_obj1, jvm_obj2)
def test_get_output(self):
jvm = Mock()
baos = ByteArrayOutputStream()
baos.jvm = jvm
baos.get_output()
baos.jvm_obj.toString().strip.assert_called()
``` |
{
"source": "JordanYeomans/DeepLearning",
"score": 2
} |
#### File: DeepLearning/Keras_Callbacks/Keras_Callbacks.py
```python
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, EarlyStopping
# Model Save Path
def standard_callbacks(model_filepath = './Saved_models/latest_new_model.hdf5', patience = 10):
#checkpoint = save_best(model_filepath)
tensorboard = start_tensorboard()
earlystopping = early_stopping(patience)
return [tensorboard, earlystopping] #[checkpoint, tensorboard, earlystopping]
def save_best(model_filepath):
return ModelCheckpoint(filepath = model_filepath, verbose = 1, save_best_only = True)
def start_tensorboard():
# Command Line: tensorboard --logdir ./Graph
return TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True)
def early_stopping(patience = 10):
return EarlyStopping(monitor='val_loss', min_delta=0, patience=patience, verbose=0, mode='auto')
```
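A short sketch of how these helpers attach to a training run; `model` and the data arrays are placeholders, not part of the module above:
```python
# Hypothetical usage: hand the callback list to Keras' fit().
callbacks = standard_callbacks(model_filepath='./Saved_models/latest_new_model.hdf5', patience=10)
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100,
          callbacks=callbacks)   # TensorBoard logs to ./Graph, EarlyStopping watches val_loss
```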
#### File: DeepLearning/Keras_Models/TimeSeries.py
```python
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Dropout, Conv1D, MaxPooling1D, BatchNormalization, Flatten, Reshape
from keras.optimizers import Adam
import keras.backend as K
def TimeSeries_Conv_LSTM_Dense_0001_a(input_data, output_data, batch_size, lr):
model = Sequential()
## Convolutional Network
model.add(Conv1D(filters=64, kernel_size=5, padding='same', activation='relu',
batch_input_shape=(batch_size, input_data.shape[1], input_data.shape[2]),
kernel_initializer='TruncatedNormal'))
model.add(Conv1D(filters=128, kernel_size=5, padding='same', activation='relu', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling1D(2))
model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling1D(2))
model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling1D(2))
model.add(Conv1D(filters=256, kernel_size=5, padding='same', activation='relu', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling1D(2))
model.add(Conv1D(filters=256, kernel_size=7, padding='valid', activation='relu', kernel_initializer='TruncatedNormal'))
model.add(MaxPooling1D(2))
model.add(LSTM(256, return_sequences=True, kernel_initializer='TruncatedNormal'))
model.add(LSTM(256, return_sequences=True, kernel_initializer='TruncatedNormal'))
model.add(LSTM(256))
# Multi Layer Perceptron Network
model.add(Dense(4096, activation='relu', kernel_initializer='TruncatedNormal'))
model.add(Dense(4096, activation='relu', kernel_initializer='TruncatedNormal'))
model.add(Dense(4096, activation='relu', kernel_initializer='TruncatedNormal'))
model.add(Dense(output_data.shape[1]))
# Define Optimiser
optimizer = Adam(lr)
# Compile Model
model.compile(loss='mean_squared_error', optimizer=optimizer)
return model
```
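A build-only sketch for the factory above; the shapes are illustrative (32 sequences of 512 timesteps with 1 channel, 4 regression targets) and only serve to size the layers:
```python
import numpy as np

batch_size = 32
input_data = np.zeros((batch_size, 512, 1))    # dummy arrays, only their shapes are used
output_data = np.zeros((batch_size, 4))
model = TimeSeries_Conv_LSTM_Dense_0001_a(input_data, output_data, batch_size, lr=1e-4)
model.summary()
```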
#### File: DeepLearning/Reinforcement_Learning/Agent.py
```python
import numpy as np
import time
import gym
import os
import shutil
import Logging as Logging
import NeuralNetwork as NeuralNetwork
import ReplayMemory
import RLFunctions
class Agent:
"""
This implements the function for running the game-environment with
an agent that uses Reinforcement Learning. This class also creates
instances of the Replay Memory and Neural Network.
"""
def __init__(self, env_name, Monty, CommandCenter, render=False, use_logging=True, verbose=False):
"""
Create an object-instance. This also creates a new object for the
Replay Memory and the Neural Network.
Replay Memory is used to buffer game states: workers fill and save it, the trainer loads it to optimize the Neural Network.
:param env_name:
Name of the game-environment in OpenAI Gym.
Examples: 'Breakout-v0' and 'SpaceInvaders-v0'
:param Monty:
Game-state wrapper that provides the Neural Network state-shape,
states and rewards for the game-environment.
:param CommandCenter:
Provides the model/data paths, the model version and whether this
agent runs as a worker or as a trainer.
:param render:
Boolean whether to render the game-images to screen during testing.
:param use_logging:
Boolean whether to use logging to text-files during training.
"""
self.replay_size = 5000
# Whether this bot is acting as a worker -> saving data rather than processing
self.worker = CommandCenter.worker
self.trainer = CommandCenter.trainer
self.env_name = env_name
# Create the game-environment using OpenAI Gym.
self.env = gym.make(self.env_name)
# Get Checkpoint Directory
# self.checkpoint_dir = CommandCenter.model_path
if self.worker:
checkpoint_base_dir = CommandCenter.data_path
elif self.trainer:
checkpoint_base_dir = CommandCenter.model_path
else:
checkpoint_base_dir = None
Logging.update_paths(env_name=env_name, checkpoint_base_dir=checkpoint_base_dir)
# The number of possible actions that the agent may take in every step.
self.num_actions = self.env.action_space.n
# Whether to render each image-frame of the game-environment to screen.
self.render = render
# Whether to use logging during training.
self.use_logging = use_logging
self.verbose = verbose
if self.use_logging and self.worker:
# Used for logging Q-values and rewards during training.
self.log_q_values = Logging.LogQValues()
self.log_reward = Logging.LogReward()
else:
self.log_q_values = None
self.log_reward = None
# List of string-names for the actions in the game-environment.
self.action_names = self.env.unwrapped.get_action_meanings()
self.epsilon_greedy = RLFunctions.EpsilonGreedy(start_value=1.0,
end_value=0.1,
num_iterations=1e6,
num_actions=self.num_actions,
epsilon_testing=0.01)
self.replay_fraction = 1.0
self.learning_rate = 1e-5
self.loss_limit = 0.01
self.max_epochs = 5.0
# We only create the replay-memory when we are training the agent,
# because it requires a lot of RAM. The image-frames from the
# game-environment are resized to 105 x 80 pixels gray-scale,
# and each state has 2 channels (one for the recent image-frame
# of the game-environment, and one for the motion-trace).
# Each pixel is 1 byte, so this replay-memory needs more than
# 3 GB RAM (105 x 80 x 2 x 200000 bytes).
self.replay_memory = ReplayMemory.ReplayMemory(size=self.replay_size, state_shape=Monty.get_nn_state_shape(), num_actions=self.num_actions)
# Create the Neural Network used for estimating Q-values.
self.model = NeuralNetwork.NeuralNetwork(num_actions=self.num_actions,
state_shape=Monty.get_nn_state_shape(),
checkpoint_dir=CommandCenter.model_path,
worker=self.worker)
# Log of the rewards obtained in each episode during calls to run()
self.episode_rewards = []
self.agent_model_version = CommandCenter.model_version
def reset_episode_rewards(self):
"""Reset the log of episode-rewards."""
self.episode_rewards = []
def get_action_name(self, action):
"""Return the name of an action."""
return self.action_names[action]
def get_lives(self):
"""Get the number of lives the agent has in the game-environment."""
return self.env.unwrapped.ale.lives()
def check_end_of_life(self, num_lives, end_episode):
# Determine if a life was lost in this step.
num_lives_new = self.get_lives()
end_life = (num_lives_new < num_lives)
if end_life:
end_episode = True
return end_life, end_episode
def run_worker(self, Monty, CommandCenter):
"""
Run the game-environment and use the Neural Network to decide
which actions to take in each step through Q-value estimates.
:param Monty:
Game-state wrapper that builds the Neural Network states, rewards,
epsilon values and the training arrays used by this worker.
:param CommandCenter:
Provides the data/model paths and the current model version whose
checkpoints this worker should load.
"""
# This will cause a reset in the first iteration of the following loop.
end_episode = True
reward_episode = 0.0
num_lives = 0
# Counter for the number of states we have processed.
# This is stored in the TensorFlow graph so it can be
# saved and reloaded along with the checkpoint.
count_states = self.model.get_count_states()
# Counter for the number of episodes we have processed.
count_episodes = self.model.get_count_episodes()
sess_episodes = 0
try:
training_array = np.load(CommandCenter.data_path + 'TrainingArray.npy')
epsilon_array = np.load(CommandCenter.data_path + 'EpsilonArray.npy')
except:
training_array, epsilon_array = Monty.create_training_array()
Monty.send_training_array(training_array, epsilon_array)
while True:
if end_episode:
# Reset the game-environment and get the first image-frame.
img = self.env.reset()
# Reset the reward for the entire episode to zero.
# This is only used for printing statistics.
reward_episode = 0.0
# Increase the counter for the number of episodes.
# This counter is stored inside the TensorFlow graph
# so it can be saved and restored with the checkpoint.
count_episodes = self.model.increase_count_episodes()
# Get the number of lives that the agent has left in this episode.
num_lives = self.get_lives()
# Keep a record of current training array
training_array, epsilon_array = Monty.get_training_array()
# Reset Monty Agent (This deletes the training array)
Monty.reset(img, sess_episodes)
# Send training array to monty
Monty.send_training_array(training_array, epsilon_array)
sess_episodes += 1
# Get the state of the game-environment from the motion-tracer.
# The state has two images: (1) The last image-frame from the game
# and (2) a motion-trace that shows movement trajectories.
state = Monty.get_nn_state()
if self.verbose:
Monty.view_states()
# Use the Neural Network to estimate the Q-values for the state.
# Note that the function assumes an array of states and returns
# a 2-dim array of Q-values, but we just have a single state here.
q_values = self.model.get_q_values(states=[state])[0]
if self.render:
epsilon = 0.03
else:
epsilon = Monty.get_epsilon()
# Determine the action that the agent must take in the game-environment.
# The epsilon is just used for printing further below.
action, epsilon = self.epsilon_greedy.get_action(q_values=q_values,
iteration=count_states,
training=self.worker,
epsilon_override=epsilon)
# Take a step in the game-environment using the given action.
img, _, end_episode, info = self.env.step(action=action)
# Check if end of game
episode_failed, end_episode = self.check_end_of_life(num_lives=num_lives, end_episode=end_episode)
# Update Monty
Monty.update(img, episode_failed)
# Update Reward
reward_episode += Monty.get_reward()
if Monty.end_due_to_reward:
end_episode = True
if Monty.end_due_to_time:
print('Ended Due To Time')
end_episode = True
# Increase the counter for the number of states that have been processed.
count_states = self.model.increase_count_states()
# If we want to render the game
if self.render:
self.env.render()
time.sleep(0.005)
# Add the state of the game-environment to the replay-memory.
self.replay_memory.add(state=state,
q_values=q_values,
action=action,
reward=Monty.reward,
end_life=episode_failed,
end_episode=end_episode)
# When the replay-memory is sufficiently full.
if self.replay_memory.is_full():
# Update all Q-values in the replay-memory through a backwards-sweep.
self.replay_memory.update_all_q_values()
# Log statistics for the Q-values to file.
if self.use_logging:
self.log_q_values.write(count_episodes=count_episodes,
count_states=count_states,
q_values=self.replay_memory.q_values)
# Get the control parameters for optimization of the Neural Network.
# These are changed linearly depending on the state-counter.
# learning_rate = self.learning_rate_control.get_value(iteration=count_states)
# loss_limit = self.loss_limit_control.get_value(iteration=count_states)
# max_epochs = self.max_epochs_control.get_value(iteration=count_states)
# Save Training and Epsilon Arrays
np.save(CommandCenter.data_path + 'TrainingArray.npy', training_array)
np.save(CommandCenter.data_path + 'EpsilonArray.npy', epsilon_array)
# Save Replay Memory
data_save_folder = CommandCenter.data_path + 'data_' +str(time.time())[:10] + '/'
if not os.path.exists(data_save_folder):
os.makedirs(data_save_folder)
try:
self.replay_memory.save_numpy_arrays(data_save_folder)
except FileNotFoundError:
pass
self.replay_memory.reset()
CommandCenter.update_worker_paths()
if CommandCenter.model_version != self.agent_model_version:
loaded = False
while loaded is False:
Logging.update_paths(env_name=self.env_name, checkpoint_base_dir=CommandCenter.data_path)
self.model.update_checkpoint_dir(CommandCenter.model_path)
loaded = self.model.load_checkpoint(reset_if_error=False)
if loaded is True:
self.agent_model_version = CommandCenter.model_version
else:
print('Tried to load model. Trying again.')
CommandCenter.update_worker_paths()
time.sleep(5)
if end_episode:
# Add the episode's reward to a list for calculating statistics.
self.episode_rewards.append(reward_episode)
# Mean reward of the last 30 episodes.
if len(self.episode_rewards) == 0:
# The list of rewards is empty.
reward_mean = 0.0
else:
reward_mean = np.mean(self.episode_rewards[-30:])
if end_episode:
# Log reward to file.
if self.use_logging:
self.log_reward.write(count_episodes=count_episodes,
count_states=count_states,
reward_episode=reward_episode,
reward_mean=reward_mean)
# Print reward to screen.
msg = "{0:4}:{1}\t Epsilon: {2:4.2f}\t Reward: {3:.1f}\t Episode Mean: {4:.1f}"
print(msg.format(count_episodes, count_states, epsilon,
reward_episode, reward_mean))
# elif (Monty.reward != 0.0 or episode_failed or end_episode):
# # Print Q-values and reward to screen.
# msg = "{0:4}:{1}\tQ-min: {2:5.3f}\tQ-max: {3:5.3f}\tSess_Eps: {4}\tReward: {5:.1f}\tEpisode Mean: {6:.1f}"
# print(msg.format(count_episodes, count_states, np.min(q_values),
# np.max(q_values), sess_episodes, reward_episode, reward_mean))
def run_trainer(self, CommandCenter):
self.replay_memory.num_used = self.replay_size
while True:
CommandCenter.trainer_find_data_filepaths()
# Check that all_valid_data_paths is not empty
if CommandCenter.all_valid_data_paths:
increment = CommandCenter.calc_increment_trainer()
if increment:
CommandCenter.delete_old_data_path()
print('Total Folders Deleted = {}'.format(CommandCenter.deleted_folders))
# Save a checkpoint of the Neural Network so we can reload it.
self.model.update_next_version_checkpoint_dir(CommandCenter.model_path)
self.model.save_checkpoint()
for data_path in CommandCenter.all_valid_data_paths:
print('Using Data from {}'.format(data_path))
try:
self.replay_memory.load_numpy_arrays(data_path=data_path)
# Perform an optimization run on the Neural Network so as to
# improve the estimates for the Q-values.
# This will sample random batches from the replay-memory.
self.model.optimize(replay_memory=self.replay_memory,
learning_rate=self.learning_rate,
loss_limit=self.loss_limit,
max_epochs=self.max_epochs)
shutil.rmtree(data_path)
except:
try:
shutil.rmtree(data_path)
except:
pass
else:
print('Waiting for Data for Version {}'.format(CommandCenter.model_version))
time.sleep(5)
```
#### File: DeepLearning/Reinforcement_Learning/Logging.py
```python
import numpy as np
import sys
import os
import csv
# Default base-directory for the checkpoints and log-files.
# The environment-name will be appended to this.
# Combination of base-dir and environment-name.
checkpoint_dir = None
# Full path for the log-file for rewards.
log_reward_path = None
# Full path for the log-file for Q-values.
log_q_values_path = None
def update_paths(env_name, checkpoint_base_dir):
"""
Update the path-names for the checkpoint-dir and log-files.
Call this after you have changed checkpoint_base_dir and
before you create the Neural Network.
:param env_name:
Name of the game-environment you will use in OpenAI Gym.
"""
global checkpoint_dir
global log_reward_path
global log_q_values_path
# Add the environment-name to the checkpoint-dir.
checkpoint_dir = checkpoint_base_dir
# Create the checkpoint-dir if it does not already exist.
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# File-path for the log-file for episode rewards.
log_reward_path = os.path.join(checkpoint_dir, "log_reward.txt")
# File-path for the log-file for Q-values.
log_q_values_path = os.path.join(checkpoint_dir, "log_q_values.txt")
########################################################################
# Classes used for logging data during training.
class Log:
"""
Base-class for logging data to a text-file during training.
It is possible to use TensorFlow / TensorBoard for this,
but it is quite awkward to implement, as it was intended
for logging variables and other aspects of the TensorFlow graph.
We want to log the reward and Q-values which are not in that graph.
"""
def __init__(self, file_path):
"""Set the path for the log-file. Nothing is saved or loaded yet."""
# Path for the log-file.
self.file_path = file_path
# Data to be read from the log-file by the _read() function.
self.count_episodes = None
self.count_states = None
self.data = None
def _write(self, count_episodes, count_states, msg):
"""
Write a line to the log-file. This is only called by sub-classes.
:param count_episodes:
Counter for the number of episodes processed during training.
:param count_states:
Counter for the number of states processed during training.
:param msg:
Message to write in the log.
"""
with open(file=self.file_path, mode='a', buffering=1) as file:
msg_annotated = "{0}\t{1}\t{2}\n".format(count_episodes, count_states, msg)
file.write(msg_annotated)
def _read(self):
"""
Read the log-file into memory so it can be plotted.
It sets self.count_episodes, self.count_states and self.data
"""
# Open and read the log-file.
with open(self.file_path) as f:
reader = csv.reader(f, delimiter="\t")
self.count_episodes, self.count_states, *data = zip(*reader)
# Convert the remaining log-data to a NumPy float-array.
self.data = np.array(data, dtype='float')
class LogReward(Log):
"""Log the rewards obtained for episodes during training."""
def __init__(self):
# These will be set in read() below.
self.episode = None
self.mean = None
# Super-class init.
Log.__init__(self, file_path=log_reward_path)
def write(self, count_episodes, count_states, reward_episode, reward_mean):
"""
Write the episode and mean reward to file.
:param count_episodes:
Counter for the number of episodes processed during training.
:param count_states:
Counter for the number of states processed during training.
:param reward_episode:
Reward for one episode.
:param reward_mean:
Mean reward for the last e.g. 30 episodes.
"""
msg = "{0:.1f}\t{1:.1f}".format(reward_episode, reward_mean)
self._write(count_episodes=count_episodes, count_states=count_states, msg=msg)
def read(self):
"""
Read the log-file into memory so it can be plotted.
It sets self.count_episodes, self.count_states, self.episode and self.mean
"""
# Read the log-file using the super-class.
self._read()
# Get the episode reward.
self.episode = self.data[0]
# Get the mean reward.
self.mean = self.data[1]
class LogQValues(Log):
"""Log the Q-Values during training."""
def __init__(self):
# These will be set in read() below.
self.min = None
self.mean = None
self.max = None
self.std = None
# Super-class init.
Log.__init__(self, file_path=log_q_values_path)
def write(self, count_episodes, count_states, q_values):
"""
Write basic statistics for the Q-values to file.
:param count_episodes:
Counter for the number of episodes processed during training.
:param count_states:
Counter for the number of states processed during training.
:param q_values:
Numpy array with Q-values from the replay-memory.
"""
msg = "{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}".format(np.min(q_values),
np.mean(q_values),
np.max(q_values),
np.std(q_values))
self._write(count_episodes=count_episodes,
count_states=count_states,
msg=msg)
def read(self):
"""
Read the log-file into memory so it can be plotted.
It sets self.count_episodes, self.count_states, self.min / mean / max / std.
"""
# Read the log-file using the super-class.
self._read()
# Get the logged statistics for the Q-values.
self.min = self.data[0]
self.mean = self.data[1]
self.max = self.data[2]
self.std = self.data[3]
########################################################################
def print_progress(msg):
"""
Print progress on a single line and overwrite the line.
Used during optimization.
"""
sys.stdout.write("\r" + msg)
sys.stdout.flush()
``` |
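A small usage sketch for the logging helpers above; the environment name and checkpoint directory are placeholders:
```python
# Hypothetical usage: point the log files at a checkpoint directory, then append and re-read one entry.
update_paths(env_name='Breakout-v0', checkpoint_base_dir='./checkpoints/Breakout-v0')
log_reward = LogReward()                      # must be created after update_paths()
log_reward.write(count_episodes=1, count_states=1000, reward_episode=12.0, reward_mean=12.0)
log_reward.read()                             # fills log_reward.episode and log_reward.mean
print(log_reward.episode, log_reward.mean)
```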
{
"source": "jordao76/rebalance",
"score": 3
} |
#### File: rebalance/rebalance/googlefinance.py
```python
from datetime import datetime
from decimal import Decimal
import requests
import numpy as np
from rebalance.instrument import *
ONE_DAY_IN_SECONDS = 24 * 60 * 60
class GoogleFinanceClient:
# http://www.networkerror.org/component/content/article/1-technical-wootness/44-googles-undocumented-finance-api.html
def get_prices(self, instrument, exchange='TSE', interval=ONE_DAY_IN_SECONDS):
period = '1Y' # 1 year
fields = 'd,c' # d=datetime, c=closing price
params = {'q':instrument,'x':exchange,'p':period,'i':interval,'f':fields}
price_data = requests.get("https://www.google.com/finance/getprices", params=params)
lines = price_data.text.splitlines()
return self.parse_prices(lines, interval)
def parse_prices(self, lines, interval=ONE_DAY_IN_SECONDS):
dates, prices = [], []
start_epoch = 0
for price in lines:
cols = price.split(",")
curr_epoch = 0
if cols[0][0] == 'a':
curr_epoch = start_epoch = int(cols[0][1:])
elif cols[0][0].isdigit():
curr_epoch = start_epoch + int(cols[0]) * interval
if curr_epoch > 0:
dates.append([datetime.fromtimestamp(curr_epoch).date()])
prices.append([Decimal(cols[1])])
return np.array(dates), np.array(prices)
```
#### File: rebalance/rebalance/instrument.py
```python
from collections import namedtuple
import numpy as np
from datetime import date
from decimal import Decimal
from rebalance.plotting import Plotter
from rebalance.utils import dates_till_target
##############
class Instrument(namedtuple('Instrument', 'symbol, name, exchange')):
price_service = None
__slots__ = ()
def __repr__(self):
return self.symbol
def get_prices(self):
# daily prices for a year
return Instrument.price_service.get_prices(
self.symbol, exchange=self.exchange)
def get_returns(self, investment):
# daily returns for a year with initial investment
# note: dividends NOT accounted for
dates, prices = self.get_prices()
# calculate how many shares are bought on the first day (first price)
# (using fractional shares)
shares = investment / prices[0][0]
returns = prices * shares
return dates, returns
def plot_prices(self, plotter=None):
dates, prices = self.get_prices()
return self.__plot(dates, prices, plotter)
def plot_returns(self, investment, plotter=None):
dates, returns = self.get_returns(investment)
return self.__plot(dates, returns, plotter)
def __plot(self, dates, prices, plotter):
if plotter == None: plotter = Plotter()
plotter.plot_prices(dates, prices, label=self.symbol, title=self.symbol)
return plotter
Instrument.__new__.__defaults__ = ('', 'TSE')
##############
class Cash(Instrument):
__slots__ = ()
def get_prices(self):
# daily prices for a year
dates = dates_till_target(days=365, target=date.today())
prices = np.full((365,1), Decimal(1))
return dates, prices
CASH = Cash('CASH', 'Cash', None)
##############
```
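A tiny sketch of the two instrument types above; `CASH` works standalone, while a real `Instrument` first needs `Instrument.price_service` set (for example to the `GoogleFinanceClient` from the previous file). The ticker below is an assumption, not taken from the repository:
```python
from rebalance.instrument import Instrument, CASH
from rebalance.googlefinance import GoogleFinanceClient

# CASH needs no price service: one year of daily prices, all Decimal('1').
dates, prices = CASH.get_prices()
print(len(dates), prices[0][0])

# A market instrument is wired up like this (symbol assumed for illustration).
Instrument.price_service = GoogleFinanceClient()
xiu = Instrument('XIU', 'iShares S&P/TSX 60', 'TSE')
```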
#### File: rebalance/rebalance/portfolio.py
```python
from collections import namedtuple, defaultdict
from decimal import Decimal
from rebalance.instrument import Instrument, CASH
from rebalance.plotting import Plotter
from rebalance.utils import fill_price_gaps
import matplotlib.pyplot as plt
##############
class Action(namedtuple('Action', 'direction, name')):
__slots__ = ()
def __repr__(self): return self.name
# when sorting, SELL comes before BUY
# first sell, then buy
SELL = Action(-1, 'Sell')
BUY = Action(1, 'Buy')
##############
Order = namedtuple('Order', 'action, instrument, amount')
##############
ZERO = Decimal(0)
class Portfolio:
def __init__(self, positions):
self.total = sum(positions.values())
self.positions = positions
self.allocations = self.__calc_allocations()
def __calc_allocations(self):
res = defaultdict(Decimal)
for instrument, value in self.positions.items():
res[instrument] += value / self.total * 100
return res
def is_balanced(self, model_portfolio, threshold=ZERO):
for _, value_offset, target_value in self.__diff(model_portfolio):
# check that the difference to the target_value is beyond the threshold
if abs(value_offset) / target_value > threshold / 100:
return False
return True
def rebalance(self, model_portfolio, threshold=ZERO):
if self.is_balanced(model_portfolio, threshold): return []
orders = []
for instrument, value_offset, _ in self.__diff(model_portfolio):
if instrument != CASH:
action = (BUY if value_offset > 0 else SELL)
orders.append(Order(action, instrument, abs(value_offset)))
return sorted(orders)
def __diff(self, model_portfolio):
target_allocations = self.__resolve_target_allocations(model_portfolio)
res = []
for instrument, target_alloc in target_allocations.items():
curr_alloc = self.allocations[instrument]
alloc_offset = target_alloc - curr_alloc
value_offset = self.total * alloc_offset / 100
if value_offset != 0:
value = self.positions.get(instrument, ZERO)
target_value = value + value_offset
# the instrument is off by value_offset to its target_value
res.append((instrument, value_offset, target_value))
return res
def __resolve_target_allocations(self, model_portfolio):
# so that all relevant instruments are covered,
# start with the model portfolio allocations,
# then add the instruments not present there,
# but present in this portfolio, with a ZERO allocation
res = dict(model_portfolio.allocations)
for instrument in self.positions.keys():
if instrument not in res:
res[instrument] = ZERO
return res
def plot(self, plt=plt):
symbols = list(self.positions.keys())
values = list(self.positions.values())
index = 0
def get_text(pct):
nonlocal values, index
value = values[index]
index += 1
return '${:,.2f} ({:.2f}%)'.format(value, pct)
fig, ax = plt.subplots()
ax.pie(values, labels=symbols, autopct=get_text)
ax.axis('equal')
def get_returns(self):
dates, returns = [], ZERO
for instrument, value in self.positions.items():
if instrument == CASH: continue
curr_dates, prices = fill_price_gaps(*instrument.get_returns(value))
if len(dates) == 0:
dates = curr_dates
else:
assert(len(dates) == len(curr_dates))
assert((dates == curr_dates).all())
returns += prices
return self.__plus_cash(dates, returns)
def __plus_cash(self, dates, returns):
cash_amount = self.positions.get(CASH, ZERO)
if len(dates) == 0:
dates, returns = CASH.get_returns(cash_amount)
else:
returns += cash_amount
return dates, returns
def plot_returns(self, plotter=None):
dates, returns = self.get_returns()
if plotter == None: plotter = Plotter()
plotter.plot_prices(dates, returns, label='Total')
return plotter
##############
``` |
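A rebalancing sketch built only from the classes above; the instruments and dollar amounts are made up for illustration:
```python
from decimal import Decimal
from rebalance.instrument import Instrument, CASH
from rebalance.portfolio import Portfolio

xiu = Instrument('XIU', 'iShares S&P/TSX 60', 'TSE')
xbb = Instrument('XBB', 'iShares Core Canadian Bond', 'TSE')

current = Portfolio({xiu: Decimal('7000'), xbb: Decimal('2000'), CASH: Decimal('1000')})
model = Portfolio({xiu: Decimal('60'), xbb: Decimal('40')})   # only the ratios matter

print(current.is_balanced(model, threshold=Decimal('5')))     # False: XIU is 10 points overweight
for action, instrument, amount in current.rebalance(model):
    print(action, instrument, round(amount, 2))               # sells are listed before buys
```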
{
"source": "JordaoA/Python",
"score": 3
} |
#### File: Python/sorts/cocktailSort.py
```python
def pureCocktailSort(a):
length = len(a)
semaphore = True
start = 0
end = length-1
while (semaphore == True):
semaphore = False
for i in range (start, end):
if (a[i] > a[i+1]) :
a[i], a[i+1]= a[i+1], a[i]
semaphore=True
if (semaphore == False):
break
semaphore = False
end = end-1
for i in range(end-1, start-1,-1):
if (a[i] > a[i+1]):
a[i], a[i+1] = a[i+1], a[i]
semaphore = True
start = start+1
``` |
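A quick in-place usage check for the sort above:
```python
# pureCocktailSort sorts the list in place and returns None.
data = [5, 1, 4, 2, 8, 0, 2]
pureCocktailSort(data)
print(data)   # -> [0, 1, 2, 2, 4, 5, 8]
```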
{
"source": "jordaycs/rippleproto",
"score": 2
} |
#### File: rippleproto/database/schemas.py
```python
create_TS_S = """CREATE TABLE IF NOT EXISTS '{}' (
id INTEGER PRIMARY KEY,
time TEXT ,
value NUMERIC
)
"""
create_TS_A = """CREATE TABLE IF NOT EXISTS '{}' (
id INTEGER PRIMARY KEY,
time_from TEXT ,
time_to TEXT ,
title TEXT,
text TEXT
);
"""
create_N_N = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
node TEXT
);
"""
create_N_E = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
source TEXT ,
target TEXT
);
"""
create_N_A = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
source TEXT ,
target TEXT ,
title TEXT,
text TEXT
);
"""
create_TN_N = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
node TEXT,
time_to TEXT ,
time_from TEXT
);
"""
create_TN_E = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
source TEXT ,
target TEXT ,
time_to TEXT ,
time_from TEXT
);
"""
create_TN_A = """CREATE TABLE IF NOT EXISTS {} (
id INTEGER PRIMARY KEY,
source TEXT,
target TEXT,
time_from TEXT,
time_to TEXT,
title TEXT,
text TEXT
);
"""
# Contains all the annotations
create_G_A = """CREATE TABLE IF NOT EXISTS all_annotations (
id INTEGER PRIMARY KEY,
source TEXT,
target TEXT,
collection TEXT,
series TEXT,
time_from TEXT,
time_to TEXT,
title TEXT,
text TEXT,
property TEXT,
type TEXT
)
"""
def update_G_A(obj):
return """UPDATE all_annotations SET {} = '{}', {} = '{}',{} = '{}',{} = '{}',{} = '{}',{} = '{}',{} = '{}',{} = '{}',{} = '{}',{} = '{}'
WHERE id = {};""".format(
"source", obj.get("source", ""),
"target", obj.get("target", ""),
"collection", obj.get("collection", ""),
"series", obj.get("series", ""),
"time_from", obj.get("time_from", ""),
"time_to", obj.get("time_to", ""),
"title", obj.get("title", ""),
"text", obj.get("text", ""),
"property", obj.get("property", ""),
"type", obj.get("type", ""),
obj["dbId"]
)
def insert_G_A(obj):
return """INSERT INTO all_annotations ('source','target','collection','series','time_from','time_to','title','text','property','type') VALUES ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}');""".format(
obj.get("source", ""),
obj.get("target", ""),
obj.get("collection", ""),
obj.get("series", ""),
obj.get("time_from", ""),
obj.get("time_to", ""),
obj.get("title", ""),
obj.get("text", ""),
obj.get("property", ""),
obj.get("type", "")
)
``` |
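A small sqlite3 sketch exercising the statement builders above; the table and column names come straight from the schema strings, while the record values and the import path are assumptions:
```python
import sqlite3
# from rippleproto.database.schemas import create_G_A, insert_G_A, update_G_A  # import path assumed

conn = sqlite3.connect(':memory:')
conn.execute(create_G_A)                       # create the all_annotations table
record = {'series': 'cpu_load', 'time_from': '2019-01-01', 'time_to': '2019-01-02',
          'title': 'spike', 'text': 'weekly batch job', 'type': 'timeseries'}
conn.execute(insert_G_A(record))               # missing keys default to ''
record['dbId'] = 1                             # first inserted row gets id 1
record['title'] = 'spike (resolved)'
conn.execute(update_G_A(record))
print(conn.execute('SELECT title FROM all_annotations').fetchall())
```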
{
"source": "Jordemar-D-Bousquet/Exercicios_Python",
"score": 4
} |
#### File: Jordemar-D-Bousquet/Exercicios_Python/ex105.py
```python
def notas(*n,sit= False):
'''
-> Function to read several grades from a class of students and show the results
:param n: one or more student grades
:param sit: (optional) also reports the situation of the average ('Ruim', 'Razoável' or 'Boa')
:return: a dict with the number of grades, the highest grade, the lowest grade, the average and, optionally, the situation
'''
d = {}
d['total'] = len(n)
d['maior'] = max(n)
d['menor'] = min(n)
d['media'] = sum(n)/len(n)
if sit == True:
if d['media'] < 5:
d['situação'] = 'Ruim'
if d['media'] >=5 and d['media'] < 7:
d['situação'] = 'Razoável'
if d['media'] >= 7:
d['situação'] = 'Boa'
return (d)
# Main program
resp = notas(2.5,2,1,2, sit=True)
print(resp)
#help(notas)
```
#### File: Exercicios_Python/ex108/moeda.py
```python
def aumentar (preco=0,taxa=0):
res = preco + (preco * taxa/100)
return res
def diminuir(preco=0,taxa=0):
res = preco - (preco * taxa/100)
return res
def dobro(preco=0):
res = preco*2
return res
def metade(preco=0):
res = preco/2
return res
def moeda(preco=0, moeda='R$'):
return f'{moeda}{preco:>.2f}'.replace('.',',')
``` |
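A quick usage sketch for the helpers above, assuming the module is importable as `moeda` (the functions keep their original Portuguese names):
```python
import moeda

preco = 100.0
print(moeda.moeda(moeda.aumentar(preco, 10)))   # R$110,00
print(moeda.moeda(moeda.diminuir(preco, 10)))   # R$90,00
print(moeda.moeda(moeda.dobro(preco)))          # R$200,00
print(moeda.moeda(moeda.metade(preco)))         # R$50,00
```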
{
"source": "Jorden3/KeylaKam-feeder",
"score": 2
} |
#### File: Jorden3/KeylaKam-feeder/piServer.py
```python
from flask import Flask
from feeder import *
app = Flask(__name__)
@app.route("/")
def feed():
run = motorRun()
return {"How's it": run}
if __name__ == "__main__":
app.run()
``` |
{
"source": "jordenson/nakama-cpp",
"score": 2
} |
#### File: nakama-cpp/gen_api/generate_cpp_api.py
```python
from __future__ import print_function
import os
import sys
import subprocess
import argparse
import platform
def getEnvVar(name):
if name in os.environ:
return os.environ[name]
return ''
parser = argparse.ArgumentParser(description='Nakama C++ API generator')
parser.add_argument('-n', '--nakama', help='Nakama server sources')
parser.add_argument('-g', '--gateway', help='grpc-gateway sources')
args = parser.parse_args()
def getArgOrEnvVar(env_var_name, arg_value):
if arg_value:
value = arg_value
else:
value = getEnvVar(env_var_name)
if not value:
print('Error: missing "' + env_var_name + '" env variable')
sys.exit(-1)
return value
# https://github.com/heroiclabs/nakama
NAKAMA = getArgOrEnvVar('NAKAMA', args.nakama)
# https://github.com/grpc-ecosystem/grpc-gateway
GRPC_GATEWAY = getArgOrEnvVar('GRPC_GATEWAY', args.gateway)
def path(p):
return os.path.normpath(p)
def get_host_arch():
import platform
bits, _ = platform.architecture()
if bits == '64bit':
arch = 'x64'
elif bits == '32bit':
arch = 'x86'
else:
arch = bits
return arch
NAKAMA_CPP = os.path.abspath('./..')
GRPC = path(NAKAMA_CPP + '/third_party/grpc')
GOOGLEAPIS = path(GRPC_GATEWAY + '/third_party/googleapis')
PROTOBUF_SRC = path(GRPC + '/third_party/protobuf/src')
OUT = os.path.abspath('cppout')
is_windows = platform.system() == 'Windows'
is_mac = platform.system() == 'Darwin'
if is_windows:
build_dir = NAKAMA_CPP + '/build/windows/build/v142_x86'
elif is_mac:
build_dir_debug = NAKAMA_CPP + '/build/mac/build/Debug'
build_dir_release = NAKAMA_CPP + '/build/mac/build/Release'
else:
# linux
arch = get_host_arch()
build_dir_debug = NAKAMA_CPP + '/build/linux/build/Debug_' + arch
build_dir_release = NAKAMA_CPP + '/build/linux/build/Release_' + arch
def find_grpc_cpp_plugin():
if is_windows:
grpc_cpp_plugin = path(build_dir + '/third_party/grpc/Debug/grpc_cpp_plugin.exe')
if not os.path.exists(grpc_cpp_plugin):
grpc_cpp_plugin = path(build_dir + '/third_party/grpc/Release/grpc_cpp_plugin.exe')
else:
grpc_cpp_plugin = path(build_dir_release + '/third_party/grpc/grpc_cpp_plugin')
if not os.path.exists(grpc_cpp_plugin):
grpc_cpp_plugin = path(build_dir_debug + '/third_party/grpc/grpc_cpp_plugin')
if not os.path.exists(grpc_cpp_plugin):
print('grpc_cpp_plugin not found')
print('Please build for desktop OS first')
sys.exit(-1)
return grpc_cpp_plugin
def find_protoc():
if is_windows:
protoc = path(build_dir + '/third_party/grpc/third_party/protobuf/Debug/protoc.exe')
if not os.path.exists(protoc):
protoc = path(build_dir + '/third_party/grpc/third_party/protobuf/Release/protoc.exe')
else:
protoc = path(build_dir_release + '/third_party/grpc/third_party/protobuf/protoc')
if not os.path.exists(protoc):
protoc = path(build_dir_debug + '/third_party/grpc/third_party/protobuf/protoc')
if not os.path.exists(protoc):
print('protoc not found')
print('Please build for desktop OS first')
sys.exit(-1)
return protoc
def call(commands, shell=False):
#print('call', str(commands))
res = subprocess.call(commands, shell=shell)
if res != 0:
sys.exit(-1)
def check_required_folder(folder):
if not os.path.exists(folder):
print('ERROR: not exist', folder)
sys.exit(-1)
def makedirs(path):
if not os.path.exists(path):
os.makedirs(path)
def mklink(link, target):
if not os.path.exists(link):
if is_windows:
call(['mklink', link, target], shell=True)
else:
call(['ln', '-s', target, link], shell=False)
GRPC_CPP_PLUGIN = find_grpc_cpp_plugin()
PROTOC = find_protoc()
check_required_folder(NAKAMA)
check_required_folder(GRPC)
check_required_folder(GRPC_GATEWAY)
check_required_folder(GOOGLEAPIS)
check_required_folder(GRPC_CPP_PLUGIN)
check_required_folder(PROTOC)
check_required_folder(PROTOBUF_SRC)
CUR_DIR = os.path.abspath('.')
makedirs(OUT)
makedirs(path(OUT + '/google/api'))
makedirs(path(OUT + '/google/rpc'))
makedirs(path(CUR_DIR + '/github.com/heroiclabs/nakama-common/api'))
makedirs(path(CUR_DIR + '/github.com/heroiclabs/nakama/apigrpc'))
makedirs(path(CUR_DIR + '/github.com/heroiclabs/nakama-common/rtapi'))
mklink(path(CUR_DIR + '/github.com/heroiclabs/nakama-common/api/api.proto'), path(NAKAMA + '/vendor/github.com/heroiclabs/nakama-common/api/api.proto'))
mklink(path(CUR_DIR + '/github.com/heroiclabs/nakama/apigrpc/apigrpc.proto'), path(NAKAMA + '/apigrpc/apigrpc.proto'))
mklink(path(CUR_DIR + '/github.com/heroiclabs/nakama-common/rtapi/realtime.proto'), path(NAKAMA + '/vendor/github.com/heroiclabs/nakama-common/rtapi/realtime.proto'))
print('generating apigrpc')
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--grpc_out=' + OUT, '--plugin=protoc-gen-grpc=' + GRPC_CPP_PLUGIN, path('github.com/heroiclabs/nakama/apigrpc/apigrpc.proto')])
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + OUT, path('github.com/heroiclabs/nakama/apigrpc/apigrpc.proto')])
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + OUT, path('github.com/heroiclabs/nakama-common/api/api.proto')])
os.chdir(path(GOOGLEAPIS + '/google/rpc'))
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + path(OUT + '/google/rpc'), 'status.proto'])
os.chdir(path(GOOGLEAPIS + '/google/api'))
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + path(OUT + '/google/api'), 'annotations.proto'])
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + path(OUT + '/google/api'), 'http.proto'])
os.chdir(CUR_DIR)
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + OUT, path(GRPC_GATEWAY + '/protoc-gen-swagger/options/annotations.proto')])
call([PROTOC, '-I.', '-I' + GRPC_GATEWAY, '-I' + GOOGLEAPIS, '-I' + PROTOBUF_SRC, '--cpp_out=' + OUT, path(GRPC_GATEWAY + '/protoc-gen-swagger/options/openapiv2.proto')])
print('generating rtapi')
call([PROTOC, '-I.', '-I' + PROTOBUF_SRC, '--cpp_out=' + OUT, path('github.com/heroiclabs/nakama-common/rtapi/realtime.proto')])
print('done.')
``` |
{
"source": "jordeu/gendas",
"score": 2
} |
#### File: gendas/examples/rank.py
```python
from collections import defaultdict
from gendas.engine import Gendas
from gendas.experimental import HG19Source
# Create a Gendas engine
gd = Gendas('data/gendas.conf')
gd['hg19'] = HG19Source()
def mut_rank(gd, baserow):
# Get scores of all the position in this gene group by tri>alt
context = defaultdict(list)
for r in gd['cadd'].merge(gd['hg19']):
key = "{}>{}".format(r['hg19'][-1:1], r['cadd']['ALT'])
context[key].append(r['cadd']['PHRED'])
rows = []
for m in gd['variants'].merge(gd['cadd'], on=['REF', 'ALT']).merge(gd['hg19']):
key = "{}>{}".format(m['hg19'][-1:1], m['variants']['ALT'])
ctx_scores = context[key]
# Create the output row
row = dict(baserow)
for k, v in m['variants'].items():
row[k] = v
row['KEY'] = key
row['SCORE'] = m['cadd']['PHRED']
row['CONTEXT'] = ctx_scores
rows.append(row)
return rows
for res in gd.groupby(gd['exons']['GENE']).aggregate_seq(mut_rank):
for r in res:
print(r)
break
```
#### File: gendas/gendas/engine.py
```python
import logging
import os
from os.path import join, dirname
from configobj import ConfigObj, Section
from pathos.pools import ProcessPool, ParallelPool
from tqdm import tqdm
from gendas.sources import GendasSource, TabixSource, IntervalTreeSource
from gendas.statistics import count
from gendas.utils import _get_chunks, _overlap_intervals
logger = logging.getLogger("gendas")
SOURCE_TYPES = {
'tabix': TabixSource,
'mem': IntervalTreeSource
}
class Gendas:
"""
Gendas main engine that represents all the loaded datasets.
All the queries start here.
"""
def __init__(self, configfile: 'str' = None, workers: 'int' = os.cpu_count(), servers=None, progress: 'int' = 20):
"""
Initialize a gendas engine
Args:
configfile: A file that contains the datasets definitions. Defaults to None, an empty engine.
workers: Total number of workers to parallelize the computations. Defaults to total number of cores.
servers: A list of servers where to distribute the parallelization. Defaults to only localhost.
progress: A smaller number means that gendas will report progress more often. Defaults to 20.
"""
self.workers = workers
self.servers = servers
self.progress = progress
self.sources = {}
if configfile is not None:
if not os.path.exists(configfile):
raise FileNotFoundError("File {} not found".format(configfile))
# Load datasets from config file
config = ConfigObj(configfile)
for key, section in config.items():
# Skip general parameters
if type(section) != Section:
continue
# Load the source type
source = SOURCE_TYPES[str(section['type']).lstrip().lower()]
# Create a source instance from the configuration
self[key] = source(
join(dirname(configfile), section['file']),
header=section.get('header', None),
ctypes=None if section.get('ctypes', None) is None else [eval(t) for t in section['ctypes']],
sequence=section['sequence'],
begin=section['begin'],
end=section['end'],
indices=section.get('indices', None)
)
def __setitem__(self, label: 'str', source: object) -> object:
"""
Add a source
Args:
label: Source name
source: Source object
"""
source.label = label
self.sources[label] = source
def __getitem__(self, source: 'str') -> 'GendasDataset':
"""
Returns a dataset view of the given source
Args:
source: The source key
Returns: A dataset view of the source
"""
return GendasDataset(self.sources[source], self)
def groupby(self, field) -> 'GendasGroupBy':
"""
Create a groupby view by the given field
Args:
field: The field to group
Returns: A groupby view of the current dataset
"""
return GendasGroupBy(field, self)
def pool(self):
"""
Returns: The computing pool to process run the queries
"""
if self.servers is None:
return ProcessPool(nodes=self.workers)
else:
return ParallelPool(nodes=self.workers, servers=self.servers)
class GendasSlice:
"""
A gendas slice is a view of only some genomic regions (segments) of the whole genome.
"""
def __init__(self, manager: Gendas, segments: list):
"""
Args:
manager: A gendas engine
segments: The genomic segments of interest. A list of tuples like (chromosome, start, end)
"""
self.manager = manager
self.segments = segments
def __getitem__(self, source):
"""
Args:
source: The source of interest
Returns: A slice view limiting to the given source
"""
return GendasSliceDataset(self.manager.sources[source], self)
class GendasDataset:
"""
A gendas dataset is the interface (a dataframe like structure) to access
and manipulate the data from a gendas source
"""
def __init__(self, source: GendasSource, manager: Gendas):
self.manager = manager
self.source = source
def __getitem__(self, field):
"""
Args:
field: The column of interest
Returns: A view of only one column of the dataset
"""
return GendasColumn(field, self)
def merge(self, right, on=None):
"""
Join this dataset with another dataset. By default (without any details) the datasets are always merge using
the genomic coordinates, but you can add more restrictions using the 'on' arguments.
Args:
right: The other dataset to join with.
on: A list of extra fields to add to the join (both datasets need to have the same field names)
Returns: A gendas merge view of both datasets
"""
return GendasMerge(self, right, on=on)
def map(self, fn):
"""
Apply a function to all the rows in this dataset.
Args:
fn: The function to apply to each row.
Returns: A generator to the results.
"""
return self._map_par(fn)
def _map_seq(self, fn):
"""
Sequential implementation of the map (for debugging purposes only)
"""
return map(fn, self)
def _map_par(self, fn):
"""
Parallel implementation of the map
"""
cores = self.manager.workers
logger.debug("ready to create pool")
with self.manager.pool() as executor:
logger.debug("pool created")
partitions = cores * self.manager.progress
mapfn = lambda p: list(map(fn, self.__iter__(p=(p, partitions))))
for items in executor.imap(mapfn, range(cores)):
for item in items:
yield item
def filter(self, fn):
"""
Filter the rows of this dataset
Args:
fn: A function that return true/false to filter the rows
Returns:
A filtered view of this dataset
"""
return GendasDatasetFilter(self, fn)
def count(self, progress=False):
"""
Count how many rows has this dataset
Args:
progress: True to show progress
Returns:
Total number of rows
"""
return self._count_seq(progress=progress)
def _count_seq(self, progress=False):
"""
count sequential implementation (for debugging purposes)
"""
logger.debug("Count sequencial")
if progress:
return count(tqdm(self))
return count(self)
def _count_par(self, progress=False):
"""
count parallel implementation
"""
logger.debug("Count parallell")
cores = self.manager.workers
logger.debug("ready to create pool")
with self.manager.pool() as executor:
logger.debug("pool created")
partitions = cores * self.manager.progress
mapfn = lambda p: list(map(count, self.__iter__(p=(p, partitions))))
it = executor.uimap(mapfn, range(cores))
if progress:
it = tqdm(it, total=cores)
return sum(it)
def head(self, n=10):
"""
Returns a generator to iterate the first 'n' rows.
Args:
n: Total number of rows to return
"""
for i, r in enumerate(self, start=1):
if i > n:
break
yield r
if i == n:
break
def _rows(self, p=None):
"""
Private implementation to iterate over this dataset
Args:
p: Partition parameter. Useful when iterating in parallel.
"""
return self.source.__iter__(p=p)
def __iter__(self, p=None):
"""
Iterate this dataset rows
"""
for r in self._rows(p=p):
yield r
def __len__(self):
"""
Returns: How many rows has this dataset
"""
return self.count()
class GendasDatasetFilter(GendasDataset):
"""
A filtered view of a Gendas dataset
"""
def __init__(self, dataset: 'GendasDataset', filter):
"""
Args:
dataset: Dataset to filter
filter: Filtering function
"""
super().__init__(dataset.source, dataset.manager)
self.dataset = dataset
self.filter = filter
def __iter__(self, p=None):
return filter(self.filter, self.dataset)
class GendasMergeDataset(GendasDataset):
"""
A dataset view of only one dataset in a gendas merges
"""
def __init__(self, merge: 'GendasMerge', source: 'GendasSource'):
super().__init__(source, merge.left.manager)
self.merge = merge
def _rows(self, p=None):
        for r in self.merge.__iter__(p=p):
yield r[self.source.label]
def __len__(self):
return len(self.merge)
class GendasMerge(GendasDataset):
"""
A view that joins two datasets
"""
def __init__(self, left: 'GendasDataset', right: 'GendasDataset', on: list):
"""
Args:
left: The left dataset
right: The right dataset
on: Extra columns to do the join (both datasets need to have the same column label)
"""
super().__init__(None, left.manager)
self.left = left
self.right = right
self.on = on
self.sources = {
left.source.label: left.source,
right.source.label: right.source
}
def __getitem__(self, source):
"""
        Get a dataset view of only one of the merged datasets.
Args:
source: The source label
Returns: A gendas dataset view
"""
return GendasMergeDataset(self, self.sources[source])
def merge(self, right, on=None):
"""
Merge more datasets with this merge view.
Args:
right: The right dataset to merge
on: A list with extra columns to do the join
Returns: A gendas merge view
"""
return GendasMultipleMerge(self, right, on=on)
def filter(self, fn):
"""
Filter a merge view
Args:
fn: The filtering function
Returns: A gendas merge view filtered
"""
return GendasMergeFilter(self, fn)
def _rows(self, p=None):
"""
        Iterate the left-most dataset, computing the 'inner join' merge
        with the other datasets
Args:
p: partition. Internal parameter to use when doing iterations in parallel
"""
for l_row in self.left.__iter__(p=p):
seq = l_row[self.left.source.sequence]
begin = l_row[self.left.source.begin]
end = l_row[self.left.source.end]
if self.on is not None:
l_key = [l_row[o] for o in self.on]
else:
l_key = None
r_rows = []
for r in self.right.source.query(seq, begin - 1, end):
if self.on is not None:
r_key = [r[o] for o in self.on]
if l_key != r_key:
continue
r_rows.append(r)
# Inner join
for r_row in r_rows:
yield {
self.left.source.label: l_row,
self.right.source.label: r_row
}
def __iter__(self, p=None):
"""
        Return a generator to iterate this merge
"""
for r in self._rows(p=p):
yield r
def __len__(self):
# TODO Implement the gendas merge count (without iterating everything)
raise NotImplementedError("GendasMerge.__len__ not implemented")
class GendasMergeFilter(GendasMerge):
"""
Filtered view of a gendas merge
"""
def __init__(self, merge: 'GendasMerge', filter):
super().__init__(merge.left, merge.right, merge.on)
self.merge = merge
self.filter = filter
def __iter__(self, p=None):
return filter(self.filter, self.merge)
class GendasMultipleMerge(GendasMerge):
"""
Merge more than two datasets.
"""
def __init__(self, merge: 'GendasMerge', right: 'GendasDataset', on: list):
super().__init__(merge.left, right, on)
self.merge = merge
# Add other sources
for k, v in merge.sources.items():
self.sources[k] = v
def _rows(self, p=None):
for m_row in self.merge.__iter__(p=p):
l_row = m_row[self.left.source.label]
seq = l_row[self.left.source.sequence]
begin, end = _overlap_intervals(
[(m_row[s.label][s.begin], m_row[s.label][s.end])
for s in self.merge.sources.values()]
)
if self.on is not None:
m_key = []
for o in self.on:
for label, source in self.merge.sources.items():
if o in source.header:
m_key.append(m_row[label][o])
else:
m_key = None
r_rows = []
for r in self.right.source.query(seq, begin - 1, end):
if self.on is not None:
r_key = [r[o] for o in self.on]
if m_key != r_key:
continue
r_rows.append(r)
# Inner join
for r_row in r_rows:
res = {k: v for k, v in m_row.items()}
res[self.right.source.label] = r_row
yield res
class GendasSliceDataset(GendasDataset):
"""
    A dataset view of a source filtered by a gendas slice (a definition of genomic regions)
"""
def __init__(self, source: 'GendasSource', slice: 'GendasSlice'):
super().__init__(source, slice.manager)
self.slice = slice
self.rows = None
def _rows(self, p=None):
if self.rows is None:
self.rows = []
for s in self.slice.segments:
self.rows += list(self.source.query(s[0], s[1], s[2]))
return self.rows
class GendasColumn:
"""
A view of only one column of a dataset
"""
def __init__(self, label: str, dataset: GendasDataset):
self.dataset = dataset
self.label = label
def __iter__(self):
for r in self.dataset:
yield r[self.label]
def __len__(self):
return len(self.dataset)
class GendasGroupBy:
"""
    A grouped view of a dataset that lets you apply an aggregating function to each group.
"""
def __init__(self, field: 'GendasColumn', manager: 'Gendas'):
"""
Args:
field: The column that defines a group
manager: A gendas manager
"""
self.field = field
self.manager = manager
def aggregate(self, aggregator, **kwargs):
"""
        Returns a generator with the result of applying the aggregator function
        to each group.
Args:
aggregator: An aggregation function
**kwargs: Extra parameters to pass to the aggregation function
Returns: A generator
"""
return self._aggregate_par(aggregator, **kwargs)
def _compute(self, aggregator, args, groups) -> dict:
label, segments = groups
v = {self.field.label: label}
partition = GendasSlice(self.manager, segments)
if type(aggregator) == dict:
for f, aggregator in aggregator.items():
if len(args) > 0:
v[f] = aggregator(partition, **args)
else:
v[f] = aggregator(partition)
else:
if len(args) > 0:
v = aggregator(partition, v, **args)
else:
v = aggregator(partition, v)
return v
def _compute_par(self, aggregator, args, groups):
result = [self._compute(aggregator, args, group) for group in groups]
return result
def _mapfn(self, r):
return self._compute_par(self.aggregator, self.kwargs, r)
def _aggregate_par(self, aggregator: dict, **kwargs):
"""
Parallel implementation of the aggregate method
"""
cores = self.manager.workers
regions = self.field.dataset.source.index(self.field.label)
logger.debug("Retrive valid column names")
labels = set(self.field)
logger.debug("Retrive regions to aggregate")
regions = list(filter(lambda r: r[0] in labels, regions))
regions_size = len(regions)
partitions = cores * self.manager.progress
chunksize = (regions_size // partitions) + 1
regions = list(_get_chunks(regions, size=chunksize))
logger.debug(
"{} chunks of {} regions (total {}) to run in {} partitions at {} cores".format(len(regions), chunksize,
regions_size, partitions,
cores))
# mapfn = lambda r: self._compute_par(aggregator, kwargs, r)
self.aggregator = aggregator
self.kwargs = kwargs
with self.manager.pool() as executor:
logger.debug("pool created")
for items in executor.uimap(self._mapfn, regions):
for item in items:
yield item
def _aggregate_seq(self, fields, **kwargs):
"""
Sequential implementation of the aggregate method (for testing/debugging purposes)
"""
regions = self.field.dataset.source.index(self.field.label)
labels = set(self.field)
regions = list(filter(lambda r: r[0] in labels, regions))
for label, segments in regions:
yield self._compute(fields, kwargs, (label, segments))
``` |
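The dataset classes above compose views lazily (merge, filter, head). A minimal, hedged usage sketch follows; note that the construction of the `Gendas` manager and the source labels `'variants'`/`'regions'` (and the field names used in the lambda) are assumptions for illustration only, since source registration is not shown in this file:
```python
def example_pipeline(gd):
    # Hedged sketch: `gd` is assumed to be a configured Gendas manager whose
    # __getitem__ returns a GendasDataset for a registered source; the labels
    # 'variants' and 'regions' and the field names used here are hypothetical.
    variants = gd['variants']
    regions = gd['regions']
    merged = variants.merge(regions, on=['SAMPLE'])   # coordinate join plus a shared key
    snvs = merged.filter(lambda r: r['variants']['TYPE'] == 'SNV')
    return list(snvs.head(5))                         # first five joined rows
```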
{
"source": "jordeu/itab",
"score": 3
} |
#### File: itab/itab/writer.py
```python
import csv
import os
from itab.files import open_file
from itab.schema import DEFAULT_DELIMITER, Schema
class TabWriter(object):
def __init__(self, f, schema=None, headers=None, comments=None, write_headers=False, delimiter=DEFAULT_DELIMITER):
# Load schema
self.schema = Schema(schema, headers=headers, basedir=os.path.dirname(f))
# Check if the schema is a URL and save it as a comment
if type(schema) == str:
metadata = {'schema': schema}
else:
metadata = None
# Open an annotated and commented file iterator
self.fd = open_file(f, metadata=metadata, mode="w", comments=comments)
# Use default python writer
self.writer = csv.writer(self.fd, delimiter=delimiter)
# Write header
if write_headers:
self.writer.writerow(self.schema.headers)
self.line_num = 0
def writerow(self, row):
result = []
errors = []
for ix, x in enumerate(row):
val, err = self.schema.format_cell(x, row, self.line_num, ix, parser='writer')
result.append(val)
if err is not None:
errors.append(err)
self.writer.writerow(result)
self.line_num += 1
return errors
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.fd.close()
class TabDictWriter(TabWriter):
def __init__(self, file, schema=None, headers=None, extrasaction='ignore', write_headers=True):
"""
:param file: File path
:param schema: A file, url or python dictionary with the tab schema
:param extrasaction: If it's equal to 'ignore', the values with a key not defined as a schema field will be
ignored. If it's equal to 'append' the values will be added at the end of the line, but without a header.
"""
TabWriter.__init__(self, file, schema=schema, headers=headers, write_headers=write_headers)
self.extrasaction = extrasaction
def writerow(self, row_dict):
"""
:param row_dict: A dictionary with the values and the field names as keys.
:return: A list with the writing or validation errors. An empty list if there is no error.
"""
# Check if there is a value without a defined field
errors = []
for k in row_dict.keys():
if k not in self.schema.headers:
if self.extrasaction == 'append':
self.schema.headers.append(k)
err_msg = "You will have some extra values without header."
else:
err_msg = "This values are ignored."
errors += "The key '{}' is not a valid schema field. {}".format(k, err_msg)
row_list = [row_dict.get(h, None) for h in self.schema.headers]
errors += TabWriter.writerow(self, row_list)
return errors
``` |
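A short, hedged usage sketch of `TabDictWriter`: whether a schema-less call with only a `headers` list is accepted depends on the `Schema` class (defined elsewhere in itab), and the file name and column names below are illustrative only:
```python
from itab.writer import TabDictWriter

# Hedged sketch: assumes Schema accepts a plain headers list when no schema
# file/dict is given; 'variants.tsv' and the column names are hypothetical.
with TabDictWriter('variants.tsv', headers=['CHROMOSOME', 'POSITION']) as writer:
    errors = writer.writerow({'CHROMOSOME': '17', 'POSITION': 7578406})
    if errors:
        print(errors)  # formatting/validation problems, if any
```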
{
"source": "jordi1215/quantum-walk-visualization",
"score": 3
} |
#### File: jordi1215/quantum-walk-visualization/quantum_walk.py
```python
import retworkx
from retworkx.visualization import mpl_draw
import matplotlib.pyplot as plt
from qiskit import *
import numpy as np
from numpy import linalg as la
from scipy.linalg import expm
from qiskit.extensions import HamiltonianGate
from qiskit.visualization import plot_histogram
import imageio
def pad_zeros(adjacency):
'''
Helper function for padding zeros to increase adjacency
matrix of shape (n,n) to (2**n, 2**n).
Parameters: adjacency (ndarray): adjacency of graph
Returns: full_matrix (ndarray): new adjacency with padded zeroes
'''
full_matrix = np.zeros((2 ** len(adjacency), 2 ** len(adjacency)))
for i in range(len(adjacency)):
for j in range(len(adjacency)):
if adjacency[i][j] != 0:
full_matrix[2 ** i][2 ** j] = adjacency[i][j]
return full_matrix
def create_walk_circuit(adj_matrix, total_dur, num_snaps):
'''
Helper function for generating walk circuit with snapshots
after each evolution of the quantum walk.
Parameters: adj_matrix (ndarray): adjacency of graph (2**n, 2**n)
total_dur (float): total time for quantum walk
num_snaps (int): number of snapshots throughout walk
Returns: circ (QuantumCircuit): resulting circuit
'''
# create matrix exponential gate and circuit
num_qubits = np.log2(len(adj_matrix))
ExpGate = HamiltonianGate(adj_matrix, total_dur / num_snaps)
qr = QuantumRegister(num_qubits, 'q')
cr = ClassicalRegister(num_qubits)
circ = QuantumCircuit(qr, cr)
    # initialize to state |0...01> and add gate with snapshots
circ.x(0)
circ.snapshot(str(0))
for i in range(num_snaps):
circ.append(ExpGate, qr)
circ.snapshot(str(i + 1))
# return circuit
return circ
def get_snapshots(adj_matrix, total_dur, num_snaps):
'''
Function for returning snapshots of quantum walk.
Parameters: adj_matrix (ndarray): adjacency of graph (2**n, 2**n)
total_dur (float): total time for quantum walk
num_snaps (int): number of snapshots throughout walk
Returns: map from iteration number to snapshot, snapshot counts
up from binary in ordering (00, 01, 10, 11, ...)
'''
qc = create_walk_circuit(adj_matrix, total_dur, num_snaps)
backend = Aer.get_backend('statevector_simulator')
result = execute(qc, backend).result()
return result.data()['snapshots']['statevector']
def generate_digraph_at_snapshot(adj_matrix, amplitude_array):
'''
Helper function that creates a graph for each snapshot.
Parameters: adj_matrix (ndarray): adjacency of graph (unpadded, nxn)
amplitude_array (ndarray): value from snapshot dictionary for a specific snapshot
Returns: pydigraph and list of colors for each node in the graph
'''
g = retworkx.PyDiGraph()
n = len(adj_matrix)
# add nodes
#lst = ["|" + str(bin(i))[2:].zfill(int(np.log2(n))) + ">" for i in range(n)]
lst = ["|" + str(bin(2**i))[2:].zfill(int(n)) + ">" for i in range(n)]
g.add_nodes_from(lst)
# add edges
for i in range(len(adj_matrix)):
for j in range(len(adj_matrix[0])):
if adj_matrix[i][j] != 0:
g.add_edge(i, j, adj_matrix[i][j])
# compute colors based on how probable the node is
colors = []
for i in range(len(adj_matrix)):
alpha = abs(amplitude_array[2 ** i])
# rescale our transparency
alpha = alpha * 0.9 + 0.1
colors.append((0.0, 0.0, 1.0, alpha))
return g, colors
def generate_gif(adj_matrix, snapshots, gifname="quantum_walk", snapshot_dir="."):
'''
Function that makes a gif of the quantum walk.
Parameters: adj_matrix (ndarray): adjacency of graph (unpadded, nxn)
snapshots (ndarray dict): map from iteration number to snapshot, snapshot counts
up from binary in ordering (00, 01, 10, 11 for 2 nodes)
gifname (string): name of the gif file created
snapshot_dir (string): name of the directory to store the snapshot png's
Returns: saves a gif to the notebook files
'''
n = len(snapshots.items())
pos = None
# create all the images of the graphs
for i in range(n):
g, colors = generate_digraph_at_snapshot(adj_matrix, snapshots[str(i)][0])
# save the position of the first graph so all subsequent graphs use the same node positioning
if i == 0:
pos = retworkx.spring_layout(g)
plt.clf()
mpl_draw(g, pos=pos, with_labels=True, labels=lambda node: node, arrows=False, node_size=1000, node_color= colors)
plt.draw()
plt.text(0.1, 0.1, 'snapshot ' + str(i), size=15, color='purple')
plt.savefig(snapshot_dir + '/snapshot' + str(i) + '.png')
# concatenate images into gif
images = []
filenames = [snapshot_dir + '/snapshot' + str(i) + '.png' for i in range(n)]
for filename in filenames:
images.append(imageio.imread(filename))
imageio.mimsave(gifname + ".gif", images, duration = .5)
def visualize_walk(adj_matrix, total_dur, num_snaps, gifname="quantum_walk", snapshot_dir="."):
'''
Function for bringing it all together
Parameters: adj_matrix (ndarray): adjacency of graph (unpadded, nxn)
total_dur (float): total time for quantum walk
num_snaps (int): number of snapshots throughout walk
gifname (string): name of the gif file created
snapshot_dir (string): name of the directory to store the snapshot png's
'''
pad_adj = pad_zeros(adj_matrix)
snaps = get_snapshots(pad_adj, total_dur, num_snaps)
generate_gif(adj_matrix, snaps, gifname, snapshot_dir)
``` |
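A small driver sketch for the functions above, using a 3-node path graph. `visualize_walk` writes the snapshot PNGs and the GIF to the working directory; note that `circ.snapshot` relies on an older Qiskit/Aer API, so this only runs on versions where that call still exists:
```python
import numpy as np
from quantum_walk import visualize_walk

# 3-node path graph: 0 - 1 - 2
adjacency = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]])

# 8 snapshots over a total walk time of 4.0; produces quantum_walk.gif
visualize_walk(adjacency, total_dur=4.0, num_snaps=8)
```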
{
"source": "jordi2326/RaccBoot",
"score": 3
} |
#### File: RaccBoot/code/pruebaRaccApi.py
```python
import logging
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler)
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
GENDER, PHOTO, LOCATION, LOCATIONMANUAL, BIO, REPARACIONES, TRAMITES, TAREASHOGAR, FAMILIAYSALUD, ASISTENCIA, RECADOS, MOVILIDAD, MASCOTAS, OCIOYVIAJE, SELECTOR, VOLVER, SUBREPARACIONES, SUBSUBREPARACIONES, SUBSUBSUBREPARACIONES, BACK, CONTINUAR, INFORMACION, INFORMACION1, PAGAR, FECHA = range(
25)
selecion1 = ""
selecion2 = ""
selecion3 = ""
selecion4 = ""
email = ""
telefono = ""
detected_address = ""
detected_address_extra = ""
presupuesto = 0
def start(update, context):
reply_keyboard = [['Reparaciones', 'Tramites', 'Tareas del hogar'], ['Familia y salud',
'Asistencia', 'Recados'],
['Movilidad', 'Mascotas', 'Ocio y viajes']]
user = update.message.from_user
nom = user.first_name
logger.info("%s: %s", nom, update.message.text)
update.message.reply_text(
'¡Bienvenido! Soy Nestor y voy a ser tu asistente personal.\n\n'
'Pídeme lo que quieras y disfruta de tu tiempo para las cosas que realmente importan.\n'
'Reserva tu servicio en pocos pasos y obtén un presupuesto al momento.\n'
'Puedes enviar "/cancel" en cualquier momento para dejar de hablar conmigo.\n\n'
'¿Que servicio necesitas?\n',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return SELECTOR
def selector(update, context):
reply_keyboard = [['Continuar', 'Volver']]
user = update.message.from_user
selecion1 = update.message.text
logger.info("%s: %s", user.first_name, update.message.text)
update.message.reply_text("Has seleccionado: " + str(selecion1))
# update.message.reply_text(update.message.text)
update.message.reply_text('Introduzca /continuar si no introduzca /volver',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
if selecion1 == 'Reparaciones':
return REPARACIONES
return VOLVER
def reparaciones(update, context):
user = update.message.from_user
reply_keyboard = [['Manitas', 'Cerrajero', 'Electricista', 'Fontanero'],
['Pintor', 'Carpintero', 'Climatización', 'Persianista'],
['Parquetista', 'Antenista', 'Albañil', 'Cristalero'],
['Electrodomésticos', 'Informática', 'Asistencia mecánica y reparación']]
logger.info("%s: %s", user.first_name, update.message.text)
update.message.reply_text(
'¿Que tipo de reparaciones necesitas?\n\n'
'Selecciona la opción que quieras.\n\n'
'Envía /cancel para dejar de hablar conmigo.\n\n'
'Si quieres volver al inicio pulsa /volver',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return SUBREPARACIONES
def subreparaciones(update, context):
global selecion2
if (update.message.text != '/back'):
selecion2 = update.message.text
logger.info("Last %s", update.message.text)
update.message.reply_text("Has seleccionado: " + str(selecion2))
if selecion2 == 'Manitas':
reply_keyboard = [['Reparación en casa', 'Montaje de TV'],['Montaje de muebles', 'Otros']]
update.message.reply_text(
'¡Dinos que necesitas y nos encargamos de todo!\n'
'Manitas verificados de confianza.\n'
'Garantía de 6 meses.\n'
'Desplazamiento incluido.\n\n'
'Selecciona la opción que quieras.\n\n'
'Envía /cancel para dejar de hablar conmigo.\n\n'
'Envía /back para volver a elegir el servicio.\n\n',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return SUBSUBREPARACIONES
def subsubrepaciones(update, context):
global selecion3
if update.message.text != 'Back':
selecion3 = update.message.text
if selecion3 == 'Reparación en casa':
reply_keyboard = [['2 horas', '3 horas', '4 horas']]
update.message.reply_text(
'Indica el numero de horas que más se adapte a tus necesidades.\n\n'
'Envía /cancel para dejar de hablar conmigo.\n\n'
'Envía /back para cambiar de servicio.\n\n',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return SUBSUBSUBREPARACIONES
def subsubsubreparaciones(update, context):
reply_keyboard = [['Continuar', 'Volver'],['Cancelar', 'Anterior']]
global selecion4
global presupuesto
selecion4 = update.message.text
if selecion4 == '2 horas' or selecion4 == '3 horas':
update.message.reply_text('Precio : 61,71 €')
presupuesto = 61.71
if (selecion4 == '4 horas'):
update.message.reply_text('Precio : 111,08 €')
presupuesto = 111.08
return informacion(update, context)
def informacion(update, context):
update.message.reply_text('Introduzca correo electrónico')
return INFORMACION
def informacion2(update, context):
global email
email = update.message.text
    # Send the email in the background
receiver_email = email # Enter receiver address
import threading
download_thread = threading.Thread(target=enviarCorreo, args=[receiver_email])
download_thread.start()
update.message.reply_text('Presupuesto enviado por correo. Introduzca número de teléfono')
return INFORMACION1
def informacion3(update, context):
global telefono
telefono = update.message.text
update.message.reply_text('Envíe fotos para más detalles o /skip para omitir')
return PHOTO
def photo(update, context):
user = update.message.from_user
photo_file = update.message.photo[-1].get_file()
photo_file.download('user_photo.jpg')
logger.info("Photo of %s: %s", user.first_name, 'user_photo.jpg')
update.message.reply_text('Introduzca su ubicación o /skip para omitir')
return LOCATION
def location(update, context):
global detected_address
user = update.message.from_user
user_location = update.message.location
logger.info("Location of %s: %f / %f", user.first_name, user_location.latitude,
user_location.longitude)
url = "https://maps.googleapis.com/maps/api/geocode/json?latlng=" + str(user_location.latitude) + "," + str(
user_location.longitude) + "&key=<KEY>"
import requests, json, urllib3
logger.info(url)
response = requests.get(url).json()
logger.info(response)
logger.info(response["results"][0]["formatted_address"])
detected_address = response["results"][0]["formatted_address"]
update.message.reply_text("Hemos detectado la ubicacion seguiente: ")
update.message.reply_text(str(detected_address))
update.message.reply_text("Ahora agrega mas informacion como el numero de piso. Para saltar este paso introduce /skip")
return LOCATIONMANUAL
# location, location manual, skip locationman, skip location, 22-23, func declaration, global
def location_manual(update, context):
global detected_address_extra
detected_address_extra = update.message.text
update.message.reply_text("Introduce el dia cuando usted quiere que nos pasemos en formato dd/mm/aaaa. Para saltar este paso introduce /skip")
return FECHA
def skip_location_manual(update, context):
update.message.reply_text("Introduce el dia cuando usted quiere que nos pasemos en formato dd/mm/aaaa. Para saltar este paso introduce /skip")
return FECHA
def skip_location(update, context):
update.message.reply_text("Introduce el dia cuando usted quiere que nos pasemos en formato dd/mm/aaaa. Para saltar este paso introduce /skip")
return FECHA
def fecha(update, context):
update.message.reply_text('Para pagar introduzca la tarjeta bancaria en formato [numero],[mes],[anyo],[cvc]. Por ejemplo 4242424242424242,6,2021,314\nO introduzca /skip para omitir y pagar en efectivo')
return PAGAR
def skip_fecha(update, context):
update.message.reply_text('Para pagar introduzca la tarjeta bancaria en formato [numero],[mes],[anyo],[cvc]. Por ejemplo 4242424242424242,6,2021,314\nO introduzca /skip para omitir y pagar en efectivo')
return PAGAR
def introloc(update, context):
update.message.reply_text('Introduzca su ubicación o /skip para omitir')
return LOCATION
def bio(update, context):
user = update.message.from_user
logger.info("Bio of %s: %s", user.first_name, update.message.text)
update.message.reply_text('¡Gracias! Espero poder hablar de nuevo con usted.')
return ConversationHandler.END
def enviarCorreo(receiver_email):
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "<EMAIL>" # Enter your address
# password = input("Type your password and press enter: ")
password = "<PASSWORD>"
message = MIMEMultipart("alternative")
message["Subject"] = "Presupuesto manitas"
message["From"] = "Bot RACC"
message["To"] = receiver_email
# Create the plain-text and HTML version of your message
text = """\
Hola,
Adjuntamos el presupuesto de los servicios que ha solicitado.
Muchas gracias por su confianza.
Atentamente,
RACC"""
html = """\
<html>
<body>
<p>Hola,<br>
Adjuntamos el presupuesto de los servicios que ha solicitado.<br>
Muchas gracias por su confianza.<br>
<br>
Atentamente,<br>
RACC
</p>
</body>
</html>
"""
# Turn these into plain/html MIMEText objects
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
# Add HTML/plain-text parts to MIMEMultipart message
# The email client will try to render the last part first
message.attach(part1)
message.attach(part2)
filename = "Presupuesto.pdf" # In same directory as script
# Open PDF file in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
# Create a secure SSL context
context = ssl.create_default_context()
server = smtplib.SMTP_SSL(smtp_server)
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message.as_string())
server.quit()
def cancel(update, context):
user = update.message.from_user
logger.info("User %s canceled the conversation.", user.first_name)
update.message.reply_text('¡Adiós! Espero poder hablar de nuevo con usted.',
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def pagar(update, context):
    # for testing: 4242424242424242,6,2021,314
if(update.message.text == "/skip"): return skip_pagar(update, context)
tarjeta_info = update.message.text.split(",")
import stripe
stripe.api_key = "<KEY>"
global presupuesto
try:
charge = stripe.Charge.create(
amount=int(presupuesto * 100),
currency="eur",
description="My First Test Charge (created for API docs)",
source=stripe.Token.create(
card={
"number": tarjeta_info[0],
"exp_month": int(tarjeta_info[1]),
"exp_year": int(tarjeta_info[2]),
"cvc": tarjeta_info[3],
},
),
)
except:
update.message.reply_text('Algún dato de su tarjeta es incorrecto. Por favor ingreselo nuevamente')
return PAGAR
return skip_pagar(update, context)
def skip_pagar(update, context):
user = update.message.from_user
update.message.reply_text('¡Gracias! Espero poder hablar de nuevo con usted.')
return ConversationHandler.END
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
# Create the Updater and pass it your bot's token.
# Make sure to set use_context=True to use the new context based callbacks
# Post version 12 this will no longer be necessary
updater = Updater("1197852167:AAETya5xtPT06hjp8VJQxbZQuL5M7Fk8h8k", use_context=True)
# Get the dispatcher to register handlers
dp = updater.dispatcher
# Add conversation handler with the states GENDER, PHOTO, LOCATION and BIO
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
REPARACIONES: [MessageHandler(Filters.regex('^(Continuar)$'), reparaciones),
CommandHandler('reparaciones', reparaciones), CommandHandler('continuar', reparaciones),
MessageHandler(Filters.regex('^(Volver)$'), start)],
TRAMITES: [MessageHandler(Filters.regex('^(Tramites)$'), start)],
TAREASHOGAR: [MessageHandler(Filters.regex('^(Tareas del hogar)$'), start)],
FAMILIAYSALUD: [MessageHandler(Filters.regex('^(Familia y salud)$'), start)],
ASISTENCIA: [MessageHandler(Filters.regex('^(Asistencia)$'), start)],
RECADOS: [MessageHandler(Filters.regex('^(Continuar)$'), start),
CommandHandler('recados', start)],
MOVILIDAD: [MessageHandler(Filters.regex('^(Movilidad)$'), start)],
MASCOTAS: [MessageHandler(Filters.regex('^(Mascotas)$'), start)],
OCIOYVIAJE: [MessageHandler(Filters.regex('^(Ocio y viajes)$'), start)],
SELECTOR: [CommandHandler('cancel', cancel),
MessageHandler(Filters.regex('^(Reparaciones)$'), reparaciones)],
PHOTO: [MessageHandler(Filters.photo, photo),
CommandHandler('skip', introloc)],
LOCATION: [MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location)],
LOCATIONMANUAL: [MessageHandler(Filters.text, location_manual),
CommandHandler('skip', skip_location_manual)],
FECHA: [MessageHandler(Filters.text, fecha),
CommandHandler('skip', skip_fecha)],
BIO: [MessageHandler(Filters.text, bio)],
VOLVER: [MessageHandler(Filters.regex('^(Volver)$'), start)],
SUBREPARACIONES: [MessageHandler(Filters.text, subreparaciones),
CommandHandler('subreparaciones', subreparaciones)],
SUBSUBREPARACIONES: [
MessageHandler(Filters.regex('^(Reparación en casa|Montaje de TV|Montaje de muebles|Otros|Anterior)$'),
subsubrepaciones),
CommandHandler('subsubreparaciones', subsubrepaciones), CommandHandler('back', reparaciones)],
SUBSUBSUBREPARACIONES: [MessageHandler(Filters.regex('^(2 horas|3 horas|4 horas)$'), subsubsubreparaciones),
CommandHandler('skip', start), CommandHandler('back', subreparaciones)],
PAGAR: [MessageHandler(Filters.text, pagar),
CommandHandler('skip', bio)],
INFORMACION: [MessageHandler(Filters.text, informacion2),
CommandHandler('skip', start)],
INFORMACION1: [MessageHandler(Filters.text, informacion3),
CommandHandler('skip', start)]
},
fallbacks=[CommandHandler('cancel', cancel), CommandHandler('volver', start)]
)
dp.add_handler(conv_handler)
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
```
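The script above follows the python-telegram-bot v12 `ConversationHandler` pattern: integer state constants, one callback per state, and fallbacks. Stripped to its essentials, the same pattern looks like the sketch below (the token string is a placeholder, not a real bot token):
```python
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
                          ConversationHandler)

NAME = 0

def start(update, context):
    update.message.reply_text('Hi! What is your name?')
    return NAME

def name(update, context):
    update.message.reply_text('Nice to meet you, {}!'.format(update.message.text))
    return ConversationHandler.END

def cancel(update, context):
    update.message.reply_text('Bye!')
    return ConversationHandler.END

def main():
    updater = Updater('TOKEN', use_context=True)  # placeholder token
    conv = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={NAME: [MessageHandler(Filters.text, name)]},
        fallbacks=[CommandHandler('cancel', cancel)])
    updater.dispatcher.add_handler(conv)
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
```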
#### File: telegram/ext/messagehandler.py
```python
import warnings
from telegram.utils.deprecate import TelegramDeprecationWarning
from telegram import Update
from telegram.ext import Filters
from .handler import Handler
class MessageHandler(Handler):
"""Handler class to handle telegram messages. They might contain text, media or status updates.
Attributes:
filters (:obj:`Filter`): Only allow updates with these Filters. See
:mod:`telegram.ext.filters` for a full list of all available filters.
callback (:obj:`callable`): The callback function for this handler.
pass_update_queue (:obj:`bool`): Determines whether ``update_queue`` will be
passed to the callback function.
pass_job_queue (:obj:`bool`): Determines whether ``job_queue`` will be passed to
the callback function.
pass_user_data (:obj:`bool`): Determines whether ``user_data`` will be passed to
the callback function.
pass_chat_data (:obj:`bool`): Determines whether ``chat_data`` will be passed to
the callback function.
message_updates (:obj:`bool`): Should "normal" message updates be handled?
Default is ``None``.
channel_post_updates (:obj:`bool`): Should channel posts updates be handled?
Default is ``None``.
edited_updates (:obj:`bool`): Should "edited" message updates be handled?
Default is ``None``.
Note:
:attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you
can use to keep any data in will be sent to the :attr:`callback` function. Related to
either the user or the chat that the update was sent in. For each update from the same user
or in the same chat, it will be the same ``dict``.
Note that this is DEPRECATED, and you should use context based callbacks. See
https://git.io/fxJuV for more info.
Args:
filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from
:class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in
:class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise
operators (& for and, | for or, ~ for not). Default is
:attr:`telegram.ext.filters.Filters.update`. This defaults to all message_type updates
being: ``message``, ``edited_message``, ``channel_post`` and ``edited_channel_post``.
If you don't want or need any of those pass ``~Filters.update.*`` in the filter
argument.
callback (:obj:`callable`): The callback function for this handler. Will be called when
:attr:`check_update` has determined that an update should be processed by this handler.
Callback signature for context based API:
``def callback(update: Update, context: CallbackContext)``
The return value of the callback is usually ignored except for the special case of
:class:`telegram.ext.ConversationHandler`.
pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``update_queue`` will be passed to the callback function. It will be the ``Queue``
instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`
that contains new updates which can be used to insert updates. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``job_queue`` will be passed to the callback function. It will be a
:class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`
which can be used to schedule new jobs. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``user_data`` will be passed to the callback function. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``chat_data`` will be passed to the callback function. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
message_updates (:obj:`bool`, optional): Should "normal" message updates be handled?
Default is ``None``.
DEPRECATED: Please switch to filters for update filtering.
channel_post_updates (:obj:`bool`, optional): Should channel posts updates be handled?
Default is ``None``.
DEPRECATED: Please switch to filters for update filtering.
edited_updates (:obj:`bool`, optional): Should "edited" message updates be handled? Default
is ``None``.
DEPRECATED: Please switch to filters for update filtering.
Raises:
ValueError
"""
def __init__(self,
filters,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
message_updates=None,
channel_post_updates=None,
edited_updates=None):
super(MessageHandler, self).__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data)
if message_updates is False and channel_post_updates is False and edited_updates is False:
raise ValueError(
'message_updates, channel_post_updates and edited_updates are all False')
self.filters = filters
if self.filters is not None:
self.filters &= Filters.update
else:
self.filters = Filters.update
if message_updates is not None:
warnings.warn('message_updates is deprecated. See https://git.io/fxJuV for more info',
TelegramDeprecationWarning,
stacklevel=2)
if message_updates is False:
self.filters &= ~Filters.update.message
if channel_post_updates is not None:
warnings.warn('channel_post_updates is deprecated. See https://git.io/fxJuV '
'for more info',
TelegramDeprecationWarning,
stacklevel=2)
if channel_post_updates is False:
self.filters &= ~Filters.update.channel_post
if edited_updates is not None:
warnings.warn('edited_updates is deprecated. See https://git.io/fxJuV for more info',
TelegramDeprecationWarning,
stacklevel=2)
if edited_updates is False:
self.filters &= ~(Filters.update.edited_message
| Filters.update.edited_channel_post)
def check_update(self, update):
"""Determines whether an update should be passed to this handlers :attr:`callback`.
Args:
update (:class:`telegram.Update`): Incoming telegram update.
Returns:
:obj:`bool`
"""
if isinstance(update, Update) and update.effective_message:
return self.filters(update)
def collect_additional_context(self, context, update, dispatcher, check_result):
if isinstance(check_result, dict):
context.update(check_result)
```
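A minimal sketch of the filter combination described in the docstring above (python-telegram-bot v12 style; `echo` is a hypothetical callback, not part of the library):
```python
from telegram.ext import MessageHandler, Filters

def echo(update, context):
    update.message.reply_text(update.message.text)

# Plain text messages that are neither commands nor edited messages:
handler = MessageHandler(Filters.text & ~Filters.command
                         & ~Filters.update.edited_message, echo)
```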
#### File: RaccBoot/tests/test_callbackcontext.py
```python
import pytest
from telegram import Update, Message, Chat, User, TelegramError
from telegram.ext import CallbackContext
class TestCallbackContext(object):
def test_non_context_dp(self, dp):
with pytest.raises(ValueError):
CallbackContext(dp)
def test_from_job(self, cdp):
job = cdp.job_queue.run_once(lambda x: x, 10)
callback_context = CallbackContext.from_job(job, cdp)
assert callback_context.job is job
assert callback_context.chat_data is None
assert callback_context.user_data is None
assert callback_context.bot_data is cdp.bot_data
assert callback_context.bot is cdp.bot
assert callback_context.job_queue is cdp.job_queue
assert callback_context.update_queue is cdp.update_queue
def test_from_update(self, cdp):
update = Update(0, message=Message(0, User(1, 'user', False), None, Chat(1, 'chat')))
callback_context = CallbackContext.from_update(update, cdp)
assert callback_context.chat_data == {}
assert callback_context.user_data == {}
assert callback_context.bot_data is cdp.bot_data
assert callback_context.bot is cdp.bot
assert callback_context.job_queue is cdp.job_queue
assert callback_context.update_queue is cdp.update_queue
callback_context_same_user_chat = CallbackContext.from_update(update, cdp)
callback_context.bot_data['test'] = 'bot'
callback_context.chat_data['test'] = 'chat'
callback_context.user_data['test'] = 'user'
assert callback_context_same_user_chat.bot_data is callback_context.bot_data
assert callback_context_same_user_chat.chat_data is callback_context.chat_data
assert callback_context_same_user_chat.user_data is callback_context.user_data
update_other_user_chat = Update(0, message=Message(0, User(2, 'user', False),
None, Chat(2, 'chat')))
callback_context_other_user_chat = CallbackContext.from_update(update_other_user_chat, cdp)
assert callback_context_other_user_chat.bot_data is callback_context.bot_data
assert callback_context_other_user_chat.chat_data is not callback_context.chat_data
assert callback_context_other_user_chat.user_data is not callback_context.user_data
def test_from_update_not_update(self, cdp):
callback_context = CallbackContext.from_update(None, cdp)
assert callback_context.chat_data is None
assert callback_context.user_data is None
assert callback_context.bot_data is cdp.bot_data
assert callback_context.bot is cdp.bot
assert callback_context.job_queue is cdp.job_queue
assert callback_context.update_queue is cdp.update_queue
callback_context = CallbackContext.from_update('', cdp)
assert callback_context.chat_data is None
assert callback_context.user_data is None
assert callback_context.bot_data is cdp.bot_data
assert callback_context.bot is cdp.bot
assert callback_context.job_queue is cdp.job_queue
assert callback_context.update_queue is cdp.update_queue
def test_from_error(self, cdp):
error = TelegramError('test')
update = Update(0, message=Message(0, User(1, 'user', False), None, Chat(1, 'chat')))
callback_context = CallbackContext.from_error(update, error, cdp)
assert callback_context.error is error
assert callback_context.chat_data == {}
assert callback_context.user_data == {}
assert callback_context.bot_data is cdp.bot_data
assert callback_context.bot is cdp.bot
assert callback_context.job_queue is cdp.job_queue
assert callback_context.update_queue is cdp.update_queue
def test_match(self, cdp):
callback_context = CallbackContext(cdp)
assert callback_context.match is None
callback_context.matches = ['test', 'blah']
assert callback_context.match == 'test'
def test_data_assignment(self, cdp):
update = Update(0, message=Message(0, User(1, 'user', False), None, Chat(1, 'chat')))
callback_context = CallbackContext.from_update(update, cdp)
with pytest.raises(AttributeError):
callback_context.chat_data = {"test": 123}
with pytest.raises(AttributeError):
callback_context.user_data = {}
with pytest.raises(AttributeError):
callback_context.chat_data = "test"
def test_dispatcher_attribute(self, cdp):
callback_context = CallbackContext(cdp)
assert callback_context.dispatcher == cdp
```
#### File: RaccBoot/tests/test_chatpermissions.py
```python
import pytest
from telegram import ChatPermissions
@pytest.fixture(scope="class")
def chat_permissions():
return ChatPermissions(can_send_messages=True, can_send_media_messages=True,
can_send_polls=True, can_send_other_messages=True,
can_add_web_page_previews=True, can_change_info=True,
can_invite_users=True, can_pin_messages=True)
class TestChatPermissions(object):
can_send_messages = True
can_send_media_messages = True
can_send_polls = True
can_send_other_messages = False
can_add_web_page_previews = False
can_change_info = False
can_invite_users = None
can_pin_messages = None
def test_de_json(self, bot):
json_dict = {
'can_send_messages': self.can_send_messages,
'can_send_media_messages': self.can_send_media_messages,
'can_send_polls': self.can_send_polls,
'can_send_other_messages': self.can_send_other_messages,
'can_add_web_page_previews': self.can_add_web_page_previews,
'can_change_info': self.can_change_info,
'can_invite_users': self.can_invite_users,
'can_pin_messages': self.can_pin_messages
}
permissions = ChatPermissions.de_json(json_dict, bot)
assert permissions.can_send_messages == self.can_send_messages
assert permissions.can_send_media_messages == self.can_send_media_messages
assert permissions.can_send_polls == self.can_send_polls
assert permissions.can_send_other_messages == self.can_send_other_messages
assert permissions.can_add_web_page_previews == self.can_add_web_page_previews
assert permissions.can_change_info == self.can_change_info
assert permissions.can_invite_users == self.can_invite_users
assert permissions.can_pin_messages == self.can_pin_messages
def test_to_dict(self, chat_permissions):
permissions_dict = chat_permissions.to_dict()
assert isinstance(permissions_dict, dict)
assert permissions_dict['can_send_messages'] == chat_permissions.can_send_messages
assert (permissions_dict['can_send_media_messages']
== chat_permissions.can_send_media_messages)
assert permissions_dict['can_send_polls'] == chat_permissions.can_send_polls
assert (permissions_dict['can_send_other_messages']
== chat_permissions.can_send_other_messages)
assert (permissions_dict['can_add_web_page_previews']
== chat_permissions.can_add_web_page_previews)
assert permissions_dict['can_change_info'] == chat_permissions.can_change_info
assert permissions_dict['can_invite_users'] == chat_permissions.can_invite_users
assert permissions_dict['can_pin_messages'] == chat_permissions.can_pin_messages
```
#### File: RaccBoot/tests/test_chatphoto.py
```python
import os
import pytest
from flaky import flaky
from telegram import ChatPhoto, Voice, TelegramError
@pytest.fixture(scope='function')
def chatphoto_file():
f = open('tests/data/telegram.jpg', 'rb')
yield f
f.close()
@pytest.fixture(scope='function')
def chat_photo(bot, super_group_id):
return bot.get_chat(super_group_id, timeout=50).photo
class TestChatPhoto(object):
chatphoto_small_file_id = 'smallCgADAQADngIAAuyVeEez0xRovKi9VAI'
chatphoto_big_file_id = 'bigCgADAQADngIAAuyVeEez0xRovKi9VAI'
chatphoto_small_file_unique_id = 'smalladc3145fd2e84d95b64d68eaa22aa33e'
chatphoto_big_file_unique_id = 'bigadc3145fd2e84d95b64d68eaa22aa33e'
chatphoto_file_url = 'https://python-telegram-bot.org/static/testfiles/telegram.jpg'
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_all_args(self, bot, super_group_id, chatphoto_file, chat_photo, thumb_file):
assert bot.set_chat_photo(super_group_id, chatphoto_file)
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_get_and_download(self, bot, chat_photo):
new_file = bot.get_file(chat_photo.small_file_id)
assert new_file.file_id == chat_photo.small_file_id
assert new_file.file_path.startswith('https://')
new_file.download('telegram.jpg')
assert os.path.isfile('telegram.jpg')
new_file = bot.get_file(chat_photo.big_file_id)
assert new_file.file_id == chat_photo.big_file_id
assert new_file.file_path.startswith('https://')
new_file.download('telegram.jpg')
assert os.path.isfile('telegram.jpg')
def test_send_with_chat_photo(self, monkeypatch, bot, super_group_id, chat_photo):
def test(_, url, data, **kwargs):
return data['photo'] == chat_photo
monkeypatch.setattr('telegram.utils.request.Request.post', test)
message = bot.set_chat_photo(photo=chat_photo, chat_id=super_group_id)
assert message
def test_de_json(self, bot, chat_photo):
json_dict = {
'small_file_id': self.chatphoto_small_file_id,
'big_file_id': self.chatphoto_big_file_id,
'small_file_unique_id': self.chatphoto_small_file_unique_id,
'big_file_unique_id': self.chatphoto_big_file_unique_id,
}
chat_photo = ChatPhoto.de_json(json_dict, bot)
assert chat_photo.small_file_id == self.chatphoto_small_file_id
assert chat_photo.big_file_id == self.chatphoto_big_file_id
assert chat_photo.small_file_unique_id == self.chatphoto_small_file_unique_id
assert chat_photo.big_file_unique_id == self.chatphoto_big_file_unique_id
def test_to_dict(self, chat_photo):
chat_photo_dict = chat_photo.to_dict()
assert isinstance(chat_photo_dict, dict)
assert chat_photo_dict['small_file_id'] == chat_photo.small_file_id
assert chat_photo_dict['big_file_id'] == chat_photo.big_file_id
assert chat_photo_dict['small_file_unique_id'] == chat_photo.small_file_unique_id
assert chat_photo_dict['big_file_unique_id'] == chat_photo.big_file_unique_id
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file(self, bot, super_group_id):
chatphoto_file = open(os.devnull, 'rb')
with pytest.raises(TelegramError):
bot.set_chat_photo(chat_id=super_group_id, photo=chatphoto_file)
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file_id(self, bot, super_group_id):
with pytest.raises(TelegramError):
bot.set_chat_photo(chat_id=super_group_id, photo='')
def test_error_send_without_required_args(self, bot, super_group_id):
with pytest.raises(TypeError):
bot.set_chat_photo(chat_id=super_group_id)
def test_get_small_file_instance_method(self, monkeypatch, chat_photo):
def test(*args, **kwargs):
return args[1] == chat_photo.small_file_id
monkeypatch.setattr('telegram.Bot.get_file', test)
assert chat_photo.get_small_file()
def test_get_big_file_instance_method(self, monkeypatch, chat_photo):
def test(*args, **kwargs):
return args[1] == chat_photo.big_file_id
monkeypatch.setattr('telegram.Bot.get_file', test)
assert chat_photo.get_big_file()
def test_equality(self):
a = ChatPhoto(self.chatphoto_small_file_id, self.chatphoto_big_file_id,
self.chatphoto_small_file_unique_id, self.chatphoto_big_file_unique_id)
b = ChatPhoto(self.chatphoto_small_file_id, self.chatphoto_big_file_id,
self.chatphoto_small_file_unique_id, self.chatphoto_big_file_unique_id)
c = ChatPhoto('', '', self.chatphoto_small_file_unique_id,
self.chatphoto_big_file_unique_id)
d = ChatPhoto('', '', 0, 0)
e = Voice(self.chatphoto_small_file_id, self.chatphoto_small_file_unique_id, 0)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a != c
assert hash(a) != hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
```
#### File: RaccBoot/tests/test_commandhandler.py
```python
import re
from queue import Queue
import pytest
import itertools
from telegram.utils.deprecate import TelegramDeprecationWarning
from telegram import Message, Update, Chat, Bot
from telegram.ext import CommandHandler, Filters, CallbackContext, JobQueue, PrefixHandler
from tests.conftest import make_command_message, make_command_update, make_message, \
make_message_update
def is_match(handler, update):
"""
Utility function that returns whether an update matched
against a specific handler.
:param handler: ``CommandHandler`` to check against
:param update: update to check
:return: (bool) whether ``update`` matched with ``handler``
"""
check = handler.check_update(update)
return check is not None and check is not False
class BaseTest(object):
"""Base class for command and prefix handler test classes. Contains
    utility methods and several callbacks used by both classes."""
test_flag = False
SRE_TYPE = type(re.match("", ""))
@pytest.fixture(autouse=True)
def reset(self):
self.test_flag = False
PASS_KEYWORDS = ('pass_user_data', 'pass_chat_data', 'pass_job_queue', 'pass_update_queue')
@pytest.fixture(scope='module', params=itertools.combinations(PASS_KEYWORDS, 2))
def pass_combination(self, request):
return {key: True for key in request.param}
def response(self, dispatcher, update):
"""
Utility to send an update to a dispatcher and assert
whether the callback was called appropriately. Its purpose is
for repeated usage in the same test function.
"""
self.test_flag = False
dispatcher.process_update(update)
return self.test_flag
def callback_basic(self, bot, update):
test_bot = isinstance(bot, Bot)
test_update = isinstance(update, Update)
self.test_flag = test_bot and test_update
def make_callback_for(self, pass_keyword):
def callback(bot, update, **kwargs):
self.test_flag = kwargs.get(keyword, None) is not None
keyword = pass_keyword[5:]
return callback
def callback_context(self, update, context):
self.test_flag = (isinstance(context, CallbackContext)
and isinstance(context.bot, Bot)
and isinstance(update, Update)
and isinstance(context.update_queue, Queue)
and isinstance(context.job_queue, JobQueue)
and isinstance(context.user_data, dict)
and isinstance(context.chat_data, dict)
and isinstance(context.bot_data, dict)
and isinstance(update.message, Message))
def callback_context_args(self, update, context):
self.test_flag = context.args == ['one', 'two']
def callback_context_regex1(self, update, context):
if context.matches:
types = all([type(res) == self.SRE_TYPE for res in context.matches])
num = len(context.matches) == 1
self.test_flag = types and num
def callback_context_regex2(self, update, context):
if context.matches:
types = all([type(res) == self.SRE_TYPE for res in context.matches])
num = len(context.matches) == 2
self.test_flag = types and num
def _test_context_args_or_regex(self, cdp, handler, text):
cdp.add_handler(handler)
update = make_command_update(text)
assert not self.response(cdp, update)
update.message.text += ' one two'
assert self.response(cdp, update)
def _test_edited(self, message, handler_edited, handler_not_edited):
"""
Assert whether a handler that should accept edited messages
and a handler that shouldn't work correctly.
:param message: ``telegram.Message`` to check against the handlers
:param handler_edited: handler that should accept edited messages
:param handler_not_edited: handler that should not accept edited messages
"""
update = make_command_update(message)
edited_update = make_command_update(message, edited=True)
assert is_match(handler_edited, update)
assert is_match(handler_edited, edited_update)
assert is_match(handler_not_edited, update)
assert not is_match(handler_not_edited, edited_update)
# ----------------------------- CommandHandler -----------------------------
class TestCommandHandler(BaseTest):
CMD = '/test'
@pytest.fixture(scope='class')
def command(self):
return self.CMD
@pytest.fixture(scope='class')
def command_message(self, command):
return make_command_message(command)
@pytest.fixture(scope='class')
def command_update(self, command_message):
return make_command_update(command_message)
def ch_callback_args(self, bot, update, args):
if update.message.text == self.CMD:
self.test_flag = len(args) == 0
elif update.message.text == '{}@{}'.format(self.CMD, bot.username):
self.test_flag = len(args) == 0
else:
self.test_flag = args == ['one', 'two']
def make_default_handler(self, callback=None, **kwargs):
callback = callback or self.callback_basic
return CommandHandler(self.CMD[1:], callback, **kwargs)
def test_basic(self, dp, command):
"""Test whether a command handler responds to its command
and not to others, or badly formatted commands"""
handler = self.make_default_handler()
dp.add_handler(handler)
assert self.response(dp, make_command_update(command))
assert not is_match(handler, make_command_update(command[1:]))
assert not is_match(handler, make_command_update('/not{}'.format(command[1:])))
assert not is_match(handler, make_command_update('not {} at start'.format(command)))
@pytest.mark.parametrize('cmd',
['way_too_longcommand1234567yes_way_toooooooLong', 'ïñválídletters',
'invalid #&* chars'],
ids=['too long', 'invalid letter', 'invalid characters'])
def test_invalid_commands(self, cmd):
with pytest.raises(ValueError, match='not a valid bot command'):
CommandHandler(cmd, self.callback_basic)
def test_command_list(self):
"""A command handler with multiple commands registered should respond to all of them."""
handler = CommandHandler(['test', 'star'], self.callback_basic)
assert is_match(handler, make_command_update('/test'))
assert is_match(handler, make_command_update('/star'))
assert not is_match(handler, make_command_update('/stop'))
def test_deprecation_warning(self):
"""``allow_edited`` deprecated in favor of filters"""
with pytest.warns(TelegramDeprecationWarning, match='See https://git.io/fxJuV'):
self.make_default_handler(allow_edited=True)
def test_edited(self, command_message):
"""Test that a CH responds to an edited message iff its filters allow it"""
handler_edited = self.make_default_handler()
handler_no_edited = self.make_default_handler(filters=~Filters.update.edited_message)
self._test_edited(command_message, handler_edited, handler_no_edited)
def test_edited_deprecated(self, command_message):
"""Test that a CH responds to an edited message iff ``allow_edited`` is True"""
handler_edited = self.make_default_handler(allow_edited=True)
handler_no_edited = self.make_default_handler(allow_edited=False)
self._test_edited(command_message, handler_edited, handler_no_edited)
def test_directed_commands(self, bot, command):
"""Test recognition of commands with a mention to the bot"""
handler = self.make_default_handler()
assert is_match(handler, make_command_update(command + '@' + bot.username, bot=bot))
assert not is_match(handler, make_command_update(command + '@otherbot', bot=bot))
def test_with_filter(self, command):
"""Test that a CH with a (generic) filter responds iff its filters match"""
handler = self.make_default_handler(filters=Filters.group)
assert is_match(handler, make_command_update(command, chat=Chat(-23, Chat.GROUP)))
assert not is_match(handler, make_command_update(command, chat=Chat(23, Chat.PRIVATE)))
def test_pass_args(self, dp, bot, command):
"""Test the passing of arguments alongside a command"""
handler = self.make_default_handler(self.ch_callback_args, pass_args=True)
dp.add_handler(handler)
at_command = '{}@{}'.format(command, bot.username)
assert self.response(dp, make_command_update(command))
assert self.response(dp, make_command_update(command + ' one two'))
assert self.response(dp, make_command_update(at_command, bot=bot))
assert self.response(dp, make_command_update(at_command + ' one two', bot=bot))
def test_newline(self, dp, command):
"""Assert that newlines don't interfere with a command handler matching a message"""
handler = self.make_default_handler()
dp.add_handler(handler)
update = make_command_update(command + '\nfoobar')
assert is_match(handler, update)
assert self.response(dp, update)
@pytest.mark.parametrize('pass_keyword', BaseTest.PASS_KEYWORDS)
def test_pass_data(self, dp, command_update, pass_combination, pass_keyword):
handler = CommandHandler('test', self.make_callback_for(pass_keyword), **pass_combination)
dp.add_handler(handler)
assert self.response(dp, command_update) == pass_combination.get(pass_keyword, False)
def test_other_update_types(self, false_update):
"""Test that a command handler doesn't respond to unrelated updates"""
handler = self.make_default_handler()
assert not is_match(handler, false_update)
def test_filters_for_wrong_command(self, mock_filter):
"""Filters should not be executed if the command does not match the handler"""
handler = self.make_default_handler(filters=mock_filter)
assert not is_match(handler, make_command_update('/star'))
assert not mock_filter.tested
def test_context(self, cdp, command_update):
"""Test correct behaviour of CHs with context-based callbacks"""
handler = self.make_default_handler(self.callback_context)
cdp.add_handler(handler)
assert self.response(cdp, command_update)
def test_context_args(self, cdp, command):
"""Test CHs that pass arguments through ``context``"""
handler = self.make_default_handler(self.callback_context_args)
self._test_context_args_or_regex(cdp, handler, command)
def test_context_regex(self, cdp, command):
"""Test CHs with context-based callbacks and a single filter"""
handler = self.make_default_handler(self.callback_context_regex1,
filters=Filters.regex('one two'))
self._test_context_args_or_regex(cdp, handler, command)
def test_context_multiple_regex(self, cdp, command):
"""Test CHs with context-based callbacks and filters combined"""
handler = self.make_default_handler(self.callback_context_regex2,
filters=Filters.regex('one') & Filters.regex('two'))
self._test_context_args_or_regex(cdp, handler, command)
# ----------------------------- PrefixHandler -----------------------------
def combinations(prefixes, commands):
return (prefix + command for prefix in prefixes for command in commands)
class TestPrefixHandler(BaseTest):
# Prefixes and commands with which to test PrefixHandler:
PREFIXES = ['!', '#', 'mytrig-']
COMMANDS = ['help', 'test']
COMBINATIONS = list(combinations(PREFIXES, COMMANDS))
@pytest.fixture(scope='class', params=PREFIXES)
def prefix(self, request):
return request.param
@pytest.fixture(scope='class', params=[1, 2], ids=['single prefix', 'multiple prefixes'])
def prefixes(self, request):
return TestPrefixHandler.PREFIXES[:request.param]
@pytest.fixture(scope='class', params=COMMANDS)
def command(self, request):
return request.param
@pytest.fixture(scope='class', params=[1, 2], ids=['single command', 'multiple commands'])
def commands(self, request):
return TestPrefixHandler.COMMANDS[:request.param]
@pytest.fixture(scope='class')
def prefix_message_text(self, prefix, command):
return prefix + command
@pytest.fixture(scope='class')
def prefix_message(self, prefix_message_text):
return make_message(prefix_message_text)
@pytest.fixture(scope='class')
def prefix_message_update(self, prefix_message):
return make_message_update(prefix_message)
def make_default_handler(self, callback=None, **kwargs):
callback = callback or self.callback_basic
return PrefixHandler(self.PREFIXES, self.COMMANDS, callback, **kwargs)
def ch_callback_args(self, bot, update, args):
if update.message.text in TestPrefixHandler.COMBINATIONS:
self.test_flag = len(args) == 0
else:
self.test_flag = args == ['one', 'two']
def test_basic(self, dp, prefix, command):
"""Test the basic expected response from a prefix handler"""
handler = self.make_default_handler()
dp.add_handler(handler)
text = prefix + command
assert self.response(dp, make_message_update(text))
assert not is_match(handler, make_message_update(command))
assert not is_match(handler, make_message_update(prefix + 'notacommand'))
assert not is_match(handler, make_command_update('not {} at start'.format(text)))
def test_single_multi_prefixes_commands(self, prefixes, commands, prefix_message_update):
"""Test various combinations of prefixes and commands"""
handler = self.make_default_handler()
result = is_match(handler, prefix_message_update)
expected = prefix_message_update.message.text in combinations(prefixes, commands)
return result == expected
def test_edited(self, prefix_message):
handler_edited = self.make_default_handler()
handler_no_edited = self.make_default_handler(filters=~Filters.update.edited_message)
self._test_edited(prefix_message, handler_edited, handler_no_edited)
def test_with_filter(self, prefix_message_text):
handler = self.make_default_handler(filters=Filters.group)
text = prefix_message_text
assert is_match(handler, make_message_update(text, chat=Chat(-23, Chat.GROUP)))
assert not is_match(handler, make_message_update(text, chat=Chat(23, Chat.PRIVATE)))
def test_pass_args(self, dp, prefix_message):
handler = self.make_default_handler(self.ch_callback_args, pass_args=True)
dp.add_handler(handler)
assert self.response(dp, make_message_update(prefix_message))
update_with_args = make_message_update(prefix_message.text + ' one two')
assert self.response(dp, update_with_args)
@pytest.mark.parametrize('pass_keyword', BaseTest.PASS_KEYWORDS)
def test_pass_data(self, dp, pass_combination, prefix_message_update, pass_keyword):
"""Assert that callbacks receive data iff its corresponding ``pass_*`` kwarg is enabled"""
handler = self.make_default_handler(self.make_callback_for(pass_keyword),
**pass_combination)
dp.add_handler(handler)
assert self.response(dp, prefix_message_update) \
== pass_combination.get(pass_keyword, False)
def test_other_update_types(self, false_update):
handler = self.make_default_handler()
assert not is_match(handler, false_update)
def test_filters_for_wrong_command(self, mock_filter):
"""Filters should not be executed if the command does not match the handler"""
handler = self.make_default_handler(filters=mock_filter)
assert not is_match(handler, make_message_update('/test'))
assert not mock_filter.tested
def test_edit_prefix(self):
handler = self.make_default_handler()
handler.prefix = ['?', '§']
assert handler._commands == list(combinations(['?', '§'], self.COMMANDS))
handler.prefix = '+'
assert handler._commands == list(combinations(['+'], self.COMMANDS))
def test_edit_command(self):
handler = self.make_default_handler()
handler.command = 'foo'
assert handler._commands == list(combinations(self.PREFIXES, ['foo']))
def test_basic_after_editing(self, dp, prefix, command):
"""Test the basic expected response from a prefix handler"""
handler = self.make_default_handler()
dp.add_handler(handler)
text = prefix + command
assert self.response(dp, make_message_update(text))
handler.command = 'foo'
text = prefix + 'foo'
assert self.response(dp, make_message_update(text))
def test_context(self, cdp, prefix_message_update):
handler = self.make_default_handler(self.callback_context)
cdp.add_handler(handler)
assert self.response(cdp, prefix_message_update)
def test_context_args(self, cdp, prefix_message_text):
handler = self.make_default_handler(self.callback_context_args)
self._test_context_args_or_regex(cdp, handler, prefix_message_text)
def test_context_regex(self, cdp, prefix_message_text):
handler = self.make_default_handler(self.callback_context_regex1,
filters=Filters.regex('one two'))
self._test_context_args_or_regex(cdp, handler, prefix_message_text)
def test_context_multiple_regex(self, cdp, prefix_message_text):
handler = self.make_default_handler(self.callback_context_regex2,
filters=Filters.regex('one') & Filters.regex(
'two'))
self._test_context_args_or_regex(cdp, handler, prefix_message_text)
```
#### File: RaccBoot/tests/test_encryptedpassportelement.py
```python
import pytest
from telegram import EncryptedPassportElement, PassportFile, PassportElementError
@pytest.fixture(scope='class')
def encrypted_passport_element():
return EncryptedPassportElement(TestEncryptedPassportElement.type_,
data=TestEncryptedPassportElement.data,
phone_number=TestEncryptedPassportElement.phone_number,
email=TestEncryptedPassportElement.email,
files=TestEncryptedPassportElement.files,
front_side=TestEncryptedPassportElement.front_side,
reverse_side=TestEncryptedPassportElement.reverse_side,
selfie=TestEncryptedPassportElement.selfie)
class TestEncryptedPassportElement(object):
type_ = 'type'
data = 'data'
phone_number = 'phone_number'
email = 'email'
files = [PassportFile('file_id', 50, 0)]
front_side = PassportFile('file_id', 50, 0)
reverse_side = PassportFile('file_id', 50, 0)
selfie = PassportFile('file_id', 50, 0)
def test_expected_values(self, encrypted_passport_element):
assert encrypted_passport_element.type == self.type_
assert encrypted_passport_element.data == self.data
assert encrypted_passport_element.phone_number == self.phone_number
assert encrypted_passport_element.email == self.email
assert encrypted_passport_element.files == self.files
assert encrypted_passport_element.front_side == self.front_side
assert encrypted_passport_element.reverse_side == self.reverse_side
assert encrypted_passport_element.selfie == self.selfie
def test_to_dict(self, encrypted_passport_element):
encrypted_passport_element_dict = encrypted_passport_element.to_dict()
assert isinstance(encrypted_passport_element_dict, dict)
assert (encrypted_passport_element_dict['type']
== encrypted_passport_element.type)
assert (encrypted_passport_element_dict['data']
== encrypted_passport_element.data)
assert (encrypted_passport_element_dict['phone_number']
== encrypted_passport_element.phone_number)
assert (encrypted_passport_element_dict['email']
== encrypted_passport_element.email)
assert isinstance(encrypted_passport_element_dict['files'], list)
assert (encrypted_passport_element_dict['front_side']
== encrypted_passport_element.front_side.to_dict())
assert (encrypted_passport_element_dict['reverse_side']
== encrypted_passport_element.reverse_side.to_dict())
assert (encrypted_passport_element_dict['selfie']
== encrypted_passport_element.selfie.to_dict())
def test_equality(self):
a = EncryptedPassportElement(self.type_, data=self.data)
b = EncryptedPassportElement(self.type_, data=self.data)
c = EncryptedPassportElement(self.data, '')
d = PassportElementError('source', 'type', 'message')
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a != c
assert hash(a) != hash(c)
assert a != d
assert hash(a) != hash(d)
```
#### File: RaccBoot/tests/test_inlinequeryresultcachedaudio.py
```python
import pytest
from telegram import (InputTextMessageContent, InlineQueryResultCachedAudio, InlineKeyboardMarkup,
InlineKeyboardButton, InlineQueryResultCachedVoice)
@pytest.fixture(scope='class')
def inline_query_result_cached_audio():
return InlineQueryResultCachedAudio(
TestInlineQueryResultCachedAudio.id_,
TestInlineQueryResultCachedAudio.audio_file_id,
caption=TestInlineQueryResultCachedAudio.caption,
parse_mode=TestInlineQueryResultCachedAudio.parse_mode,
input_message_content=TestInlineQueryResultCachedAudio.input_message_content,
reply_markup=TestInlineQueryResultCachedAudio.reply_markup)
class TestInlineQueryResultCachedAudio(object):
id_ = 'id'
type_ = 'audio'
audio_file_id = 'audio file id'
caption = 'caption'
parse_mode = 'HTML'
input_message_content = InputTextMessageContent('input_message_content')
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
def test_expected_values(self, inline_query_result_cached_audio):
assert inline_query_result_cached_audio.type == self.type_
assert inline_query_result_cached_audio.id == self.id_
assert inline_query_result_cached_audio.audio_file_id == self.audio_file_id
assert inline_query_result_cached_audio.caption == self.caption
assert inline_query_result_cached_audio.parse_mode == self.parse_mode
assert (inline_query_result_cached_audio.input_message_content.to_dict()
== self.input_message_content.to_dict())
assert (inline_query_result_cached_audio.reply_markup.to_dict()
== self.reply_markup.to_dict())
def test_to_dict(self, inline_query_result_cached_audio):
inline_query_result_cached_audio_dict = inline_query_result_cached_audio.to_dict()
assert isinstance(inline_query_result_cached_audio_dict, dict)
assert (inline_query_result_cached_audio_dict['type']
== inline_query_result_cached_audio.type)
assert inline_query_result_cached_audio_dict['id'] == inline_query_result_cached_audio.id
assert (inline_query_result_cached_audio_dict['audio_file_id']
== inline_query_result_cached_audio.audio_file_id)
assert (inline_query_result_cached_audio_dict['caption']
== inline_query_result_cached_audio.caption)
assert (inline_query_result_cached_audio_dict['parse_mode']
== inline_query_result_cached_audio.parse_mode)
assert (inline_query_result_cached_audio_dict['input_message_content']
== inline_query_result_cached_audio.input_message_content.to_dict())
assert (inline_query_result_cached_audio_dict['reply_markup']
== inline_query_result_cached_audio.reply_markup.to_dict())
def test_equality(self):
a = InlineQueryResultCachedAudio(self.id_, self.audio_file_id)
b = InlineQueryResultCachedAudio(self.id_, self.audio_file_id)
c = InlineQueryResultCachedAudio(self.id_, '')
d = InlineQueryResultCachedAudio('', self.audio_file_id)
e = InlineQueryResultCachedVoice(self.id_, '', '')
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
```
#### File: RaccBoot/tests/test_invoice.py
```python
import pytest
from flaky import flaky
from telegram import LabeledPrice, Invoice
@pytest.fixture(scope='class')
def invoice():
return Invoice(TestInvoice.title, TestInvoice.description, TestInvoice.start_parameter,
TestInvoice.currency, TestInvoice.total_amount)
class TestInvoice(object):
payload = 'payload'
prices = [LabeledPrice('Fish', 100), LabeledPrice('Fish Tax', 1000)]
provider_data = """{"test":"test"}"""
title = 'title'
description = 'description'
start_parameter = 'start_parameter'
currency = 'EUR'
total_amount = sum([p.amount for p in prices])
def test_de_json(self, bot):
invoice_json = Invoice.de_json({
'title': TestInvoice.title,
'description': TestInvoice.description,
'start_parameter': TestInvoice.start_parameter,
'currency': TestInvoice.currency,
'total_amount': TestInvoice.total_amount
}, bot)
assert invoice_json.title == self.title
assert invoice_json.description == self.description
assert invoice_json.start_parameter == self.start_parameter
assert invoice_json.currency == self.currency
assert invoice_json.total_amount == self.total_amount
def test_to_dict(self, invoice):
invoice_dict = invoice.to_dict()
assert isinstance(invoice_dict, dict)
assert invoice_dict['title'] == invoice.title
assert invoice_dict['description'] == invoice.description
assert invoice_dict['start_parameter'] == invoice.start_parameter
assert invoice_dict['currency'] == invoice.currency
assert invoice_dict['total_amount'] == invoice.total_amount
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_required_args_only(self, bot, chat_id, provider_token):
message = bot.send_invoice(chat_id, self.title, self.description, self.payload,
provider_token, self.start_parameter, self.currency,
self.prices)
assert message.invoice.currency == self.currency
assert message.invoice.start_parameter == self.start_parameter
assert message.invoice.description == self.description
assert message.invoice.title == self.title
assert message.invoice.total_amount == self.total_amount
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_all_args(self, bot, chat_id, provider_token):
message = bot.send_invoice(
chat_id,
self.title,
self.description,
self.payload,
provider_token,
self.start_parameter,
self.currency,
self.prices,
provider_data=self.provider_data,
photo_url='https://raw.githubusercontent.com/'
'python-telegram-bot/logos/master/'
'logo/png/ptb-logo_240.png',
photo_size=240,
photo_width=240,
photo_height=240,
need_name=True,
need_phone_number=True,
need_email=True,
need_shipping_address=True,
send_phone_number_to_provider=True,
send_email_to_provider=True,
is_flexible=True)
assert message.invoice.currency == self.currency
assert message.invoice.start_parameter == self.start_parameter
assert message.invoice.description == self.description
assert message.invoice.title == self.title
assert message.invoice.total_amount == self.total_amount
def test_send_object_as_provider_data(self, monkeypatch, bot, chat_id, provider_token):
def test(_, url, data, **kwargs):
return (data['provider_data'] == '{"test_data": 123456789}' # Depends if using
or data['provider_data'] == '{"test_data":123456789}') # ujson or not
monkeypatch.setattr('telegram.utils.request.Request.post', test)
assert bot.send_invoice(chat_id, self.title, self.description, self.payload,
provider_token, self.start_parameter, self.currency,
self.prices, provider_data={'test_data': 123456789})
```
#### File: RaccBoot/tests/test_meta.py
```python
import os
import pytest
def call_pre_commit_hook(hook_id):
__tracebackhide__ = True
return os.system(' '.join(['pre-commit', 'run', '--all-files', hook_id])) # pragma: no cover
@pytest.mark.nocoverage
@pytest.mark.parametrize('hook_id', argvalues=('yapf', 'flake8', 'pylint'))
@pytest.mark.skipif(not os.getenv('TEST_PRE_COMMIT', False), reason='TEST_PRE_COMMIT not enabled')
def test_pre_commit_hook(hook_id):
assert call_pre_commit_hook(hook_id) == 0 # pragma: no cover
@pytest.mark.nocoverage
@pytest.mark.skipif(not os.getenv('TEST_BUILD', False), reason='TEST_BUILD not enabled')
def test_build():
assert os.system('python setup.py bdist_dumb') == 0 # pragma: no cover
```
#### File: RaccBoot/tests/test_passportelementerrorreverseside.py
```python
import pytest
from telegram import PassportElementErrorReverseSide, PassportElementErrorSelfie
@pytest.fixture(scope='class')
def passport_element_error_reverse_side():
return PassportElementErrorReverseSide(TestPassportElementErrorReverseSide.type_,
TestPassportElementErrorReverseSide.file_hash,
TestPassportElementErrorReverseSide.message)
class TestPassportElementErrorReverseSide(object):
source = 'reverse_side'
type_ = 'test_type'
file_hash = 'file_hash'
message = 'Error message'
def test_expected_values(self, passport_element_error_reverse_side):
assert passport_element_error_reverse_side.source == self.source
assert passport_element_error_reverse_side.type == self.type_
assert passport_element_error_reverse_side.file_hash == self.file_hash
assert passport_element_error_reverse_side.message == self.message
def test_to_dict(self, passport_element_error_reverse_side):
passport_element_error_reverse_side_dict = passport_element_error_reverse_side.to_dict()
assert isinstance(passport_element_error_reverse_side_dict, dict)
assert (passport_element_error_reverse_side_dict['source']
== passport_element_error_reverse_side.source)
assert (passport_element_error_reverse_side_dict['type']
== passport_element_error_reverse_side.type)
assert (passport_element_error_reverse_side_dict['file_hash']
== passport_element_error_reverse_side.file_hash)
assert (passport_element_error_reverse_side_dict['message']
== passport_element_error_reverse_side.message)
def test_equality(self):
a = PassportElementErrorReverseSide(self.type_, self.file_hash, self.message)
b = PassportElementErrorReverseSide(self.type_, self.file_hash, self.message)
c = PassportElementErrorReverseSide(self.type_, '', '')
d = PassportElementErrorReverseSide('', self.file_hash, '')
e = PassportElementErrorReverseSide('', '', self.message)
f = PassportElementErrorSelfie(self.type_, self.file_hash, self.message)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a != c
assert hash(a) != hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
assert a != f
assert hash(a) != hash(f)
```
#### File: RaccBoot/tests/test_replykeyboardremove.py
```python
import pytest
from flaky import flaky
from telegram import ReplyKeyboardRemove
@pytest.fixture(scope='class')
def reply_keyboard_remove():
return ReplyKeyboardRemove(selective=TestReplyKeyboardRemove.selective)
class TestReplyKeyboardRemove(object):
remove_keyboard = True
selective = True
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_message_with_reply_keyboard_remove(self, bot, chat_id, reply_keyboard_remove):
message = bot.send_message(chat_id, 'Text', reply_markup=reply_keyboard_remove)
assert message.text == 'Text'
def test_expected_values(self, reply_keyboard_remove):
assert reply_keyboard_remove.remove_keyboard == self.remove_keyboard
assert reply_keyboard_remove.selective == self.selective
def test_to_dict(self, reply_keyboard_remove):
reply_keyboard_remove_dict = reply_keyboard_remove.to_dict()
assert (reply_keyboard_remove_dict['remove_keyboard']
== reply_keyboard_remove.remove_keyboard)
assert reply_keyboard_remove_dict['selective'] == reply_keyboard_remove.selective
```
#### File: RaccBoot/tests/test_update.py
```python
import pytest
from telegram import (Message, User, Update, Chat, CallbackQuery, InlineQuery,
ChosenInlineResult, ShippingQuery, PreCheckoutQuery, Poll, PollOption)
from telegram.poll import PollAnswer
message = Message(1, User(1, '', False), None, Chat(1, ''), text='Text')
params = [
{'message': message},
{'edited_message': message},
{'callback_query': CallbackQuery(1, User(1, '', False), 'chat', message=message)},
{'channel_post': message},
{'edited_channel_post': message},
{'inline_query': InlineQuery(1, User(1, '', False), '', '')},
{'chosen_inline_result': ChosenInlineResult('id', User(1, '', False), '')},
{'shipping_query': ShippingQuery('id', User(1, '', False), '', None)},
{'pre_checkout_query': PreCheckoutQuery('id', User(1, '', False), '', 0, '')},
{'callback_query': CallbackQuery(1, User(1, '', False), 'chat')},
{'poll': Poll('id', '?', [PollOption('.', 1)], False, False, False, Poll.REGULAR, True)},
{'poll_answer': PollAnswer("id", User(1, '', False), [1])}
]
all_types = ('message', 'edited_message', 'callback_query', 'channel_post',
'edited_channel_post', 'inline_query', 'chosen_inline_result',
'shipping_query', 'pre_checkout_query', 'poll', 'poll_answer')
ids = all_types + ('callback_query_without_message',)
@pytest.fixture(params=params, ids=ids)
def update(request):
return Update(update_id=TestUpdate.update_id, **request.param)
class TestUpdate(object):
update_id = 868573637
@pytest.mark.parametrize('paramdict', argvalues=params, ids=ids)
def test_de_json(self, bot, paramdict):
json_dict = {'update_id': TestUpdate.update_id}
# Convert the single update 'item' to a dict of that item and apply it to the json_dict
json_dict.update({k: v.to_dict() for k, v in paramdict.items()})
update = Update.de_json(json_dict, bot)
assert update.update_id == self.update_id
# Make sure only one thing in the update (other than update_id) is not None
i = 0
for type in all_types:
if getattr(update, type) is not None:
i += 1
assert getattr(update, type) == paramdict[type]
assert i == 1
def test_update_de_json_empty(self, bot):
update = Update.de_json(None, bot)
assert update is None
def test_de_json_default_quote(self, bot):
json_dict = {'update_id': TestUpdate.update_id}
json_dict['message'] = message.to_dict()
json_dict['default_quote'] = True
update = Update.de_json(json_dict, bot)
assert update.message.default_quote is True
def test_to_dict(self, update):
update_dict = update.to_dict()
assert isinstance(update_dict, dict)
assert update_dict['update_id'] == update.update_id
for type in all_types:
if getattr(update, type) is not None:
assert update_dict[type] == getattr(update, type).to_dict()
def test_effective_chat(self, update):
# Test that it's sometimes None per docstring
chat = update.effective_chat
if not (update.inline_query is not None
or update.chosen_inline_result is not None
or (update.callback_query is not None
and update.callback_query.message is None)
or update.shipping_query is not None
or update.pre_checkout_query is not None
or update.poll is not None
or update.poll_answer is not None):
assert chat.id == 1
else:
assert chat is None
def test_effective_user(self, update):
# Test that it's sometimes None per docstring
user = update.effective_user
if not (update.channel_post is not None
or update.edited_channel_post is not None
or update.poll is not None):
assert user.id == 1
else:
assert user is None
def test_effective_message(self, update):
# Test that it's sometimes None per docstring
eff_message = update.effective_message
if not (update.inline_query is not None
or update.chosen_inline_result is not None
or (update.callback_query is not None
and update.callback_query.message is None)
or update.shipping_query is not None
or update.pre_checkout_query is not None
or update.poll is not None
or update.poll_answer is not None):
assert eff_message.message_id == message.message_id
else:
assert eff_message is None
def test_equality(self):
a = Update(self.update_id, message=message)
b = Update(self.update_id, message=message)
c = Update(self.update_id)
d = Update(0, message=message)
e = User(self.update_id, '', False)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
```
#### File: RaccBoot/tests/test_userprofilephotos.py
```python
from telegram import PhotoSize, UserProfilePhotos
class TestUserProfilePhotos(object):
total_count = 2
photos = [
[
PhotoSize('file_id1', 'file_un_id1', 512, 512),
PhotoSize('file_id2', 'file_un_id1', 512, 512)
],
[
PhotoSize('file_id3', 'file_un_id3', 512, 512),
PhotoSize('file_id4', 'file_un_id4', 512, 512)
]
]
def test_de_json(self, bot):
json_dict = {
'total_count': 2,
'photos': [[y.to_dict() for y in x] for x in self.photos]
}
user_profile_photos = UserProfilePhotos.de_json(json_dict, bot)
assert user_profile_photos.total_count == self.total_count
assert user_profile_photos.photos == self.photos
def test_to_dict(self):
user_profile_photos = UserProfilePhotos(self.total_count, self.photos)
user_profile_photos_dict = user_profile_photos.to_dict()
assert user_profile_photos_dict['total_count'] == user_profile_photos.total_count
for ix, x in enumerate(user_profile_photos_dict['photos']):
for iy, y in enumerate(x):
assert y == user_profile_photos.photos[ix][iy].to_dict()
```
#### File: RaccBoot/tests/test_videonote.py
```python
import os
import pytest
from flaky import flaky
from telegram import VideoNote, TelegramError, Voice, PhotoSize
@pytest.fixture(scope='function')
def video_note_file():
f = open('tests/data/telegram2.mp4', 'rb')
yield f
f.close()
@pytest.fixture(scope='class')
def video_note(bot, chat_id):
with open('tests/data/telegram2.mp4', 'rb') as f:
return bot.send_video_note(chat_id, video_note=f, timeout=50).video_note
class TestVideoNote(object):
length = 240
duration = 3
file_size = 132084
thumb_width = 240
thumb_height = 240
thumb_file_size = 11547
caption = u'VideoNoteTest - Caption'
videonote_file_id = '5a3128a4d2a04750b5b58397f3b5e812'
videonote_file_unique_id = 'adc3145fd2e84d95b64d68eaa22aa33e'
def test_creation(self, video_note):
# Make sure file has been uploaded.
assert isinstance(video_note, VideoNote)
assert isinstance(video_note.file_id, str)
assert isinstance(video_note.file_unique_id, str)
assert video_note.file_id != ''
assert video_note.file_unique_id != ''
assert isinstance(video_note.thumb, PhotoSize)
assert isinstance(video_note.thumb.file_id, str)
assert isinstance(video_note.thumb.file_unique_id, str)
assert video_note.thumb.file_id != ''
assert video_note.thumb.file_unique_id != ''
def test_expected_values(self, video_note):
assert video_note.length == self.length
assert video_note.duration == self.duration
assert video_note.file_size == self.file_size
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_send_all_args(self, bot, chat_id, video_note_file, video_note, thumb_file):
message = bot.send_video_note(chat_id, video_note_file, duration=self.duration,
length=self.length, disable_notification=False,
thumb=thumb_file)
assert isinstance(message.video_note, VideoNote)
assert isinstance(message.video_note.file_id, str)
assert isinstance(message.video_note.file_unique_id, str)
assert message.video_note.file_id != ''
assert message.video_note.file_unique_id != ''
assert message.video_note.length == video_note.length
assert message.video_note.duration == video_note.duration
assert message.video_note.file_size == video_note.file_size
assert message.video_note.thumb.file_size == self.thumb_file_size
assert message.video_note.thumb.width == self.thumb_width
assert message.video_note.thumb.height == self.thumb_height
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_get_and_download(self, bot, video_note):
new_file = bot.get_file(video_note.file_id)
assert new_file.file_size == self.file_size
assert new_file.file_id == video_note.file_id
assert new_file.file_unique_id == video_note.file_unique_id
assert new_file.file_path.startswith('https://')
new_file.download('telegram2.mp4')
assert os.path.isfile('telegram2.mp4')
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_resend(self, bot, chat_id, video_note):
message = bot.send_video_note(chat_id, video_note.file_id)
assert message.video_note == video_note
def test_send_with_video_note(self, monkeypatch, bot, chat_id, video_note):
def test(_, url, data, **kwargs):
return data['video_note'] == video_note.file_id
monkeypatch.setattr('telegram.utils.request.Request.post', test)
message = bot.send_video_note(chat_id, video_note=video_note)
assert message
def test_de_json(self, bot):
json_dict = {
'file_id': self.videonote_file_id,
'file_unique_id': self.videonote_file_unique_id,
'length': self.length,
'duration': self.duration,
'file_size': self.file_size
}
json_video_note = VideoNote.de_json(json_dict, bot)
assert json_video_note.file_id == self.videonote_file_id
assert json_video_note.file_unique_id == self.videonote_file_unique_id
assert json_video_note.length == self.length
assert json_video_note.duration == self.duration
assert json_video_note.file_size == self.file_size
def test_to_dict(self, video_note):
video_note_dict = video_note.to_dict()
assert isinstance(video_note_dict, dict)
assert video_note_dict['file_id'] == video_note.file_id
assert video_note_dict['file_unique_id'] == video_note.file_unique_id
assert video_note_dict['length'] == video_note.length
assert video_note_dict['duration'] == video_note.duration
assert video_note_dict['file_size'] == video_note.file_size
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video_note(chat_id, open(os.devnull, 'rb'))
@flaky(3, 1)
@pytest.mark.timeout(10)
def test_error_send_empty_file_id(self, bot, chat_id):
with pytest.raises(TelegramError):
bot.send_video_note(chat_id, '')
def test_error_without_required_args(self, bot, chat_id):
with pytest.raises(TypeError):
bot.send_video_note(chat_id=chat_id)
def test_get_file_instance_method(self, monkeypatch, video_note):
def test(*args, **kwargs):
return args[1] == video_note.file_id
monkeypatch.setattr('telegram.Bot.get_file', test)
assert video_note.get_file()
def test_equality(self, video_note):
a = VideoNote(video_note.file_id, video_note.file_unique_id, self.length, self.duration)
b = VideoNote('', video_note.file_unique_id, self.length, self.duration)
c = VideoNote(video_note.file_id, video_note.file_unique_id, 0, 0)
d = VideoNote('', '', self.length, self.duration)
e = Voice(video_note.file_id, video_note.file_unique_id, self.duration)
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
``` |
{
"source": "jordi399/web_scrapping",
"score": 3
} |
#### File: jordi399/web_scrapping/opening_pagination.py
```python
import import_ipynb
import fetching_pages as FP
import sections as sxn
import create_json as C
import fetching_from_database as ffb
import bs4
import mysql.connector
from bs4 import BeautifulSoup
import pandas as pd
import requests
import json
import time
import sys
from datetime import datetime
import random
from multiprocessing import Pool, Process
import glob
import os
import http.client
# In[2]:
http.client._MAXHEADERS = 1000
import warnings
warnings.filterwarnings('ignore')
# In[3]:
df = pd.read_csv('625vmpages.csv')
df = df.drop('Unnamed: 0',axis=1)
vmps = df['0'].tolist()
# In[33]:
def pagination(data):
xpath = 'https://www.softwaresuggest.com/desktopview/softwarelist'
products = []
button = 'view more'
i = 25
new_data = data
while(button):
page = FP.get_response_page(xpath,'post',new_data)
temp = FP.get_products_from_a_page(page)
products.append(temp)
i = i+25
new_data["startlimit"] = i
if(len(temp)<25):
button = None
new_pro=[]
for p in products:
for i in p:
new_pro.append(i)
new_pro = set(new_pro)
return(new_pro)
# In[34]:
data = {"categoryid": "66","startlimit":"25",
"softwareid": "undefined",
"software": "Server Monitoring Tools",
"page_type": "Category",
"current_cat_id": "66"}
# In[37]:
res = pagination(data)
for i in res:
print(i)
print(len(res))
# In[ ]:
```
#### File: jordi399/web_scrapping/sections.py
```python
import import_ipynb
# import libs
import fetching_pages as FP
# import sections as sxn
import create_json as C
import fetching_from_database as ffb
import bs4
import mysql.connector
from bs4 import BeautifulSoup
import pandas as pd
import requests
import json
import time
import sys
from datetime import datetime
import random
from multiprocessing import Pool, Process
# In[2]:
class DESCRIPTION:
def __init__(self,description):
self.description = description
def Key(self):
if(self.description):
for i in self.description.find_all('h2'):
key=(i.get_text())
return (key)
else:
return ('Description')
def Value(self):
if(self.description):
D_value = ""
for i in self.description.find_all('div',class_='software_discription section_bg_prof'):
for j in i.find_all('div'):
for k in j.find_all('p'):
D_value = D_value+str(k.get_text())
return (D_value)
else:
return (None)
# In[3]:
class PRICING:
def __init__(self,pricing):
self.pricing = pricing
def Key(self):
if(self.pricing):
            # Getting the section title (key) of the whole pricing section
for i in self.pricing.find_all('div', class_='row section_main'):
for j in i.find_all('div', class_='col-12 section_title'):
key = (j.find('h2').get_text())
return (key)
else:
return ('Pricing')
#Getting value
def Value(self):
if(self.pricing):
names_of_plan = []
costs = []
for i in self.pricing.find_all('div', class_='row price_row_mar_top'):
for j in i.find_all('div', class_='col-xl-4 pricing_sec_cell'):
for k in j.find_all('div', class_='table-header'):
for l in k.find_all('div', class_='d-flex align-items-center justify-content-center soft-plan-heading'):
names_of_plan.append(
str(l.get_text()).replace('\n', ""))
for m in k.find_all('div', class_='d-flex flex-column align-items-center justify-content-center soft-plan-price'):
costs.append(str(m.get_text()).replace("\n", ""))
length = len(costs)
features_of_plans = [[] for x in range(len(costs))]
plans = [{} for x in range(len(costs))]
for fv in self.pricing.find_all('div', class_='soft-pricing-table'):
temp = str(fv.find(
'div', class_='d-flex align-items-center justify-content-center soft-plan-heading').get_text())
temp = temp.replace("\n", "")
for check in range(length):
if(temp == names_of_plan[check]):
for fu in fv.find_all('div', class_='soft-pricing-instruction'):
for ft in fu.find_all('ul'):
for fs in ft.find_all('li'):
features_of_plans[check].append(
str(fs.get_text()).replace("\n", ""))
P_value = {}
list_of_plans = [{} for ck in range(len(names_of_plan))]
for i in range(length):
t_dict = plans[i]
features_of_plans[i].pop()
t_dict.update(
{"Name": names_of_plan[i], "Costs": costs[i], "Benifits": features_of_plans[i]})
P_value.update({"Plan"+str(i+1): t_dict})
return (P_value)
else:
return (None)
# In[4]:
class DETAILS:
def __init__(self,company_details):
self.company_details = company_details
def Key(self):
return ('Company Details')
def Value(self):
if(self.company_details):
details_value = {}
for i in (self.company_details.find_all('div',class_='row section_main')):
for j in i.find_all('div',class_='col-12 section_title'):
Details=(j.find('h2').get_text())
for k in i.find_all('ul'):
for l in k.find_all('li'):
for s in l.find_all('span'):
for b in s.find_all('b'):
tk = (b.get_text())
for p in l.find_all('p'):
if(p.find('a')!=None):
temp = p.find('a')
tv = ( temp['href'] )
else:
tv = ( p.get_text())
details_value.update( {tk:tv} )
return (details_value)
else:
return (None)
# In[5]:
class USERS:
def __init__(self,users):
self.users = users
def Key(self):
if(self.users):
key = (self.users.find('div',class_='col-12 d-flex align-items-center justify-content-between section_title section_title_with_btn')).find('h2').get_text()
return (key)
else:
return ('Users')
def Value(self):
if(self.users):
B_list=[]
A_list=[]
C_list=[]
user_main = {}
for i in self.users.find_all('div',class_='row section_main'):
for k in i.find_all('div',class_='section_bg_prof'):
for l in k.find_all('div',class_='user_title'):
temp_k = l.get_text()
for m in k.find_all('ul',class_='d-flex user_list_main'):
for n in m.find_all('li'):
o = n.find('i')
if(o.get_text()!='cancel'):
temp_v = (n.get_text()).replace(o.get_text(),"")
if(l.get_text()=='Business'):
B_list.append((n.get_text()).replace(o.get_text(),""))
elif(l.get_text()=='Available Support'):
A_list.append((n.get_text()).replace(o.get_text(),""))
else:
C_list.append((n.get_text()).replace(o.get_text(),""))
if(temp_k=='Business'):
user_main.update({temp_k:B_list})
elif(temp_k=='Available Support'):
user_main.update({temp_k:A_list})
else:
user_main.update({temp_k:C_list})
return (user_main)
else:
return (None)
# In[6]:
class SPECIFICATIONS:
def __init__(self,specifications):
self.specifications = specifications
def Key(self):
if (self.specifications):
key = self.specifications.find('h2').get_text()
return (key)
else:
return ('Specifications')
def Value(self):
if (self.specifications):
fspex = {}
for i in (self.specifications.find_all('div', class_='row section_main')):
for j in (i.find_all('div', class_='row')):
for k in (j.find_all('div', class_='col-xs-12 col-sm-12 col-xl-6 speci_column')):
if(k.find('h3') == None):
for l in k.find_all('ul'):
for m in l.find_all('li'):
if(m.find('i') == None):
span = m.find('span')
p = m.find('p')
fspex.update(
{span.get_text(): p.get_text()})
else:
i_li = m.find('i')
spans = m.find('span')
if(i_li.get_text() == 'cancel'):
fspex.update(
{spans.get_text(): False})
else:
fspex.update(
{spans.get_text(): True})
else:
for n in k.find_all('div', class_='specification_small_title'):
for o in n.find_all('h3'):
p = o.get_text()
if(p == 'Other Categories'):
ocats = []
for q in k.find_all('div', class_='d-flex flex-wrap speci_other_cat'):
for r in q.find_all('a', class_='ga_track_oth_cat d-flex align-items-center'):
ocats.append(r.get_text())
fspex.update({p: ocats})
else:
ans = []
for s in k.find_all('ul'):
for t in s.find_all('li'):
i_t = t.find('i').get_text()
s_t = t.find('span').get_text()
if(i_t != 'cancel'):
ans.append(s_t)
if(len(ans) != 0):
fspex.update({p: ans})
else:
fspex.update({p: False})
for u in j.find_all('div', class_='col-xs-12 col-sm-12 col-xl-6'):
x_ans = []
for v in u.find_all('div', class_="specification_small_title"):
for w in v.find_all('h3'):
x = w.get_text()
for y in u.find_all('ul'):
for z in y.find_all('li'):
i_z = z.find('i').get_text()
s_z = z.find('span').get_text()
if(i_z != 'cancel'):
x_ans.append(s_z)
fspex.update({x: x_ans})
if(len(x_ans) != 0):
fspex.update({x: x_ans})
else:
fspex.update({x: False})
return (fspex)
else:
return (None)
# In[15]:
class VIDEO:
def __init__(self,video):
self.video = video
def Key(self):
if(self.video):
key = self.video.find('h2').get_text()
return (key)
else:
return ('Video')
def Value(self):
if(self.video):
if(self.video.find('a')):
value = self.video.find('a')['href']
elif(self.video.find('iframe')):
value = self.video.find('iframe')['src']
else:
value = 'None'
return (value)
# In[8]:
class FEATURES:
def __init__(self,features):
self.features = features
def Key(self):
if(self.features):
key = self.features.find('h2').get_text()
return (key)
else:
return ('Features')
def Value(self):
if(self.features):
value=[]
for i in (self.features.find_all('ul')):
value.append( (i.find('li')).get_text() )
return (value)
else:
return (None)
# In[9]:
class AWARDS:
def __init__(self,award_section):
self.award_section = award_section
def Key(self):
if (self.award_section):
key = (self.award_section.find('h2').get_text())
return (key)
else:
return ('Awards')
def Value(self):
if (self.award_section):
value = []
for i in (self. award_section.find_all('img',alt = True) ):
value.append(i['src'])
value.append(i['data-src'])
return (value)
else:
return (None)
# In[10]:
class SCREENSHOTS:
def __init__(self,screenshots):
self.screenshots = screenshots
def Key(self):
if (self.screenshots):
key = (self.screenshots.find('h2')).get_text()
return (key)
else:
return ('Screenshots')
def Value(self):
if (self.screenshots):
value = []
for i in set(self.screenshots.find_all('a',alt=True)):
value.append(i['src'])
value.append(i['data-src'])
return (value)
else:
return (None)
# In[11]:
class OVERVIEW:
def __init__(self,overview):
self.overview = overview
def Key(self):
if(self.overview):
if(self.overview.find('h2').get_text()=='Overview'):
return (self.overview.find('h3').get_text())
else:
return ('Overview')
def Value(self):
if(self.overview):
if(self.overview.find('p')):
return (self.overview.find('p').get_text())
else:
return (None)
# In[12]:
def NAME(header):
    name = None
    try:
        if(header):
            if(header.find('h1')):
                name = (header.find('h1')).get_text()
                return (name)
            else:
                name = None
                return (name)
    except:
        name = None
    return (name)
# In[13]:
def LOGO(header):
logo = []
try:
if(header):
if(header.find_all('img')):
for i in (header.find_all('img',alt=True)):
logo.append(i['src'])
logo.append(i['data-src'])
return (logo)
else:
logo.append(None)
return (logo)
except:
logo.append("Not found")
return (logo)
# In[14]:
class BREADCRUMB:
def __init__(self,breadcrumb):
self.breadcrumb = breadcrumb
def Category(self):
cat={}
if(self.breadcrumb):
for ul in (self.breadcrumb.find_all('ul')):
for idx,li in enumerate(ul.find_all('li')):
if (idx==1):
for a in li.find_all('a'):
cat.update({a.get_text():a['href']})
return cat
else:
return {}
    def Software(self):
        sw = {}
        if(self.breadcrumb):
            for ul in (self.breadcrumb.find_all('ul')):
                for idx,li in enumerate(ul.find_all('li')):
                    if (idx==2):
                        for a in li.find_all('a'):
                            sw.update({a.get_text():a['href']})
            return sw
        else:
            return {}
# In[ ]:
# In[ ]:
# In[ ]:
``` |
{
"source": "jordiae/DeepLearning-MAI",
"score": 3
} |
#### File: cnn/models/pyramid_cnn.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
class PyramidEncoder(nn.Module):
def __init__(self, args):
super(PyramidEncoder, self).__init__()
# channels in, channels out, kernel_size.
# Defaults: stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'
self.kernel_size = args.kernel_size
self.dropout = True if args.dropout > 0.0 else False
self.dropout_layer = nn.Dropout(args.dropout) if args.dropout > 0.0 else None
self.batch_norm = not args.no_batch_norm
self.pool = nn.MaxPool2d(2, 2)
self.conv_layers = nn.ModuleList([])
self.input_size = 256
self.stride = 1
self.stride_end_block = 2
self.padding = self.kernel_size // 2
self.channels_in = 3
self.channels_first_in = args.initial_channels
self.n_conv_layers = args.conv_layers
channels_in = self.channels_in
dims = self.input_size
for i in range(0, args.conv_blocks):
if i == 0:
channels_out = self.channels_first_in
else:
channels_out = channels_in * 2
for j in range(0, args.conv_layers):
if j == args.conv_layers - 1 and args.no_pool:
                    dims = ((dims - self.kernel_size + 2 * self.padding) // self.stride_end_block + 1)
conv = nn.Conv2d(channels_in, channels_out, self.kernel_size, stride=self.stride_end_block,
padding=self.padding)
else:
dims = ((dims - self.kernel_size + 2 * self.padding) * self.stride + 1)
conv = nn.Conv2d(channels_in, channels_out, self.kernel_size, stride=self.stride,
padding=self.padding)
if self.batch_norm:
self.conv_layers.append(nn.ModuleList([conv, nn.BatchNorm2d(channels_out)]))
else:
self.conv_layers.append(conv)
channels_in = channels_out
dims //= 2
self.dims = dims
self.channels_out = channels_out
def forward(self, x):
for idx, conv_layer in enumerate(self.conv_layers):
if self.batch_norm:
conv, batch_norm = conv_layer
x = F.relu(batch_norm(conv(x)))
else:
x = F.relu(conv_layer(x))
if idx % self.n_conv_layers == 0:
x = self.pool(x)
return x
class PyramidDecoder(nn.Module):
def __init__(self, args):
super(PyramidDecoder, self).__init__()
# channels in, channels out, kernel_size.
# Defaults: stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'
self.kernel_size = args.kernel_size
self.dropout = True if args.dropout > 0.0 else False
self.dropout_layer = nn.Dropout(args.dropout) if args.dropout > 0.0 else None
self.batch_norm = not args.no_batch_norm
self.conv_layers = nn.ModuleList([])
self.input_size = args.encoder_dims_out
self.stride = 2
self.padding = self.kernel_size//2
self.channels_in = args.encoder_channels_out
self.n_conv_layers = args.conv_layers
self.channels_out = 3
channels_in = self.channels_in
dims = self.input_size
for i in range(0, args.conv_blocks):
if i == args.conv_blocks-1:
channels_out = self.channels_out
else:
channels_out = channels_in//2
dims = ((dims - self.kernel_size + 2 * self.padding) * self.stride + 1)
conv = nn.ConvTranspose2d(channels_in, channels_out, self.kernel_size, stride=self.stride,
padding=self.padding)
if self.batch_norm:
self.conv_layers.append(nn.ModuleList([conv, nn.BatchNorm2d(channels_out)]))
else:
self.conv_layers.append(conv)
channels_in = channels_out
dims *= 2
def forward(self, x):
for idx, conv_layer in enumerate(self.conv_layers):
output_size = list(map(lambda d: d * 2, list(x.shape)[-2:]))
if self.batch_norm:
conv, batch_norm = conv_layer
x = F.relu(batch_norm(conv(x, output_size=output_size)))
else:
x = F.relu(conv_layer(x, output_size=output_size))
x = torch.tanh(x)
return x
class PyramidCNN(nn.Module):
def __init__(self, args):
super(PyramidCNN, self).__init__()
self.encoder = PyramidEncoder(args)
args.encoder_dims_out = self.encoder.dims
args.encoder_channels_out = self.encoder.channels_out
self.classification_head = PyramidClassificationHead(args)
self.mode = 'classifier' if not args.autoencoder else 'autoencoder'
self.decoder = PyramidDecoder(args) if self.mode == 'autoencoder' else None
def forward(self, x):
x = self.encoder(x)
if self.mode == 'classifier':
x = self.classification_head(x)
else:
x = self.decoder(x)
return x
class PyramidClassificationHead(nn.Module):
def __init__(self, args):
super(PyramidClassificationHead, self).__init__()
self.kernel_size = args.kernel_size
self.dropout = True if args.dropout > 0.0 else False
self.dropout_layer = nn.Dropout(args.dropout) if args.dropout > 0.0 else None
self.batch_norm = not args.no_batch_norm
self.n_classes = 67
dims = args.encoder_dims_out
channels_out = args.encoder_channels_out
self.pool_channels_max = nn.MaxPool2d(dims, dims)
self.pool_channels_avg = nn.AvgPool2d(dims, dims)
self.fc_layers = nn.ModuleList([])
dims_in = channels_out * 2 # because of concat of max and avg pooling along channels
self.dims_in_fc = dims_in
for i in range(0, args.fc_layers-1):
dims_out = dims_in // 2
fc = nn.Linear(dims_in, dims_out)
if self.batch_norm:
self.fc_layers.append(nn.ModuleList([fc, nn.BatchNorm1d(dims_out)]))
else:
self.fc_layers.append(fc)
dims_in = dims_out
self.fc_layers.append(nn.Linear(dims_in, self.n_classes))
def forward(self, x):
x = torch.cat([torch.squeeze(self.pool_channels_max(x)), torch.squeeze(self.pool_channels_avg(x))], 1)
x = x.view(-1, self.dims_in_fc)
for fc_layer in self.fc_layers[:-1]:
if self.batch_norm:
fc, batch_norm = fc_layer
x = F.relu(batch_norm(fc(x)))
else:
x = F.relu(fc_layer(x))
x = self.dropout_layer(x)
x = self.fc_layers[-1](x)
# softmax not required (done by cross-entropy criterion):
# "This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class.
# https://pytorch.org/docs/stable/nn.html#crossentropyloss
return x
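        # Illustrative pairing sketch (hypothetical names): since this head outputs raw logits,
        # training would combine it directly with a cross-entropy criterion, e.g.
        #   criterion = nn.CrossEntropyLoss()
        #   loss = criterion(model(images), labels)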
```
#### File: src/cnn/stats.py
```python
from .utils import get_num_pixels
import os
def get_stats(data, path):
stats = {}
for class_ in data:
stats[class_] = {}
for img in data[class_]:
width, height = get_num_pixels(os.path.join(path, class_, img))
stats[class_][img] = width, height
return stats
def get_freqs(img_dict):
sizes = {}
for img in img_dict:
size = img_dict[img]
if size in sizes:
sizes[size] += 1
else:
sizes[size] = 1
return sizes
def get_stats_freqs(data, path):
stats = get_stats(data, path)
res = []
for key in stats:
res.append(f'{key}: total = {len(stats[key])}, size_freqs = {get_freqs(stats[key])}\n\n')
return res
```
#### File: src/rnn/utils.py
```python
import torch
from typing import Tuple
import torch.nn.functional as F
import argparse
import os
import logging
from torch import nn
def load_arch(device: torch.device, args: argparse.Namespace) -> Tuple[torch.nn.Module, torch.nn.Module]:
"""
Returns initialized encoder and decoder, to be used jointly as a Seq2seq model.
Notice that if bidirectional is set to True, the hidden_size of the decoder will be multiplied by 2.
:param device: device
:param args: Arguments from argparse.
:return: Initialized model
"""
from rnn.models import VanillaRNN, LSTM, GRU, Decoder, PyTorchBaseRNN
decoder_bidirectional_mul = 2 if args.bidirectional else 1
embeddings = None
if args.share_embeddings:
embeddings = nn.Embedding(args.vocab_size, args.embedding_size)
if args.no_pytorch_rnn:
if args.arch == 'elman':
encoder = VanillaRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, mode='elman',
dropout=args.dropout, bidirectional=args.bidirectional, embeddings=embeddings)
decoder = Decoder(VanillaRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul,
n_layers=args.n_layers, mode='elman', dropout=args.dropout,
bidirectional=False, embeddings=embeddings),
args.vocab_size)
elif args.arch == 'jordan':
encoder = VanillaRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, mode='jordan',
dropout=args.dropout, bidirectional=args.bidirectional, embeddings=embeddings)
decoder = Decoder(VanillaRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul,
n_layers=args.n_layers, mode='jordan', dropout=args.dropout,
bidirectional=False, embeddings=embeddings),
args.vocab_size)
elif args.arch == 'lstm':
encoder = LSTM(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, dropout=args.dropout,
bidirectional=args.bidirectional, embeddings=embeddings)
decoder = Decoder(LSTM(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul, n_layers=args.n_layers,
dropout=args.dropout, bidirectional=False, embeddings=embeddings), args.vocab_size)
elif args.arch == 'gru':
encoder = GRU(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, dropout=args.dropout,
bidirectional=args.bidirectional, embeddings=embeddings)
decoder = Decoder(GRU(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul, n_layers=args.n_layers,
dropout=args.dropout, bidirectional=False, embeddings=embeddings), args.vocab_size)
else:
raise NotImplementedError()
else:
if args.arch == 'elman':
encoder = PyTorchBaseRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, arch='elman',
dropout=args.dropout, bidirectional=args.bidirectional, embeddings=embeddings)
decoder = Decoder(PyTorchBaseRNN(device=device, vocab_size=args.vocab_size,
embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul,
n_layers=args.n_layers, arch='elman', dropout=args.dropout,
bidirectional=False, embeddings=embeddings),
args.vocab_size)
elif args.arch == 'jordan':
raise NotImplementedError()
elif args.arch == 'lstm':
encoder = PyTorchBaseRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, dropout=args.dropout,
bidirectional=args.bidirectional, embeddings=embeddings, arch='lstm')
decoder = Decoder(PyTorchBaseRNN(device=device, vocab_size=args.vocab_size,
embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul,
n_layers=args.n_layers, dropout=args.dropout, bidirectional=False,
embeddings=embeddings, arch='lstm'), args.vocab_size)
elif args.arch == 'gru':
encoder = PyTorchBaseRNN(device=device, vocab_size=args.vocab_size, embedding_dim=args.embedding_size,
hidden_features=args.hidden_size, n_layers=args.n_layers, dropout=args.dropout,
bidirectional=args.bidirectional, embeddings=embeddings, arch='gru')
decoder = Decoder(PyTorchBaseRNN(device=device, vocab_size=args.vocab_size,
embedding_dim=args.embedding_size,
hidden_features=args.hidden_size*decoder_bidirectional_mul,
n_layers=args.n_layers, dropout=args.dropout, bidirectional=False,
embeddings=embeddings, arch='gru'), args.vocab_size)
else:
raise NotImplementedError()
return encoder, decoder
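# Illustrative usage sketch (the Namespace fields mirror the attributes read above; the concrete
# values are hypothetical):
#   args = argparse.Namespace(arch='lstm', no_pytorch_rnn=False, share_embeddings=False,
#                             vocab_size=10000, embedding_size=256, hidden_size=512,
#                             n_layers=2, dropout=0.1, bidirectional=True)
#   encoder, decoder = load_arch(torch.device('cpu'), args)
#   # With bidirectional=True, the decoder is built with hidden_features = 512 * 2.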
def pack_right_padded_seq(seqs: torch.Tensor, lengths: torch.Tensor, device: str) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Function for packing a right-padded sequence, inspired by the functionality of
torch.nn.utils.rnn.pack_padded_sequence.
Instead of relying on a lengths parameter, it assumes that the sequences are zero-padded.
    The function flattens all sequences into a single sequence, ordered by time-step ([first token of first batch,
    first token of second batch, ..., last token of last batch]) and removes padding. It also returns the effective
    batch size at each iteration, which will be [number of first tokens across batch, number of second tokens, ...].
    lengths is used to verify that the sequences are ordered by length.
If the batch is not sorted by increasing lengths, an exception is thrown.
:param seqs: [batch, right-padded tokens]
:param lengths: [batch]
:param device: device
:return: ([packed tokens], [effective batch sizes])
"""
prev = lengths[0]
for l in lengths:
if l < prev:
raise Exception('Unsorted batches!')
else:
prev = l
effective_batch_sizes = (seqs != 0).sum(dim=0)
seqs = torch.cat((seqs, torch.zeros(seqs.shape[0], 1).to(device).long()), dim=-1)
seqs = seqs.permute(-1, 0).reshape(seqs.shape[0] * seqs.shape[1]) # [batch, tokens] -> [batch*tokens]
non_pad_idx = (seqs != 0).nonzero().flatten()
seqs = seqs[non_pad_idx]
return seqs, effective_batch_sizes
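# Worked example sketch (hypothetical toy batch, already sorted by increasing length):
#   seqs = torch.tensor([[4, 0, 0],
#                        [5, 6, 0],
#                        [7, 8, 9]])
#   lengths = torch.tensor([1, 2, 3])
#   packed, effective_batch_sizes = pack_right_padded_seq(seqs, lengths, 'cpu')
#   # packed -> tensor([4, 5, 7, 6, 8, 9]) (time-step major, padding removed)
#   # effective_batch_sizes -> tensor([3, 2, 1]) (non-pad tokens per time-step)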
def init_train_logging():
"""Sets logging such that the output is both saved in a file and output to stdout"""
log_path = 'train.log'
if os.path.exists('checkpoint_last.pt'):
logging.basicConfig(filename=log_path, level=logging.INFO, filemode='a')
else:
logging.basicConfig(filename=log_path, level=logging.INFO)
logging.getLogger('').addHandler(logging.StreamHandler())
def init_eval_logging(set_: str):
"""Sets logging such that the output is both saved in a file and output to stdout"""
# TODO: Refactor
log_path = f'eval-{set_}.log'
if os.path.exists('checkpoint_last.pt'):
logging.basicConfig(filename=log_path, level=logging.INFO, filemode='a')
else:
logging.basicConfig(filename=log_path, level=logging.INFO)
logging.getLogger('').addHandler(logging.StreamHandler())
class LabelSmoothingLoss(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingLoss, self).__init__()
self.smoothing = smoothing
def smooth_one_hot(self, target, classes, smoothing=0.0):
assert 0 <= smoothing < 1
shape = (target.size(0), classes)
with torch.no_grad():
target = torch.empty(size=shape, device=target.device) \
.fill_(smoothing / (classes - 1)) \
.scatter_(1, target.data.unsqueeze(1), 1. - smoothing)
return target
def forward(self, input, target):
target = LabelSmoothingLoss.smooth_one_hot(self, target, input.size(-1), self.smoothing)
lsm = F.log_softmax(input, -1)
loss = -(target * lsm).sum(-1)
loss = loss.mean()
return loss
```
#### File: src/transfer/utils.py
```python
import os
from PIL import Image
import torch
import torch.nn.functional as F
from torch import nn
from typing import List
import torchvision
def dir_path(s: str):
if os.path.isdir(s):
return s
else:
raise NotADirectoryError(s)
def get_num_pixels(img_path: str):
width, height = Image.open(img_path).size
return width, height
def load_model(pretrained_model: str, pre_conv: bool, mode: str, transfer_strategy: str):
from transfer.models import build_pretrained
model, transform_in = build_pretrained(pretrained_model, pretrained=mode == 'train', n_classes=67,
input_size=(256, 256), transfer_strategy=transfer_strategy, preconv=pre_conv)
return model, transform_in
class ComposedOptimizer:
def __init__(self, optimizers: List[torch.optim.Optimizer]):
self.optimizers = optimizers
def zero_grad(self):
for opt in self.optimizers:
opt.zero_grad()
def step(self):
for opt in self.optimizers:
opt.step()
class LabelSmoothingLoss(nn.Module):
def __init__(self, smoothing=0.0):
super(LabelSmoothingLoss, self).__init__()
self.smoothing = smoothing
def smooth_one_hot(self, target: torch.Tensor, classes: int, smoothing: float = 0.0):
assert 0 <= smoothing < 1
shape = (target.size(0), classes)
with torch.no_grad():
target = torch.empty(size=shape, device=target.device) \
.fill_(smoothing / (classes - 1)) \
.scatter_(1, target.data.unsqueeze(1), 1. - smoothing)
return target
def forward(self, input: torch.Tensor, target: torch.Tensor):
target = LabelSmoothingLoss.smooth_one_hot(self, target, input.size(-1), self.smoothing)
lsm = F.log_softmax(input, -1)
loss = -(target * lsm).sum(-1)
loss = loss.mean()
return loss
``` |
{
"source": "jordiae/distify",
"score": 2
} |
#### File: distify/distify/__init__.py
```python
from ray.util.multiprocessing import Pool as RayPool
from multiprocessing.pool import ThreadPool as MTPool
import multiprocessing
import os
import time
import uuid
import sqlite3
from tqdm import tqdm
import logging
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
import contextlib
import json
from typing import Optional
try:
import thread
except ImportError:
import _thread as thread
from timeout_decorator import timeout
from timeout_decorator.timeout_decorator import TimeoutError
# TODO: fix multi-threaded forks
# Ideally, with set context 'spawn', but Ray doesn't support it?
# from multiprocessing_logging import install_mp_handler
# install_mp_handler()
# TODO: fault tolerance? https://docs.ray.io/en/latest/auto_examples/plot_example-lm.html
SQL_CHECK_SAME_THREAD = False # TODO: REVIEW
CHECKPOINT_DB_PATH = 'checkpoint.db'
def timestamp():
return time.strftime("%Y-%m-%d-%H%M")
# TODO: improve logging
# TODO: test multiple backends (mp, ray, threads)
# TODO: typing
# TODO: timeout doesnt work with sleep?
# TODO: Improve logging, interaction with tqdm, etc
# TODO: pip
# TODO: CI
# TODO: testing
# TODO: Slurm
class TqdmLoggingHandler(logging.StreamHandler):
"""Avoid tqdm progress bar interruption by logger's output to console"""
# see logging.StreamHandler.eval method:
# https://github.com/python/cpython/blob/d2e2534751fd675c4d5d3adc208bf4fc984da7bf/Lib/logging/__init__.py#L1082-L1091
# and tqdm.write method:
# https://github.com/tqdm/tqdm/blob/f86104a1f30c38e6f80bfd8fb16d5fcde1e7749f/tqdm/std.py#L614-L620
    def emit(self, record):
        try:
            msg = self.format(record)
            tqdm.write(msg, end=self.terminator)
        except RecursionError:
            raise
        except Exception:
            self.handleError(record)
@dataclass
class DistifyConfig:
# checkpoint_frequency: int
log_frequency: int
chunksize: int
parallel: bool
# Inside pool?
# Then SQL connection should be initialized for each node etc
parallelize_checkpoint_retrieval: bool
requires_order: bool
timeout: Optional[int]
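# A (hypothetical) Hydra YAML fragment matching the DistifyConfig fields above; the
# actual config group name and default values depend on the application using distify:
#     distify:
#       log_frequency: 100
#       chunksize: 1
#       parallel: true
#       parallelize_checkpoint_retrieval: false
#       requires_order: false
#       timeout: null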
def register_configs():
cs = ConfigStore.instance()
cs.store(
name="config",
node=DistifyConfig,
)
register_configs()
class Globals:
def __init__(self):
self.F_MAPPERS = None
self.F_REDUCERS = None
self.sql_con = None
self.sql_cur = None
self.timeout = None
G = Globals()
def exit_after():
    '''
    Use as a decorator factory: the wrapped call is aborted (its TimeoutError is
    caught and a None result is returned) if it takes longer than G.timeout seconds.
    '''
if G.timeout is not None:
def outer(fn):
@timeout(G.timeout)
def inner(*args, **kwargs):
return fn(*args, **kwargs)
def inner2(*args, **kwargs):
try:
res = inner(*args, **kwargs)
except TimeoutError as e:
res = {'hash': hash(args[1]), 'result': None}
return res
return inner2
return outer
def outer(fn):
return fn
return outer
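# Illustrative (hypothetical) usage of exit_after: guard a per-item call so that it is
# aborted, returning a None result, once G.timeout seconds have elapsed.
#     @exit_after()
#     def _guarded_call(mapper, item):
#         return mapper(item)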
class Worker:
def __init__(self):
self.process_id = os.uname()[1] + '_' + str(os.getpid())
self.logger = logging.getLogger(self.process_id)
self.logger.addHandler(TqdmLoggingHandler())
def get_unique_path(self):
ts = timestamp()
extra_id = uuid.uuid4().hex
return os.path.join(os.getcwd(), self.process_id + '_' + ts + '_' + extra_id)
@classmethod
def factory(cls, *args):
return cls(*args)
class Mapper(Worker):
    def map(self, x):
raise NotImplementedError
def __call__(self, x):
# return self.process(x)
try:
res = {'hash': hash(x), 'result': self.map(x)}
except BaseException as e:
self.logger.warning(f'Uncaught exception: {str(e)}')
res = {'hash': hash(x), 'result': None}
return res
class Reducer(Worker):
def reduce(self, store, values):
raise NotImplementedError
@property
def default_value(self):
raise NotImplementedError
def __call__(self, store, values):
if store is None:
store = self.default_value
values = list(filter(lambda x: x is not None, values))
if len(values) == 0:
values = [self.default_value]
return self.reduce(store, values)
class MapperComposer(Mapper):
def __init__(self, mappers, mappers_args):
self.mappers = [mapper.factory(mapper_args) for mapper, mapper_args in zip(mappers, mappers_args)]
super().__init__()
    def map(self, x):
result = x
for mapper in self.mappers:
result = mapper(result)
return result
class ReducerComposer(Worker):
def __init__(self, reducers, reducers_args):
self.reducers = [reducer.factory(reducer_args) for reducer, reducer_args in zip(reducers, reducers_args)]
super().__init__()
def reduce(self, store, values):
ac = store
for reducer in self.reducers:
ac = reducer(ac, values)
return ac
@property
def default_value(self):
return self.reducers[0].default_value
@contextlib.contextmanager
def SingleProcessPool(initializer, initargs):
initializer(*initargs)
class NullPool:
def imap_unordered(self, f, l, chunksize=1):
return map(f, l)
        def imap(self, f, l, chunksize=1):
            return map(f, l)
def map(self, f, l):
return list(map(f, l))
yield NullPool()
# TODO: Close?
class Processor:
def __init__(self, mapper_class, stream, distify_cfg, mapper_args=(), reducer_class=None, reducer_args=()):
self.mapper_class = mapper_class
self.stream = stream
if distify_cfg.requires_order:
self.stream = sorted(stream)
self.timeout = distify_cfg.timeout
# self.checkpoint_frequency = distify_cfg.checkpoint_frequency
self.mapper_args = mapper_args
self.parallel_backend = distify_cfg.parallel_backend
assert self.parallel_backend in ['ray', 'mp', 'mt', 'seq']
self.reducer_class = reducer_class
self.reducer_args = reducer_args
self.distify_cfg = distify_cfg
self.logger = logging.getLogger('DISTIFY MAIN')
restoring = os.path.exists(CHECKPOINT_DB_PATH)
self.con = sqlite3.connect(CHECKPOINT_DB_PATH, check_same_thread=SQL_CHECK_SAME_THREAD)
self.cur = self.con.cursor()
# Checkpoint
if not restoring:
sql_create_tasks_table = """CREATE TABLE IF NOT EXISTS elements (
id integer PRIMARY KEY,
hash integer
);"""
self.cur.execute(sql_create_tasks_table)
index_sql = "CREATE INDEX IF NOT EXISTS hash_index ON elements(hash)"
self.cur.execute(index_sql)
# Reduced
if reducer_class is not None:
sql_create_tasks_table = """CREATE TABLE IF NOT EXISTS reduce (
id integer PRIMARY KEY,
value text
);"""
self.cur.execute(sql_create_tasks_table)
self.cur.execute(f"INSERT INTO reduce VALUES (0, {json.dumps(None)})")
self.con.commit()
if self.distify_cfg.parallelize_checkpoint_retrieval:
self.con.close()
del self.cur
del self.con
@staticmethod
def filter_pool(pool, func, iterable):
res = pool.map(func, iterable)
to_keep = []
for element, keep in zip(iterable, res):
if keep:
to_keep.append(element)
return to_keep
@staticmethod
def done_global(x):
x = hash(x)
G.sql_cur.execute(f"SELECT * FROM elements WHERE hash = {x}")
data = G.sql_cur.fetchall()
return len(data) != 0
def done(self, x):
x = hash(x)
self.cur.execute(f"SELECT * FROM elements WHERE hash = {x}")
data = self.cur.fetchall()
return len(data) != 0
def not_done(self, x):
return not self.done(x)
def not_done_global(self, x):
return not self.done_global(x)
def run(self):
work_dir = os.getcwd()
if not self.distify_cfg.parallelize_checkpoint_retrieval:
new_stream = list(filter(self.not_done, self.stream))
if self.parallel_backend == 'ray':
pool = RayPool
elif self.parallel_backend == 'mp':
pool = multiprocessing.get_context('spawn').Pool
elif self.parallel_backend == 'mt':
pool = MTPool
else:
pool = SingleProcessPool
result = None
with pool(initializer=self._initialize, initargs=(self.mapper_class.factory, work_dir,
self.mapper_args,
self.distify_cfg.parallelize_checkpoint_retrieval,
self.reducer_class.factory if self.reducer_class is not None else None,
self.reducer_args if self.reducer_class is not None else None,
self.timeout
)) as p:
self._initialize(self.mapper_class.factory, work_dir,
self.mapper_args,
self.distify_cfg.parallelize_checkpoint_retrieval,
self.reducer_class.factory if self.reducer_class is not None else None,
self.reducer_args if self.reducer_class is not None else None,
self.timeout
)
if self.distify_cfg.parallelize_checkpoint_retrieval:
new_stream = self.filter_pool(p, self.not_done_global, self.stream)
# TODO: close connection for all nodes except master
self.con = sqlite3.connect(CHECKPOINT_DB_PATH, check_same_thread=SQL_CHECK_SAME_THREAD)
self.cur = self.con.cursor()
if self.distify_cfg.requires_order:
new_stream = sorted(new_stream)
res = p.imap(self._map_f, new_stream, chunksize=self.distify_cfg.chunksize)
else:
res = p.imap_unordered(self._map_f, new_stream, chunksize=self.distify_cfg.chunksize)
if len(new_stream) != len(self.stream):
self.logger.info(f'Resuming execution from checkpoint {os.getcwd()}')
pbar = tqdm(res, initial=len(self.stream) - len(new_stream), total=len(self.stream))
for idx, e in enumerate(pbar):
if isinstance(e, BaseException):
self.logger.info(f'Error in worker: {str(e)}')
continue
h = e['hash']
result = e['result']
self.cur.execute(f"INSERT INTO elements VALUES ({h}, {h})")
# TODO: reintroduce periodic checkpointing
# if idx % self.log_reduce_frequency == 0:
# TODO: reduction could (should?) be run in parallel
if self.reducer_class is not None:
self.cur.execute(f"SELECT id, value FROM reduce")
current_reduced = self.cur.fetchall()
id_, value = current_reduced[0]
if value is not None:
value = json.loads(value)
reduced, log_message = self._reduce_f(value, [result])
if log_message is not None and idx % self.distify_cfg.log_frequency == 0:
pbar.set_description(log_message)
# TODO: Also log log_message, but only to file, not to console
reduced_dump = json.dumps(reduced)
sql = f''' UPDATE reduce
SET value = '{reduced_dump}'
WHERE id = {id_}'''
self.cur.execute(sql)
self.con.commit()
self.con.close()
if self.reducer_class is not None:
with open('reduced.json', 'w', encoding='utf-8') as f:
json.dump(reduced, f, ensure_ascii=False, indent=4)
return result
@staticmethod
def _map_f(x):
return G.F_MAPPERS(x)
@staticmethod
def _reduce_f(store, values):
return G.F_REDUCERS(store, values)
@staticmethod
def _initialize(mapper_factory, work_dir, mapper_args, parallelize_checkpoint_retrieval,
reducer_factory, reducer_args, timeout):
os.chdir(work_dir) # needed for ray
G.F_MAPPERS = mapper_factory(*mapper_args)
if reducer_factory is not None:
G.F_REDUCERS = reducer_factory(*reducer_args)
if parallelize_checkpoint_retrieval:
G.sql_con = sqlite3.connect(CHECKPOINT_DB_PATH, check_same_thread=SQL_CHECK_SAME_THREAD)
G.sql_cur = G.sql_con.cursor()
G.timeout = timeout
__version__ = '0.3.3'
__all__ = ['Processor', 'Mapper', 'Reducer', '__version__', 'MapperComposer', 'ReducerComposer']
```
#### File: examples/basic/app.py
```python
import os
import hydra
import logging
from omegaconf import DictConfig
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
from pprint import pformat
from distify import Mapper, Processor, Reducer
log = logging.getLogger(__name__)
@dataclass
class MyMapperConfig:
my_custom_argument: int
my_other_custom_argument: int
@dataclass
class MyReducerConfig:
pass
@dataclass
class MyAppConfig:
mapper: MyMapperConfig
reducer: MyReducerConfig
def register_configs():
cs = ConfigStore.instance()
cs.store(
group="app",
name="my_app",
node=MyAppConfig,
)
register_configs()
class MyMapper(Mapper):
def __init__(self, cfg: MyMapperConfig):
super().__init__()
self.non_pickable_dependency = lambda x: x + cfg.my_custom_argument + cfg.my_other_custom_argument
self.write_path = self.get_unique_path() + '.txt'
self.fd = open(self.write_path, 'a')
def map(self, x):
if x % 10000 == 0:
self.logger.info(f'Hi {x}')
self.fd.write(str(self.non_pickable_dependency(x)) + '\n')
self.fd.flush()
# Returning a value is optional! But if we want to use a Reducer, we should return something
return x
# Reduction is optional
class MyReducer(Reducer):
def __init__(self, cfg: MyReducerConfig):
super().__init__()
self.cfg = cfg
@property
def default_value(self):
return 0
def reduce(self, store, values):
result = store + sum(values)
log_message = f'Reduced so far: {result}'
return result, log_message
@hydra.main(config_path="conf", config_name="base_config")
def main(cfg: DictConfig) -> None:
logging.info(pformat(cfg))
logging.info(os.getcwd())
# Again, reducer_class and reducer_args arguments are optional!
# Stream must be list, not generator
processor = Processor(stream=list(range(0, 20_000)), mapper_class=MyMapper, mapper_args=[cfg.app.mapper],
distify_cfg=cfg.distify, reducer_class=MyReducer, reducer_args=[cfg.app.reducer])
reduced = processor.run()
logging.info('Finished execution correctly')
logging.info(pformat(reduced))
if __name__ == '__main__':
main()
``` |
{
"source": "jordiae/neural-compilers",
"score": 2
} |
#### File: jordiae/neural-compilers/evaluate-io-legacy.py
```python
import os
from dataclasses import dataclass, asdict
from neural_compilers.utils.tokenization import GCC
from typing import *
import logging
import time
import uuid
import git
from bleu import list_bleu
from neural_compilers.utils.tokenization import PygmentsTokenizer
import lizard
import re, itertools
from copy import deepcopy
import sys
from io import StringIO
import contextlib
import json
def print(*args):
logging.info(' '.join([str(arg) for arg in args]))
code_tokenizer = PygmentsTokenizer()
def eval_bleu(ref: str, hyp: str) -> float:
return list_bleu([ref], [hyp], detok=False)
JUST_FUNC = True
BAD_CASES = []
BAD_EXAMPLES = {}
@dataclass
class FuncParameter:
type_name: str
var_name: str
@dataclass
class Signature:
return_type: str
func_name: str
parameters: List[FuncParameter]
@dataclass
class Example:
inp: List[str]
out: str
@dataclass
class ExampleList:
signature: Signature
examples: List[Example]
@dataclass
class Array:
var_name: str
size: int
@dataclass
class Props:
output: List[str]
arrays: List[Array]
# From IO-EMBEDDINGS repo
def parse_file(path: str) -> Tuple[Signature, Example]:
with open(path, 'r') as f:
lines = f.readlines()
# added hack for avoiding comments, macros, empty lines. TODO: review, improve
lines = [line for line in lines if not line.startswith('//') and not line.startswith('#') and len(line.split()) > 0]
signature = lines[0]
signature_split = signature.split()
return_type = signature_split[0]
func_name = signature_split[1].split('(')[0]
parameters = signature[signature.find('(') + 1:signature.find(')')]
parsed_parameters = []
for parameter in parameters.split(','):
pointer = False
        if parameter.count('*') > 1:
raise RuntimeError(parameter)
if '*' in parameter:
parameter = parameter.replace('*', '')
pointer = True
parameter = ' '.join(parameter.split())
param_type, param_name = parameter.split()
if pointer:
param_type += '*'
parsed_parameters.append(FuncParameter(type_name=param_type, var_name=param_name))
parsed_signature = Signature(return_type=return_type, func_name=func_name, parameters=parsed_parameters)
parsed_example = None
return parsed_signature, parsed_example
def get_single_scanf(parameter: FuncParameter, declare: bool = True) -> str:
scanf = []
if parameter.type_name in ['int', 'bool']:
if declare:
scanf.append(f' int {parameter.var_name};')
scanf.append(f' scanf("%d", &{parameter.var_name});')
elif parameter.type_name == 'float':
if declare:
scanf.append(f' float {parameter.var_name};')
scanf.append(f' scanf("%f", &{parameter.var_name});')
elif parameter.type_name == 'char':
if declare:
scanf.append(f' char {parameter.var_name};')
scanf.append(f' scanf("%c", &{parameter.var_name});')
else:
raise NotImplementedError(parameter.type_name)
return '\n'.join(scanf)
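# For illustration, get_single_scanf(FuncParameter(type_name='int', var_name='n'))
# returns roughly:
#     int n;
#     scanf("%d", &n);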
def infer_size_from_code_or_examples(func_code: str, parameter: FuncParameter, examples) -> str:
for line in func_code.splitlines():
if 'for' in line:
if parameter.var_name in line:
before, _, after = line.partition('<')
return after.split()[0].replace(';', '')
raise RuntimeError('Cannot infer size from code')
# reverse_scalars: in simpl, scalars seem to be in reverse order
def get_scanf(signature: Signature, props: Props, func_code: str, reverse_scalars: bool = True) -> str:
# hack to have n before arrays of size n
scalar_scanfs = []
array_scanfs = []
for parameter in signature.parameters:
if parameter.type_name.count('*') > 1:
raise NotImplementedError(parameter.type_name)
elif parameter.type_name.count('*') == 0:
scalar_scanfs.append(get_single_scanf(parameter))
else: # == 1
size = None
for array in props.arrays:
if array.var_name == parameter.var_name:
size = array.size
break
scalar_type = parameter.type_name.replace('*', '')
element = FuncParameter(type_name=scalar_type, var_name=parameter.var_name+'[idx]')
single_scanf = get_single_scanf(element, declare=False)
array_scanfs.append(f' {parameter.type_name} {parameter.var_name};')
array_scanfs.append(f" {parameter.var_name} = ({parameter.type_name}) malloc({size}*sizeof({parameter.type_name.replace('*','')}));")
array_scanfs.append(' int idx;')
array_scanfs.append(f' for (idx = 0; idx < {size}; idx++) ' + '{')
array_scanfs.append(' ' + single_scanf)
array_scanfs.append(' }')
if len(scalar_scanfs) > 1 and reverse_scalars:
scalar_scanfs.reverse()
scanf = scalar_scanfs + array_scanfs
return '\n'.join(scanf) + '\n'
def get_function_call(signature: Signature) -> str:
res = ''
if signature.return_type != 'void':
res = f' {signature.return_type} res;\n'
res += ' res = '
res += signature.func_name + '(' + ' '.join([par.var_name + ',' for par in signature.parameters])
if len(signature.parameters) > 0:
res = res[:-1]
res += ');\n'
return res
def get_single_printf(type_: str, var_name: str, trailing_space: bool = False) -> str:
space = ' ' if trailing_space else ''
if type_ in ['int', 'bool']: # TODO: check bool
return f' printf("{space}%d", {var_name});'
elif type_ == 'float':
return f' printf("{space}%f", {var_name});'
elif type_ == 'char':
        return f'    printf("{space}%c", {var_name});'
else:
raise NotImplementedError(type_)
def print_newline() -> str:
return ' printf("\\n");'
def get_printf(signature: Signature, props: Props) -> str:
props = deepcopy(props)
if signature.return_type == 'void':
done_output = set([])
printf = []
for i in range(len(props.output)):
result_type = None
var_name = None
for parameter in signature.parameters:
# if parameter.var_name == props.output:
if parameter.var_name in props.output and parameter.var_name not in done_output:
result_type = parameter.type_name.replace('*', '')
var_name = parameter.var_name
break
size = None
for array in props.arrays:
# if array.var_name == props.output:
if array.var_name in props.output and array.var_name not in done_output:
size = array.size
assert var_name == array.var_name
break
if size is None:
pass
done_output.add(var_name)
printf.append(' int idx;')
printf.append(' int first = 0;')
printf.append(f' for (idx = 0; idx < {size}; idx++) ' + '{')
printf.append(' if (first) {')
printf.append(' ' + get_single_printf(result_type, var_name=var_name+'[idx]'))
printf.append(' first = 0;')
printf.append(' }')
printf.append(' else {')
printf.append(' ' + get_single_printf(result_type, var_name=var_name+'[idx]', trailing_space=True))
printf.append(' }')
printf.append(' }')
printf = '\n'.join(printf) + '\n' + print_newline() + '\n'
else:
printf = get_single_printf(signature.return_type, var_name='res') + '\n' + print_newline() + '\n'
return printf
def parse_props(props_str: str) -> Props:#, signature: Signature):
# signature could be parsed from parse but we already have it
if props_str.startswith('void') or props_str.count('output') > 0:
result = []
for l in props_str.splitlines():
if l.startswith('output'):
result.append(l.split()[1])
if len(result) == 0:
print('WARNING: Props output not found, using the only array instead')
for l in props_str.splitlines()[1:]:
_, var_name, size = l.split()
var_name = var_name[:-1]
result = [var_name]
break
else:
result = ['res']
arrays = []
for l in props_str.splitlines()[1:]:
if l.startswith('output'):
continue
_, var_name, size = l.split()
var_name = var_name[:-1]
array = Array(var_name=var_name, size=size)
arrays.append(array)
props = Props(output=result, arrays=arrays)
return props
def contains_array(code: str) -> bool:
for s in ['int*', 'char*', 'float*', 'int *', 'char *', 'float *']:
if code.count(s) > 0:
return True
return False
def signature2standalone(signature: Signature, function_code: str, props: str, examples) -> Tuple[str,str]:
##### c_imp
# props is only used if the return type is void, then we need to know which is the "result" of the function (side-effect)
malloc_lib = '#include <stdlib.h>' if contains_array(function_code) else ''
c_imp = f"#include <stdio.h>\n{malloc_lib}\n" + function_code + '\n'
c_imp += '#include <math.h>\n#include <stdbool.h>\n'
c_imp += 'int main() {\n'
parsed_props = parse_props(props)
scanf = get_scanf(signature, parsed_props, func_code=function_code)
# print(scanf)
c_imp += scanf
function_call = get_function_call(signature)
c_imp += ' ' + function_call
printf = get_printf(signature, props=parsed_props)
c_imp += printf
c_imp += '\n return 0;\n}\n'
c_imp = c_imp.replace('None', 'n')
# Force single declaration of idx
before_first_idx, first_idx, after_first_idx = c_imp.partition('int idx;')
c_imp = before_first_idx + first_idx + after_first_idx.replace('int idx;', '')
#print(c_imp)
def get_function_signature_string(f):
s = ''
for line in f.splitlines():
if line.startswith('#'):
continue
if len(line.split()) == 0:
continue
else:
s = line.strip().replace('{', '')
break
return s
#### main_code (external)
# props is only used if the return type is void, then we need to know which is the "result" of the function (side-effect)
malloc_lib = '#include <stdlib.h>' if contains_array(function_code) else ''
main_code = f"#include <stdio.h>\n{malloc_lib}\n"
main_code += '#include <math.h>\n#include <stdbool.h>\n'
main_code += f'extern {get_function_signature_string(function_code)};\n'
main_code += 'int main() {\n'
props = parse_props(props)
scanf = get_scanf(signature, props, func_code=function_code)
main_code += scanf
function_call = get_function_call(signature)
main_code += ' ' + function_call
printf = get_printf(signature, props=props)
main_code += printf
main_code += '\n return 0;\n}\n'
# Force single declaration of idx
before_first_idx, first_idx, after_first_idx = main_code.partition('int idx;')
main_code = before_first_idx + first_idx + after_first_idx.replace('int idx;', '')
main_code = main_code.replace('None', 'n')
return c_imp, main_code
@contextlib.contextmanager
def stdoutIO(stdout=None):
old = sys.stdout
if stdout is None:
stdout = StringIO()
sys.stdout = stdout
yield stdout
sys.stdout = old
def run_python_script(name: str, path: str) -> str:
previous_dir = os.getcwd()
os.chdir(path)
with stdoutIO() as s:
try:
exec(open(name).read())
except BaseException as e:
pass
os.chdir(previous_dir)
# 1 - min_so_far_subtracted etc have errors in the L2 files.
# 2 - integers must come first to guarantee that array sizes are initialized before arrays; however, in the examples this is not respected
# so we reorder
# so, scalars (in order) then arrays (in order)
# assume only input is affected
def get_examples(example_path: str, use_simpl_instead_of_L2: bool, scalars_first: bool, signature: Signature) -> List[Tuple[str, str]]:
if use_simpl_instead_of_L2:
with open(os.path.join(example_path, 'simpl'), 'r') as f:
simpl = f.read()
data = simpl2json(simpl, signature=signature)
simpl_header = [l for l in simpl.split('\n') if "fun" in l][0]
diff_num_parameters = len(simpl_header.replace('fun', '').replace('->','').split()) - len(signature.parameters)
if diff_num_parameters != 0:
# simpl includes length of output
if diff_num_parameters == 1:
import re
for i in range(len((data['contents']['examples']))):
data['contents']['examples'][i] = re.sub(' -?\d+\)',')', data['contents']['examples'][i])
else:
# simpl includes length of output
if signature.return_type == 'void':
import re
for i in range(len((data['contents']['examples']))):
data['contents']['examples'][i] = re.sub(' -?\d+\)', ')', data['contents']['examples'][i])
# simpl includes length for each array, even if according to the c implementation they are equal
import re
for i in range(len((data['contents']['examples']))):
# remove extra Ns
c = itertools.count()
data['contents']['examples'][i] = re.sub('\] -?\d+ \[', lambda x: x.group() if not next(c) else '] [', data['contents']['examples'][i])
data['contents']['examples'][i] = re.sub(r"(\]) (-?\d+) (-?\d+) (-?\d+) (\[)", r"\1 \3 \5", data['contents']['examples'][i])
else:
with open(os.path.join(example_path, 'L2'), 'r') as f:
data = json.load(f)
parsed_examples = []
for example in data['contents']['examples']:
# "(f 1) -> 1", "(f 4) -> 36", "(f 4) -> 36", "(f 1) -> 1"
inp, _, out = example.partition('->')
inp = ' '.join(inp.strip()[2:-1].split())
out = ' '.join(out.split())
def parse(text):
parsed = ''
i = 0
in_array = False
while i < len(text):
current_char = text[i]
if len(current_char.split()) == 0:
if in_array:
parsed += ' '
else:
parsed += '\n'
elif current_char == '[':
parsed += '\n'
in_array = True
elif current_char == ']':
parsed += '\n'
in_array = False
else:
parsed += current_char
i += 1
return parsed
def parse_scalars_first(text):
parsed_scalars = ''
parsed_arrays = ''
i = 0
in_array = False
while i < len(text):
current_char = text[i]
if len(current_char.split()) == 0:
if in_array:
parsed_arrays += ' '
else:
parsed_scalars += '\n'
elif current_char == '[':
parsed_arrays += '\n'
in_array = True
elif current_char == ']':
parsed_arrays += '\n'
in_array = False
else:
if in_array:
parsed_arrays += current_char
else:
parsed_scalars += current_char
i += 1
return parsed_scalars + parsed_arrays
if scalars_first:
parsed_inp = parse_scalars_first(inp)
else:
parsed_inp = parse(inp)
parsed_out = parse(out)
parsed_examples.append((parsed_inp, parsed_out))
return parsed_examples
def get_asm_header_footer_body(asm_path: str) -> Tuple[str, str, str]:
with open(asm_path, 'r') as f:
asm = f.readlines()
header = ''
for line in asm:
if ':' in line:
break
header += line
with open(asm_path, 'r') as f:
asm = f.read()
body, _, footer = asm.partition('.cfi_endproc')
body = body[len(header):]
return header, footer, body
def simpl2json(simpl: str, signature: Signature) -> Dict:
'''
Examples
Examples
{6,0,4,8,7,6,4,7,5,9,3,8,2,4},14,{2,1,9,4,8,9,2,4,1,1,10,5,7,8},14,{0,0,0,0,0,0,0,0,0,0,0,0,0,0},14 -> {-1,0,-8,-3,-7,-8,-1,-3,0,0,-9,-4,-6,-7};
{5,6,5,9},4,{10,3,8,7},4,{0,0,0,0},4 -> {-9,-2,-7,-6};
{8,4,0,8,0,1,6,10,10,0,9,7,5,3,5,1},16,{3,9,3,3,2,8,7,1,1,5,8,7,1,4,8,4},16,{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},16 -> {-2,-8,-2,-2,-1,-7,-6,0,0,-4,-7,-6,0,-3,-7,-3};
{8,5,8,3},4,{9,8,9,4},4,{0,0,0,0},4 -> {-8,-7,-8,-3};
{1,9,6,5,9,3,4,2,3,2,0,9,10,4,7,1},16,{1,10,2,2,0,1,8,10,6,8,4,8,3,3,10,9},16,{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0},16 -> {0,-9,-1,-1,0,0,-7,-9,-5,-7,-3,-7,-2,-2,-9,-8};
{9,4,7,7,10,10,5,1,5,9,1,7,9,10},14,{5,3,3,0,4,1,3,5,2,5,6,0,1,2},14,{0,0,0,0,0,0,0,0,0,0,0,0,0,0},14 -> {-4,-2,-2,0,-3,0,-2,-4,-1,-4,-5,0,0,-1};
{0,9,10,8,9,10,1,0},8,{1,10,3,9,9,1,6,1},8,{0,0,0,0,0,0,0,0},8 -> {0,-9,-2,-8,-8,0,-5,0};
{1,0,9,0,3,2,1,7,3,0,10,0},12,{8,6,9,1,4,1,3,1,10,4,5,6},12,{0,0,0,0,0,0,0,0,0,0,0,0},12 -> {-7,-5,-8,0,-3,0,-2,0,-9,-3,-4,-5};
{0,8,7,0,9,1},6,{6,3,4,5,7,9},6,{0,0,0,0,0,0},6 -> {-5,-2,-3,-4,-6,-8};
["(f [] []) -> []", "(f [6 0 4 8 7 6 4 7 5 9 3 8 2 4] [2 1 9 4 8 9 2 4 1 1 10 5 7 8]) -> [-1 0 -8 -3 -7 -8 -1 -3 0 0 -9 -4 -6 -7]",
'''
L2_examples = []
lines = simpl.splitlines()[1:]
simpl_header = [l for l in simpl.split('\n') if "fun" in l][0]
for line in lines:
if len(line.split()) == 0:
break
L2 = line.strip().replace('{', '[').replace('}', ']').replace(',', ' ').replace(';', '')
L2 = '(f ' + L2.replace(' ->', ') ->')
L2_examples.append(L2)
# hack to have n before arrays of size n!
diff_num_parameters = len(simpl_header.replace('fun', '').replace('->','').split()) - len(signature.parameters)
if diff_num_parameters != 0:
# simpl includes length of output
pass
return {'contents': {'examples': L2_examples}}
def run_io(c_code: str, example_path: str, just_func_code: str, main_code: str, signature: Signature, predictions_path: str, use_simpl_instead_of_L2: bool) -> Tuple[bool, bool, bool, float]:
try:
examples = get_examples(example_path, use_simpl_instead_of_L2=use_simpl_instead_of_L2, scalars_first=True, signature=signature)
except FileNotFoundError as e:
return False, False, False, 0.0 # benchmark ok, model ok, syntax model ok, BLEU model
func_name = example_path.split(os.sep)[-1]
dir_ = os.path.join(predictions_path, func_name)
    # Run with gcc
from neural_compilers.utils.utilities import get_tmp_file, run_command
tmp_c = get_tmp_file(c_code, extension='.c', dir=dir_)
output = tmp_c[:-2] + '.x'
stdout, stderr = run_command(f'gcc -O0 -x c -o {output} {tmp_c}')
print(stderr)
gcc_corrects = 0
bad_examples_in_benchmark = 0
for idx, (in_, out_) in enumerate(examples):
if os.path.basename(example_path) in BAD_EXAMPLES and idx in BAD_EXAMPLES[os.path.basename(example_path)]:
bad_examples_in_benchmark += 1
continue
# stdout, stderr = run_command(f'./{output} {tmp_c}', stdin=
prefix_ex = './' if not os.path.isabs(output) else ''
try:
stdout, stderr = run_command(f'{prefix_ex}{output}', stdin=in_)
except FileNotFoundError as e:
return False, False, False, 0.0
if stdout.strip() == out_.strip():
gcc_corrects += 1
print(example_path, 'GCC: ', f'{gcc_corrects}/{len(examples) - bad_examples_in_benchmark}')
print()
os.remove(tmp_c)
os.remove(output)
# first compile with gcc to get header and footer
tmp_c = get_tmp_file(just_func_code, extension='.c', dir=dir_)
output = tmp_c[:-2] + '.s'
stdout, stderr = run_command(f'gcc -O0 -c -S -o {output} {tmp_c}')
asm_header, asm_footer, asm_body = get_asm_header_footer_body(output)
func_name = example_path.split(os.sep)[-1]
import glob
max_model_corrects = 0
best_bleu = 0.0
best_syntax = False
for idx, hypothesis in reversed(list(enumerate(sorted(glob.glob(os.path.join(predictions_path, func_name, f'{func_name}*.s')))))):
print('Hypothesis:', idx+1)
print(hypothesis)
hypothesis_ = open(hypothesis).read()
model_assembly = asm_header + hypothesis_ + asm_footer
tmp_s = get_tmp_file(model_assembly, extension='.s', dir=dir_)
output = tmp_c[:-2] + '.x'
main_ = get_tmp_file(main_code, extension='.c', dir=dir_)
stdout, stderr = run_command(f'gcc -O0 -o {output} {main_} {tmp_s}')
model_corrects = 0
bad_examples_in_benchmark = 0
not_compiled = False
for idx, (in_, out_) in enumerate(examples):
if os.path.basename(example_path) in BAD_EXAMPLES and idx in BAD_EXAMPLES[os.path.basename(example_path)]:
bad_examples_in_benchmark += 1
continue
try:
prefix_ex = './' if not os.path.isabs(output) else ''
stdout, stderr = run_command(f'{prefix_ex}{output}', stdin=in_, timeout=5)
                if stdout.strip() == out_.strip():
model_corrects += 1
except BaseException as e:
if isinstance(e, TimeoutError):
break
not_compiled = True
break
ref_tok = ' '.join(code_tokenizer.tokenize(programs=asm_body, lang='asm'))
hyp_tok = hypothesis_.replace('\n', '<newline>')
bleu_score = eval_bleu(ref=ref_tok, hyp=hyp_tok)
print('BLEU =', bleu_score)
if bleu_score > best_bleu:
best_bleu = bleu_score
print('SYNTAX', 'INCORRECT' if not_compiled else 'CORRECT')
if not not_compiled:
if not best_syntax:
best_syntax = True
print(example_path, 'IO: ', f'{model_corrects}/{len(examples) - bad_examples_in_benchmark}')
else:
print(example_path, 'IO: N/A (Error:', stderr, ')')
print()
if not not_compiled:
os.remove(tmp_s)
os.remove(output)
if model_corrects > max_model_corrects:
max_model_corrects = model_corrects
return gcc_corrects == len(examples), max_model_corrects == gcc_corrects and gcc_corrects > 0, best_syntax, best_bleu
def run(synthesis_eval_path: str, predictions_path: str):
standaloner_code_works = 0
total = 0
none_in_code = 0
benchmark_oks = 0
model_oks = 0
syntax_oks = 0
bleu = 0.0
for idx, example in enumerate(sorted(os.listdir(os.path.join(synthesis_eval_path, 'examples')))):
example_path = os.path.join(synthesis_eval_path, 'examples', example)
if example.startswith('__') or not os.path.isdir(example_path) or example in BAD_CASES:
continue
total += 1
c_path = os.path.join(example_path, 'ref.c')
parsed_signature, _ = parse_file(c_path)
with open(c_path, 'r') as c:
c_code = c.read()
props_path = os.path.join(example_path, 'props')
with open(props_path, 'r') as p:
props = p.read()
try:
c_imp, main_code = signature2standalone(parsed_signature, c_code, props, examples=get_examples(example_path, use_simpl_instead_of_L2=True, scalars_first=True, signature=parsed_signature))
if c_imp.count('None') > 0:
none_in_code += 1
gcc = GCC(print_stderr=False)
if len(gcc.compile(c_imp).splitlines()) > 1:
standaloner_code_works += 1
print('-------------------')
benchmark_ok, model_ok, best_syntax, best_bleu = run_io(c_imp, example_path, just_func_code=c_code, main_code=main_code, signature=parsed_signature, predictions_path=predictions_path, use_simpl_instead_of_L2=True)
if not benchmark_ok:
benchmark_ok, model_ok, best_syntax, best_bleu = run_io(c_imp, example_path, just_func_code=c_code, main_code=main_code, signature=parsed_signature,
predictions_path=predictions_path, use_simpl_instead_of_L2=False)
if benchmark_ok:
benchmark_oks += benchmark_ok
model_oks += model_ok
syntax_oks += best_syntax
bleu += best_bleu
def str_ok(b):
return "OK" if b else "NOT OK"
print('Benchmark OK!')
complexity_ref = lizard.analyze_file(c_path).__dict__['function_list'][0].__dict__
cyclomatic = complexity_ref['cyclomatic_complexity']
nloc = complexity_ref['nloc']
tokens = complexity_ref['token_count']
params = len(complexity_ref['parameters'])
pointers = complexity_ref['long_name'].count('*')
print(f'{example}: IO = {str_ok(model_ok)} | SYNTAX = {str_ok(best_syntax)} | BLEU = {best_bleu}'
f' | C_NLOC = {nloc} | C_TOKENS = {tokens} | C_CYCLO = {cyclomatic} | PARAMS = {params} | POINTERS = {pointers}')
else:
print('Benchmark NOT OK!')
except (NotImplementedError, FileNotFoundError) as e:
print('Benchmark NOT OK!')
print('\nBenchmark ok:', benchmark_oks, 'of', total)
print('IO ok:', model_oks, 'of', benchmark_oks)
print('Syntax ok:', syntax_oks, 'of', benchmark_oks)
print('Avg BLEU:', bleu/benchmark_oks)
if __name__ == '__main__':
import argparse
from neural_compilers.utils.utilities import init_logging
import os
parser = argparse.ArgumentParser('IO Evaluator')
parser.add_argument('--synthesis-eval-path', type=str)
parser.add_argument('--predictions-path', type=str)
args = parser.parse_args()
# Set up logging etc
timestamp = time.strftime("%Y-%m-%d-%H%M")
repo = git.Repo(search_parent_directories=True)
sha = repo.head.object.hexsha
extra_id = uuid.uuid4().hex
name = f'eval-io-legacy-{timestamp}-{sha[:4]}-{extra_id[:4]}'
eval_path = os.path.join(os.path.dirname(args.predictions_path), name)
os.mkdir(eval_path)
init_logging(os.path.join(eval_path, name + '.log'))
print(args)
run(synthesis_eval_path=args.synthesis_eval_path, predictions_path=args.predictions_path)
print(os.path.join(eval_path, name + '.log'))
```
#### File: neural_compilers/utils/utilities.py
```python
import uuid
from typing import Tuple
import subprocess
import os
import numpy as np
import random
import logging
import time
from typing import Callable, Any, Optional
import os
def get_tmp_file(content: str, extension: str = '', dir: str ='') -> str:
filename = os.path.join(dir, uuid.uuid4().hex + extension)
with open(filename, 'w') as f:
f.write(content)
return filename
def get_tmp_path() -> str:
filename = uuid.uuid4().hex
return filename
def run_command(command: str, stdin: Optional[str] = None, timeout: Optional[int] = None) -> Tuple[str, str]:
output = subprocess.run(command.split(), capture_output=True, text=True, input=stdin, timeout=timeout)
return output.stdout, output.stderr
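# Illustrative usage of run_command (the commands shown are hypothetical examples):
#     stdout, stderr = run_command('gcc --version')
#     stdout, stderr = run_command('./a.out', stdin='1 2 3\n', timeout=5)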
def deterministic(seed: int):
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed) # Numpy module.
random.seed(seed) # Python random module.
def timeit(func: Callable) -> Any:
def wrapped(*args, **kwargs):
func_name = func.__name__
logging.info(f'Running {func_name}')
t0 = time.time()
res = func(*args, **kwargs)
t1 = time.time()
logging.info(f'Run {func_name} in {t1-t0}s')
return res
return wrapped
def init_logging(name: str):
logging.basicConfig(filename=name, level=logging.INFO)
logging.getLogger('').addHandler(logging.StreamHandler())
``` |
{
"source": "jordiae/pipel",
"score": 3
} |
#### File: pipel/examples/example.py
```python
from pipel import CollectingPipeline as Pipeline
from pipel import PipelineLogger
import os
import filecmp
import time
import string
from typing import Iterable
from typing import Generator
from typing import cast
from typing import List
import multiprocessing
import logging
import shutil
DATA_DIR = 'data'
RES_DIR = 'res'
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def prepare_example(data_dir: str):
os.makedirs(data_dir)
i = 0
for c in string.ascii_lowercase:
path = os.path.join(data_dir, c + '.txt')
with open(path, 'w') as f:
for j in range(1000):
f.write(str(i + j) + '\n')
i += j
def stream(c: str, data_dir: str, cost: int):
path = os.path.join(data_dir, c + '.txt')
with open(path, 'r') as f:
for i in f.readlines():
time.sleep(cost * 0.0001)
yield int(i.strip())
def streams(cost: int = 0) -> List[Iterable[int]]:
data_dir = DATA_DIR
s = []
for c in string.ascii_lowercase:
s.append(stream(c, data_dir, cost))
return s
def input_list(cost: int = 0) -> Iterable[int]:
data_dir = DATA_DIR
res = []
for c in string.ascii_lowercase:
path = os.path.join(data_dir, c + '.txt')
with open(path, 'r') as f:
for i in f.readlines():
time.sleep(cost * 0.0001)
res.append(int(i.strip()))
return res
def p1(i: int) -> int:
cost = 1
time.sleep(cost * 0.0001)
return i
class P2:
def __init__(self, cost: int, logger: PipelineLogger = None):
self.cost = cost
self.logger = logger
self.non_pickable_dependency = open('non_pickable_dependency.txt', 'r')
def __call__(self, i: int) -> int:
time.sleep(self.cost * 0.0001)
if self.logger is not None and i % 10000 == 0:
self.logger.info(f'{self.__class__.__name__}: processed the number {i}')
return i
def p3(i: int) -> int:
cost = 3
time.sleep(cost * 0.0001)
return i
class Output:
def __init__(self, file: str, cost: int):
self.fd = open(file, 'w')
self.cost = cost
def __call__(self, res: Iterable[int]):
for e in res:
time.sleep(self.cost * 0.0001)
self.fd.write(str(e) + '\n')
self.fd.flush()
def __del__(self):
self.fd.close()
def init_logger() -> logging.Logger:
logging.basicConfig(filename='pipe.log', level=logging.INFO)
logger = logging.getLogger('')
logger.addHandler(logging.StreamHandler())
return logger
def main():
if not os.path.exists(DATA_DIR):
os.makedirs(RES_DIR)
prepare_example(DATA_DIR)
for case in ['Equal', 'IO-bound', 'CPU-bound']:
print(case)
if case == 'Equal':
io_cost = 2
cpu_cost = 0
elif case == 'IO-bound':
io_cost = 10
cpu_cost = 0
else:
io_cost = 0
cpu_cost = 2
t0_input = time.time()
res = input_list(io_cost)
t1_input = time.time()
t0_cpu = time.time()
p2 = P2(cpu_cost)
res = [p3(p2(p1(e))) for e in res]
t1_cpu = time.time()
t0_output = time.time()
out = Output(os.path.join(RES_DIR, 'seq.txt'), io_cost)
out(res)
t1_output = time.time()
io = t1_input - t0_input + t1_output - t0_output
cpu = t1_cpu - t0_cpu
total = io + cpu
print('Vanilla Sequential')
print(f'IO: {io}s ({100*io/total:.2f}%)')
print(f'CPU: {cpu}s ({100*cpu/total:.2f}%)')
print(f'Total {total:.2f}s')
print()
t0_pipeseq = time.time()
pipeline = Pipeline(streamers=[cast(Generator, g) for g in streams(io_cost)],
mappers_factory=lambda: [p1, P2(cpu_cost), p3],
output_reducer=Output(os.path.join(RES_DIR, 'pipeseq.txt'), io_cost), batch_size=1000,
parallel=False)
pipeline.run()
t1_pipeseq = time.time()
correct = filecmp.cmp(os.path.join(RES_DIR, 'seq.txt'), os.path.join(RES_DIR, 'pipeseq.txt'))
print(f'pipelib sequential: {t1_pipeseq - t0_pipeseq:.2f}s')
print(f'pipelib sequential is {"CORRECT" if correct else "INCORRECT"}')
print()
t0_pipepar = time.time()
pipeline = Pipeline(streamers=[cast(Generator, g) for g in streams(io_cost)],
mappers_factory=lambda: [p1, P2(cpu_cost), p3],
output_reducer=Output(os.path.join(RES_DIR, 'pipepar.txt'), io_cost), batch_size=1000,
parallel=True)
pipeline.run()
t1_pipepar = time.time()
correct = filecmp.cmp(os.path.join(RES_DIR, 'seq.txt'), os.path.join(RES_DIR, 'pipepar.txt'))
print(f'pipelib parallel with {multiprocessing.cpu_count()} cores: {t1_pipepar - t0_pipepar:.2f}s')
print(f'pipelib parallel is {"CORRECT" if correct else "INCORRECT"}')
print()
print('Example with logs for the pipeline and P2:')
io_cost = 2
cpu_cost = 0
logger = init_logger()
logger = PipelineLogger(logger)
pipeline = Pipeline(streamers=[cast(Generator, g) for g in streams(io_cost)],
mappers_factory=lambda: [p1, P2(cpu_cost), p3],
output_reducer=Output(os.path.join(RES_DIR, 'pipeseq.txt'), io_cost), batch_size=1000,
parallel=True, logger=logger, log_every_iter=1)
pipeline.run()
shutil.rmtree(DATA_DIR)
shutil.rmtree(RES_DIR)
if __name__ == '__main__':
main()
``` |
{
"source": "jordiahl/Video-Streaming-Google-Home",
"score": 3
} |
#### File: Video-Streaming-Google-Home/streamingProjectorGoogleHome/main.py
```python
from flask import Flask
from flask import request
from server.middleware import middleware
mid = middleware()
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def root():
if request.method == "GET":
return mid.get_root(request)
if request.method == "POST":
return mid.post_root(request)
@app.route("/google-api", methods=["POST"])
def google_api():
if request.method == "POST":
return mid.post_google_api(request)
if __name__ == "__main__":
app.run()
```
#### File: streamingProjectorGoogleHome/server/IP_address.py
```python
from requests import get
import requests
class IP_adress:
def get_external_IP_adress(self):
external_IP_address = get('https://ipapi.co/ip/').text
print(external_IP_address)
return external_IP_address
def send_IP_address(self):
# defining the api-endpoint
API_ENDPOINT = "http://pastebin.com/api/api_post.php"
# your API key here
API_KEY = "<KEY>"
# data to be sent to api
data = {'api_dev_key':API_KEY,
'api_option':'paste',
        'api_paste_code':self.get_external_IP_adress(),
'api_paste_format':'python'}
r = requests.post(url = API_ENDPOINT, data = data)
# extracting response text
pastebin_url = r.text
print("The pastebin URL is:%s"%pastebin_url)
IP_adress().get_external_IP_adress()
```
#### File: streamingServices/youtube/youtube_web_scraper.py
```python
from selenium import webdriver
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from streamingServices.youtube.element_has_tag import element_has_tag
from streamingServices.youtube.youtube_scraping_IDs import scraping_IDs
# from element_has_tag import element_has_tag
# from youtube_scraping_IDs import scraping_IDs
class youtube_web_scraper:
def __init__(self):
self.driver = webdriver.Chrome()
self.yID = scraping_IDs()
# self.driver.get("http://www.youtube.com")
# self.search("hello")
# self.select_ideal_video_from_list(0)
def search (self, search_string):
self.search_string = search_string
self.begin_search(search_string)
def begin_search(self, search_string):
button_element = self.driver.find_element_by_id(self.yID.search_button_element_id)
search_bar_element = self.driver.find_element_by_css_selector(self.yID.search_bar_css)
search_bar_element.send_keys(search_string) # set Text on search bar
button_element.click() # click search
def wait_for_succesful_search(self):
wait = WebDriverWait(self.driver, 10)
wait.until(element_has_tag(self.yID.video_list_tag))
    def select_ideal_video_from_list(self, list_index=0):
self.wait_for_succesful_search()
video_list_elements = self.driver.find_elements_by_tag_name(self.yID.video_list_tag)
video_list_elements[list_index].click()
``` |
{
"source": "jordiandreu/taurus_pyqtgraph",
"score": 2
} |
#### File: taurus_pyqtgraph/taurus_pyqtgraph/autopantool.py
```python
__all__ = ["XAutoPanTool"]
from taurus.external.qt import QtGui, QtCore
class XAutoPanTool(QtGui.QAction):
"""
A tool that provides the "AutoPan" for the X axis of a plot
(aka "oscilloscope mode"). It is implemented as an Action, and provides a
method to attach it to a :class:`pyqtgraph.PlotItem`
"""
def __init__(self, parent=None):
QtGui.QAction.__init__(self, "Fixed range scale", parent)
self.setCheckable(True)
self.toggled.connect(self._onToggled)
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self.updateRange)
self._originalXAutoRange = None
self._viewBox = None
self._plotItem = None
self._XactionMenu = None
self._scrollStep = 0.2
def attachToPlotItem(self, plot_item):
"""Use this method to add this tool to a plot
:param plot_item: (PlotItem)
"""
self._plotItem = plot_item
self._viewBox = plot_item.getViewBox()
self._addToMenu(self._viewBox.menu)
self._originalXAutoRange = self._viewBox.autoRangeEnabled()[0]
self._viewBox.sigXRangeChanged.connect(self._onXRangeChanged)
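    # Illustrative (hypothetical) usage: attach the auto-pan action to a pyqtgraph plot.
    #     plot = pyqtgraph.PlotWidget()
    #     tool = XAutoPanTool()
    #     tool.attachToPlotItem(plot.getPlotItem())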
def _addToMenu(self, menu):
for m in menu.axes:
if m.title() == "X Axis":
x_menu = m
self._XactionMenu = x_menu.actions()[0]
x_menu.insertAction(self._XactionMenu, self)
self.setParent(menu)
def _onToggled(self, checked):
if checked:
self._originalXAutoRange = self._viewBox.autoRangeEnabled()[0]
self._viewBox.enableAutoRange(x=False)
axisXrange = self._viewBox.state["viewRange"][0]
x_range = axisXrange[1] - axisXrange[0]
t = int(x_range / 10.0) * 1000
t = min(3000, t)
t = max(50, t)
self._timer.start(t)
else:
self._timer.stop()
self._viewBox.enableAutoRange(x=self._originalXAutoRange)
self._XactionMenu.setEnabled(not checked)
def _onXRangeChanged(self):
self.setChecked(False)
def updateRange(self):
"""Pans the x axis (change the viewbox range maintaining width but
ensuring that the right-most point is shown
"""
dataItems = self._plotItem.listDataItems()
vbs = set()
for item in dataItems:
vbs.add(item.getViewBox())
if len(dataItems) < 1:
self._timer.stop()
# find the largest X axis bound among all viewboxes
boundMax = float("-inf")
for vb in vbs:
cb = vb.childrenBounds()
if cb is not None and cb[0] is not None and cb[0][1] > boundMax:
boundMax = cb[0][1]
if boundMax == float("-inf"):
return
axis_X_range, _ = self._viewBox.state["viewRange"]
x_range = axis_X_range[1] - axis_X_range[0]
if boundMax > axis_X_range[1] or boundMax < axis_X_range[0]:
x_min = boundMax - axis_X_range[1]
x_max = boundMax - axis_X_range[0]
step = min(max(x_range * self._scrollStep, x_min), x_max)
self._viewBox.sigXRangeChanged.disconnect(self._onXRangeChanged)
self._viewBox.setXRange(
axis_X_range[0] + step,
axis_X_range[1] + step,
padding=0.0,
update=False,
)
self._viewBox.sigXRangeChanged.connect(self._onXRangeChanged)
```
#### File: taurus_pyqtgraph/taurus_pyqtgraph/legendtool.py
```python
__all__ = ["PlotLegendTool"]
from taurus.external.qt import QtGui
from taurus.qt.qtcore.configuration import BaseConfigurableClass
import pyqtgraph as pg
class PlotLegendTool(QtGui.QWidgetAction, BaseConfigurableClass):
"""
This tool adds a legend to the PlotItem to which it is attached, and it
inserts a checkable menu action for showing/hiding the legend.
Implementation note: this is implemented as a QWidgetAction+QCheckBox
instead of a checkable QAction to avoid closing the menu when toggling it
"""
def __init__(self, parent=None, **kwargs):
BaseConfigurableClass.__init__(self)
QtGui.QWidgetAction.__init__(self, parent)
self._cb = QtGui.QCheckBox()
self._cb.setText("Show legend")
self.setDefaultWidget(self._cb)
self.registerConfigProperty(
self._cb.isChecked, self._cb.setChecked, "checked"
)
# TODO: register config prop for legend position
self._cb.toggled.connect(self._onToggled)
self._legend = None
self._legend_kwargs = kwargs
def attachToPlotItem(self, plotItem):
"""
Use this method to add this tool to a plot
:param plot_item: (PlotItem)
"""
self._legend = plotItem.addLegend(**self._legend_kwargs)
# if no explicit pen / brush is set, use some nicer defaults
if "pen" not in self._legend_kwargs:
self._legend.setPen(pg.CONFIG_OPTIONS["foreground"])
if "pen" not in self._legend_kwargs:
bcolor = self._legend.brush().color()
bcolor.setAlphaF(0.85)
self._legend.setBrush(bcolor)
self._cb.setChecked(True)
menu = plotItem.getViewBox().menu
menu.addAction(self)
def _onToggled(self, checked):
if checked:
self._legend.show()
else:
self._legend.hide()
```
#### File: taurus_pyqtgraph/taurus_pyqtgraph/plot.py
```python
from __future__ import absolute_import
__all__ = ["TaurusPlot"]
from future.utils import string_types
import copy
from taurus.external.qt import QtGui, Qt
from taurus.core.util.containers import LoopList
from taurus.core.util.log import Logger
from taurus.qt.qtcore.configuration import BaseConfigurableClass
from pyqtgraph import PlotWidget
from .curvespropertiestool import CurvesPropertiesTool
from .taurusmodelchoosertool import TaurusXYModelChooserTool
from .legendtool import PlotLegendTool
from .datainspectortool import DataInspectorTool
from .y2axis import Y2ViewBox
from .curveproperties import CURVE_COLORS
class TaurusPlot(PlotWidget, BaseConfigurableClass):
"""
TaurusPlot is a general widget for plotting 1D data sets. It is an extended
taurus-aware version of :class:`pyqtgraph.PlotWidget`.
    Apart from all the features already available in a regular PlotWidget,
TaurusPlot incorporates the following tools/features:
- Secondary Y axis (right axis)
- A plot configuration dialog, and save/restore configuration
facilities
- A menu option for adding/removing models
- A menu option for showing/hiding the legend
- Automatic color change of curves for newly added models
"""
def __init__(self, parent=None, **kwargs):
if Qt.QT_VERSION < 0x050000:
# Workaround for issue when using super with pyqt<5
BaseConfigurableClass.__init__(self)
PlotWidget.__init__(self, parent=parent, **kwargs)
else:
super(TaurusPlot, self).__init__(parent=None, **kwargs)
# Compose with a Logger
self._logger = Logger(name=self.__class__.__name__)
self.debug = self._logger.debug
self.info = self._logger.info
self.warning = self._logger.warning
self.error = self._logger.error
# set up cyclic color generator
self._curveColors = LoopList(CURVE_COLORS)
self._curveColors.setCurrentIndex(-1)
# add save & retrieve configuration actions
menu = self.getPlotItem().getViewBox().menu
saveConfigAction = QtGui.QAction("Save configuration", menu)
saveConfigAction.triggered.connect(self._onSaveConfigAction)
menu.addAction(saveConfigAction)
loadConfigAction = QtGui.QAction("Retrieve saved configuration", menu)
loadConfigAction.triggered.connect(self._onRetrieveConfigAction)
menu.addAction(loadConfigAction)
self.registerConfigProperty(self._getState, self.restoreState, "state")
# add legend tool
legend_tool = PlotLegendTool(self)
legend_tool.attachToPlotItem(self.getPlotItem())
# add model chooser
self._model_chooser_tool = TaurusXYModelChooserTool(self)
self._model_chooser_tool.attachToPlotItem(
self.getPlotItem(), self, self._curveColors
)
# add Y2 axis
self._y2 = Y2ViewBox()
self._y2.attachToPlotItem(self.getPlotItem())
# add plot configuration dialog
self._cprop_tool = CurvesPropertiesTool(self)
self._cprop_tool.attachToPlotItem(self.getPlotItem(), y2=self._y2)
# add a data inspector
inspector_tool = DataInspectorTool(self)
inspector_tool.attachToPlotItem(self.getPlotItem())
# enable Autorange
self.getPlotItem().getViewBox().enableAutoRange(True)
self._y2.enableAutoRange(True)
# Register config properties
self.registerConfigDelegate(self._model_chooser_tool, "XYmodelchooser")
self.registerConfigDelegate(self._y2, "Y2Axis")
self.registerConfigDelegate(self._cprop_tool, "CurvePropertiesTool")
self.registerConfigDelegate(legend_tool, "legend")
self.registerConfigDelegate(inspector_tool, "inspector")
# --------------------------------------------------------------------
# workaround for bug in pyqtgraph v<=0.10.0, already fixed in
# https://github.com/pyqtgraph/pyqtgraph/commit/52754d4859
# TODO: remove this once pyqtgraph v>0.10 is released
def __getattr__(self, item):
try:
return PlotWidget.__getattr__(self, item)
except NameError:
raise AttributeError(
"{} has no attribute {}".format(self.__class__.__name__, item)
)
# --------------------------------------------------------------------
def __getitem__(self, idx):
"""
Provides a list-like interface: items can be accessed using slice
notation
"""
return self.getPlotItem().listDataItems()[idx]
def __len__(self):
return len(self.getPlotItem().listDataItems())
def setModel(self, names):
"""Reimplemented to delegate to the model chooser"""
# support passing a string in names
if isinstance(names, string_types):
names = [names]
self._model_chooser_tool.updateModels(names)
def addModels(self, names):
"""Reimplemented to delegate to the model chooser"""
# support passing a string in names
if isinstance(names, string_types):
names = [names]
self._model_chooser_tool.addModels(names)
def _getState(self):
"""Same as PlotWidget.saveState but removing viewRange conf to force
a refresh with targetRange when loading
"""
state = copy.deepcopy(self.saveState())
# remove viewRange conf
del state["view"]["viewRange"]
return state
def setXAxisMode(self, x_axis_mode):
"""Required generic TaurusPlot API """
from taurus_pyqtgraph import DateAxisItem
if x_axis_mode == "t":
axis = DateAxisItem(orientation="bottom")
axis.attachToPlotItem(self.getPlotItem())
elif x_axis_mode == "n":
axis = self.getPlotItem().axes["bottom"]["item"]
if isinstance(axis, DateAxisItem):
axis.detachFromPlotItem()
else:
raise ValueError("Unsupported x axis mode {}".format(x_axis_mode))
def _onSaveConfigAction(self):
"""wrapper to avoid issues with overloaded signals"""
return self.saveConfigFile()
def _onRetrieveConfigAction(self):
"""wrapper to avoid issues with overloaded signals"""
return self.loadConfigFile()
def plot_main(
models=(),
config_file=None,
x_axis_mode="n",
demo=False,
window_name="TaurusPlot (pg)",
):
"""Launch a TaurusPlot"""
import sys
from taurus.qt.qtgui.application import TaurusApplication
app = TaurusApplication(cmd_line_parser=None, app_name="taurusplot(pg)")
w = TaurusPlot()
# w.loadConfigFile('tmp/TaurusPlot.pck')
w.setWindowTitle(window_name)
if demo:
models = list(models)
models.extend(["eval:rand(100)", "eval:0.5*sqrt(arange(100))"])
w.setXAxisMode(x_axis_mode.lower())
if config_file is not None:
w.loadConfigFile(config_file)
if models:
w.setModel(models)
w.show()
ret = app.exec_()
# import pprint
# pprint.pprint(w.createConfig())
sys.exit(ret)
if __name__ == "__main__":
plot_main()
```
#### File: taurus_pyqtgraph/taurus_pyqtgraph/y2axis.py
```python
__all__ = ["Y2ViewBox", "set_y_axis_for_curve"]
from pyqtgraph import ViewBox, PlotItem
from taurus.qt.qtcore.configuration.configuration import BaseConfigurableClass
def set_y_axis_for_curve(y2, dataItem, plotItem, y2Axis):
"""
    Associates the given `dataItem` with either the main (Y1) viewbox or the
    secondary (Y2) viewbox of the given `plotItem`, moving it between viewboxes
    when needed.
    :param y2: `True` indicates that the `dataItem` should be associated to y2,
        `False` indicates that it should be associated to the main
        viewbox, and `None` indicates that no change should be done.
    :param dataItem: The :class:`PlotDataItem` whose Y axis is to be set.
    :param plotItem: The :class:`PlotItem` containing the dataItem.
:param y2Axis: The :class:`Y2ViewBox` instance
"""
# Set the Y1 / Y2 axis if required
old_view = dataItem.getViewBox() # current view for the curve
if y2 is None: # axis is not to be changed
new_view = old_view
elif y2: # Y axis must be Y2
new_view = y2Axis # y2 axis view
else: # Y axis must be Y1
new_view = plotItem.getViewBox() # main view
if new_view is not old_view:
if old_view is not None:
old_view.removeItem(dataItem)
if not y2:
# adapt the log mode to the main view logMode
# (this is already done automatically when adding to y2)
dataItem.setLogMode(
plotItem.getAxis("bottom").logMode,
plotItem.getAxis("left").logMode,
)
new_view.addItem(dataItem)
old_view.autoRange()
new_view.autoRange()
def _PlotItem_addItem(self, item, *args, **kwargs):
"""replacement for `PlotItem.addItem` that Y2Axis will use to monkey-patch
the original one
"""
PlotItem.addItem(self, item, *args, **kwargs)
if hasattr(item, "setLogMode"):
item.setLogMode(
self.getAxis("bottom").logMode, self.getAxis("left").logMode
)
class Y2ViewBox(ViewBox, BaseConfigurableClass):
"""
A tool that inserts a secondary Y axis to a plot item (see
:meth:`attachToPlotItem`).
It is implemented as a :class:`pyqtgraph.ViewBox` and provides methods to
add and remove :class:`pyqtgraph.PlotDataItem` objects to it.
"""
def __init__(self, *args, **kwargs):
self._isAttached = False
self.plotItem = None
name = kwargs.pop("name", "Y2 ViewBox")
BaseConfigurableClass.__init__(self)
ViewBox.__init__(self, *args, name=name, **kwargs)
self.registerConfigProperty(
self._getCurvesNames, self._addCurvesByName, "Y2Curves"
)
self.registerConfigProperty(self._getState, self.setState, "viewState")
def attachToPlotItem(self, plot_item):
"""Use this method to add this axis to a plot
:param plot_item: (PlotItem)
"""
if self._isAttached:
return # TODO: log a message it's already attached
self._isAttached = True
mainViewBox = plot_item.getViewBox()
mainViewBox.sigResized.connect(self._updateViews)
self.plotItem = plot_item
# add axis-independent actions for logarithmic scale
self._addLogAxisActions()
# disable the standard (custom view-unfriendly) log actions
self.plotItem.ctrl.logXCheck.setEnabled(False)
self.plotItem.ctrl.logYCheck.setEnabled(False)
# monkey-patch the addItem method of the PlotItem
from types import MethodType
self.plotItem.addItem = MethodType(_PlotItem_addItem, self.plotItem)
# add Y2 to main scene(), show the axis and link X axis to self.
# self.plotItem.showAxis("right", show=bool(self.addedItems))
self.plotItem.scene().addItem(self)
self.plotItem.getAxis("right").linkToView(self)
self.setXLink(self.plotItem.getViewBox())
# make autorange button work for Y2 too
self.plotItem.autoBtn.clicked.connect(self._onAutoBtnClicked)
def _updateViews(self, viewBox):
self.setGeometry(viewBox.sceneBoundingRect())
self.linkedViewChanged(viewBox, self.XAxis)
def removeItem(self, item):
"""Reimplemented from :class:`pyqtgraph.ViewBox`"""
ViewBox.removeItem(self, item)
if self.plotItem is not None:
self.plotItem.showAxis("right", show=bool(self.addedItems))
def addItem(self, item, ignoreBounds=False):
"""Reimplemented from :class:`pyqtgraph.ViewBox`"""
# first add it to plotItem and then move it from main viewbox to y2
if self.plotItem is not None:
if item not in self.plotItem.listDataItems():
self.plotItem.addItem(item)
if item in self.plotItem.getViewBox().addedItems:
self.plotItem.getViewBox().removeItem(item)
ViewBox.addItem(self, item, ignoreBounds=ignoreBounds)
if self.plotItem is not None:
self.plotItem.showAxis("right", show=bool(self.addedItems))
# set the item log mode to match this view:
if hasattr(item, "setLogMode"):
item.setLogMode(
self.plotItem.getAxis("bottom").logMode,
self.plotItem.getAxis("right").logMode,
)
def _getCurvesNames(self):
"""Returns the curve names associated to the Y2 axis.
:return: (list) List of tuples of model names (xModelName, yModelName)
from each curve in this view
"""
if self.plotItem is None:
return []
ret = []
for c in self.plotItem.listDataItems():
if c.getViewBox() == self and hasattr(c, "getFullModelNames"):
ret.append(c.getFullModelNames())
return ret
def _addCurvesByName(self, names):
curves = {}
for c in self.plotItem.listDataItems():
if hasattr(c, "getFullModelNames"):
curves[c.getFullModelNames()] = c
for n in names:
c = curves[n]
vb = c.getViewBox()
if vb != self:
vb.removeItem(c)
self.addItem(c)
def _getState(self):
"""Same as ViewBox.getState but removing viewRange conf to force
a refresh with targetRange when loading
"""
state = self.getState(copy=True)
del state["viewRange"]
return state
def clearItems(self):
"""Remove the added items"""
for c in self.addedItems:
self.removeItem(c)
def _addLogAxisActions(self):
# insert & connect actions Log Scale Actions
# X (bottom)
menu = self.plotItem.getViewBox().menu.axes[0]
action = menu.addAction("Log scale")
action.setCheckable(True)
action.setChecked(self.plotItem.getAxis("bottom").logMode)
action.setParent(menu)
action.toggled.connect(self._onXLogToggled)
self.menu.axes[0].addAction(action) # Add same action to X2 menu too
# Y1 (left)
menu = self.plotItem.getViewBox().menu.axes[1]
action = menu.addAction("Log scale")
action.setCheckable(True)
action.setChecked(self.plotItem.getAxis("left").logMode)
action.setParent(menu)
action.toggled.connect(self._onY1LogToggled)
# Y2 (right)
menu = self.menu.axes[1]
action = menu.addAction("Log scale")
action.setCheckable(True)
action.setChecked(self.plotItem.getAxis("right").logMode)
action.setParent(menu)
action.toggled.connect(self._onY2LogToggled)
def _onXLogToggled(self, checked):
logx = checked
# set log mode for items of main viewbox
logy = self.plotItem.getAxis("left").logMode
for i in self.plotItem.getViewBox().addedItems:
if hasattr(i, "setLogMode"):
i.setLogMode(logx, logy)
# set log mode for items of Y2 viewbox
logy = self.plotItem.getAxis("right").logMode
for i in self.addedItems:
if hasattr(i, "setLogMode"):
i.setLogMode(logx, logy)
# set log mode for the bottom axis
self.plotItem.getAxis("bottom").setLogMode(checked)
def _onY1LogToggled(self, checked):
# set log mode for items of main viewbox
logx = self.plotItem.getAxis("bottom").logMode
logy = checked
for i in self.plotItem.getViewBox().addedItems:
if hasattr(i, "setLogMode"):
i.setLogMode(logx, logy)
# set log mode for the left axis
self.plotItem.getAxis("left").setLogMode(checked)
def _onY2LogToggled(self, checked):
# set log mode for items of Y2 viewbox
logx = self.plotItem.getAxis("bottom").logMode
logy = checked
for i in self.addedItems:
if hasattr(i, "setLogMode"):
i.setLogMode(logx, logy)
# set log mode for the right axis
self.plotItem.getAxis("right").setLogMode(checked)
def _onAutoBtnClicked(self):
self.enableAutoRange()
``` |
{
"source": "jordibisbal8/network-slicing",
"score": 3
} |
#### File: network-slicing/evaluation/acceptance_rate3D.py
```python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class MyAxes3D(Axes3D):
def __init__(self, baseObject, sides_to_draw):
self.__class__ = type(baseObject.__class__.__name__,
(self.__class__, baseObject.__class__),
{})
self.__dict__ = baseObject.__dict__
self.sides_to_draw = list(sides_to_draw)
self.mouse_init()
def set_some_features_visibility(self, visible):
for t in self.w_zaxis.get_ticklines() + self.w_zaxis.get_ticklabels():
t.set_visible(visible)
self.w_zaxis.line.set_visible(visible)
self.w_zaxis.pane.set_visible(visible)
self.w_zaxis.label.set_visible(visible)
def draw(self, renderer):
# set visibility of some features False
self.set_some_features_visibility(False)
# draw the axes
super(MyAxes3D, self).draw(renderer)
# set visibility of some features True.
# This could be adapted to set your features to desired visibility,
# e.g. storing the previous values and restoring the values
self.set_some_features_visibility(True)
zaxis = self.zaxis
draw_grid_old = zaxis.axes._draw_grid
# disable draw grid
zaxis.axes._draw_grid = False
tmp_planes = zaxis._PLANES
if 'l' in self.sides_to_draw :
# draw zaxis on the left side
zaxis._PLANES = (tmp_planes[2], tmp_planes[3],
tmp_planes[0], tmp_planes[1],
tmp_planes[4], tmp_planes[5])
zaxis.draw(renderer)
if 'r' in self.sides_to_draw :
# draw zaxis on the right side
zaxis._PLANES = (tmp_planes[3], tmp_planes[2],
tmp_planes[1], tmp_planes[0],
tmp_planes[4], tmp_planes[5])
zaxis.draw(renderer)
zaxis._PLANES = tmp_planes
        # restore the original draw grid setting
zaxis.axes._draw_grid = draw_grid_old
fig = plt.figure()
ax = fig.gca(projection='3d')
df3 = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/acceptanceRate/acceptanceRate3_1.csv')
df4 = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/acceptanceRate/acceptanceRate4_1.csv')
df5 = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/acceptanceRate/acceptanceRate5_1.csv')
df6 = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/acceptanceRate/acceptanceRate6_1.csv')
df7 = pd.read_csv('/home/jordibisbal/WS18-MSc-JordiBisbalAnsaldo--NetworkSlicing/evaluation/experiments/1/acceptanceRate/acceptanceRate7_1.csv')
y = np.array([3,4,5,6,7])
x, y = np.meshgrid(df3.index.values, y)
z = np.array([
df3.mean(axis=1).values, df6.mean(axis=1).values, df4.mean(axis=1).values, df7.mean(axis=1).values,df5.mean(axis=1).values
])
fig.add_axes(MyAxes3D(ax, 'l'))
surf = ax.plot_surface(x, y, z)
# Customize the axis.
ax.set_zlim(0, 1)
ax.set_xlim(xmin=0,xmax=100)
ax.set_ylim(ymin=3,ymax=7)
ax.w_yaxis.set_major_locator(LinearLocator(5))
ax.set_xlabel('# VNR', fontsize=8)
ax.set_ylabel('# ' + '$I_p$', fontsize=8)
ax.set_zlabel('average acceptance rate E[' + r'$\varphi$]', fontsize=8)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.savefig('ev_acceptance_rate3D.png')
plt.show()
``` |
{
"source": "jordibsk10/homeassistant-seatconnect",
"score": 2
} |
#### File: custom_components/seatconnect/sensor.py
```python
import logging
from . import DATA_KEY, DOMAIN, SeatEntity
from .const import DATA
from homeassistant.const import CONF_RESOURCES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Seat sensors."""
if discovery_info is None:
return
async_add_entities([SeatSensor(hass.data[DATA_KEY], *discovery_info)])
async def async_setup_entry(hass, entry, async_add_devices):
data = hass.data[DOMAIN][entry.entry_id][DATA]
coordinator = data.coordinator
if coordinator.data is not None:
if CONF_RESOURCES in entry.options:
resources = entry.options[CONF_RESOURCES]
else:
resources = entry.data[CONF_RESOURCES]
async_add_devices(
SeatSensor(
data, instrument.vehicle_name, instrument.component, instrument.attr
)
for instrument in (
instrument
for instrument in data.instruments
if instrument.component == "sensor" and instrument.attr in resources
)
)
return True
class SeatSensor(SeatEntity):
"""Representation of a Seat Sensor."""
@property
def state(self):
"""Return the state of the sensor."""
return self.instrument.state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self.instrument.unit
``` |
{
"source": "jordic/aiofsearch",
"score": 2
} |
#### File: tmpo/fsearch/main.py
```python
from aiohttp import web
from asyncio import subprocess
from asyncio import create_subprocess_exec
from asyncio import ensure_future
from os.path import join
from os.path import dirname
import aiohttp
import asyncio
import os
import json
import logging
logger = logging.getLogger('aiofsearch')
_curr_path = os.getcwd()
_empty = b'\n'
_static = join(
dirname(dirname(__file__)),
'front', 'dist'
)
class Result:
def __init__(self, file, path, found):
self.file = file.decode('utf-8')
self.path = path
self.lines = []
self.found = found
def encode(self):
return json.dumps(dict(
base=self.path,
file=self.file.replace(self.path, ""),
lines=self.lines,
found=self.found
))
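    # Shape of the JSON message sent to the frontend by encode()
    # (illustrative values only):
    #   {"base": "/home/me/project", "file": "/src/app.py",
    #    "lines": ["12:def handler():", "13:    ..."], "found": 3}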
def add(self, line):
try:
self.lines.append(line.decode('utf-8'))
except UnicodeDecodeError:
            # omit unicode errors
pass
async def make_app():
print(f'static {_static}')
app = web.Application()
app.add_routes([web.get('/-/search', search_files)])
app.add_routes([web.get('/-/open', open_file)])
app.add_routes([web.get('/', index_html)])
app.add_routes([web.static('/', _static, show_index=True)])
return app
async def index_html(request):
return web.FileResponse(join(_static, 'index.html'))
async def open_file(request):
file = request.query.get('f')
line = request.query.get('l', '0')
if file:
logger.info(f'opened {_curr_path}{file}:{line}')
cmd = ["/usr/bin/subl", f'{_curr_path}{file}:{line}']
await create_subprocess_exec(*cmd)
return web.json_response(dict(result='ok'))
async def process_ws(ws, process):
result = None
counter = 0
while True:
try:
line = await process.stdout.readline()
        except ValueError:
            # readline() raises ValueError when a line exceeds the stream limit;
            # skip that line instead of re-using a possibly unbound `line`
            continue
if line:
if not result:
counter = counter + 1
result = Result(line[:-1], _curr_path, counter)
elif line != _empty:
result.add(line[:-1])
else:
await ws.send_str(result.encode())
result = None
else:
await ws.close()
logger.debug('stop loop: no line')
break
if process.returncode:
logger.debug('stop loop: process exit')
break
if ws.closed:
logger.debug('stop loop: ws closed')
break
async def search_files(request):
query = request.query.get('q', 'StreamResponse')
cmd = [
"/usr/bin/ag", query, _curr_path, '--nocolor', '--group',
"--ignore", "*.min*", "--ignore", "*.map*", "--ignore",
"*node_modules*", "--ignore", "*env*"
]
process = await create_subprocess_exec(
*cmd, stdout=subprocess.PIPE
)
ws = web.WebSocketResponse()
await ws.prepare(request)
task = ensure_future(process_ws(ws, process))
async for msg in ws:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
logger.info('client had disconnected')
await ws.close()
process.kill()
elif msg.type == aiohttp.WSMsgType.ERROR:
logger.error('ws connection closed with exception %s' %
ws.exception())
return ws
if __name__ == "__main__":
web.run_app(make_app())
def run():
web.run_app(make_app())
``` |
{
"source": "jordicam/calculadora",
"score": 3
} |
#### File: jordicam/calculadora/calculadora.py
```python
from calculadora_utils.inputs import seleccion
from calculadora_utils.run import calculadora_run
def main():
select = seleccion()
while select != 3:
if select == 1:
pass
elif select == 2:
resultado = calculadora_run()
print(f"El resultado de la calculadora es: {resultado}")
select= seleccion()
#### RUN
if __name__ == "__main__":
value = main()
```
#### File: calculadora/calculadora_utils/inputs.py
```python
def getnumber():
valor1 = float(input('Enter your input number 1:'))
valor2 = float(input('Enter your input number 2:'))
return valor1, valor2 #####!!!!!!!
def getoperacion():
operacion = input('Qué operación quieres hacer?')
return(operacion)
def seleccion():
print("1. Banco\n 2. Calculadora\n 3. Salir")
select = int(input('Qué quieres hacer?'))
return select
```
#### File: calculadora/calculadora_utils/operaciones.py
```python
def suma(valor1,valor2):
suma = valor1 + valor2
return(suma)
def resta(valor1,valor2):
resta = valor1 - valor2
return(resta)
```
#### File: calculadora/calculadora_utils/run.py
```python
from calculadora_utils.inputs import getnumber, getoperacion
from calculadora_utils.operaciones import suma, resta
def calculadora_run():
valor1, valor2 = getnumber()
operacion = getoperacion()
#### "" --> str no "" --> variable ##CONDICIONALES
if operacion == "suma":
resultado = suma(valor1, valor2)
elif operacion == "resta":
resultado = resta(valor1, valor2)
else:
resultado = ""
print("error_valor")
return(resultado)
``` |
{
"source": "jordicam/web-scraping",
"score": 3
} |
#### File: web-scraping/tests/test_general.py
```python
from webscraping.utilities.utility import descargarimagen, downloadinfo_url, downloadparrafos
from tests.utilities import clean_up, clean_enlace
import os
######UNIT TEST################
def test_descargarimagen():
soup = clean_enlace()
myimg = soup.findAll("img")[0]
descargarimagen(myimg)
assert os.path.exists("400px-Municipalities_of_Spain.svg.png") == True
def test_descargarparrafo():
clean_up("*.txt")
soup = clean_enlace()
downloadparrafos(soup)
assert os.path.exists("Output.txt") == True
#######END TO END TEST#############
def test_main():
clean_up("*.png")
clean_up("*.txt")
enlace = "https://es.wikipedia.org/wiki/Anexo:Municipios_de_Espa%C3%B1a_por_poblaci%C3%B3n"
nimg = 4
imagenes_desc = downloadinfo_url(enlace,nimg)
for img in imagenes_desc:
assert os.path.exists(img) == True
assert len(imagenes_desc) == nimg
assert os.path.exists("Output.txt") == True
```
#### File: web-scraping/webscraping/main.py
```python
import argparse
from webscraping.utilities.utility import get_html, downloadimages, downloadinfo_url, descargarimagen
def leer_inputs():
    parser = argparse.ArgumentParser(description='Descarga todas las imágenes de tu url preferida!')
    parser.add_argument('--url', type=str, help='Pon tu url a descargar las imágenes i.e --url www.wikipedia.com')
parser.add_argument('--nimg', type=int, help='Pon el número de imágenes a descargar')
args = parser.parse_args()
return args.url, args.nimg
def main():
enlace, nimg = leer_inputs()
downloadinfo_url(enlace, nimg)
#### EXECUTION
if __name__ == "__main__":
main()
``` |
{
"source": "jordicea/image-size-classifier",
"score": 3
} |
#### File: image-size-classifier/src/File.py
```python
from os import path
class File:
def __init__(self, file_path):
self.__path = file_path
self.__name = path.basename(self.__path)
self.__size = path.getsize(self.__path)
self.__jpg_name = ''
self.__jpg_size = 0
self.__jpg_percent = 0
def get_path(self) -> str:
return self.__path
def get_name(self) -> str:
return self.__name
def get_size(self) -> int:
return self.__size
def set_jpg(self, path_name: str):
self.__jpg_name = path_name
self.__jpg_size = path.getsize(self.__jpg_name)
def get_jpg_name(self) -> str:
return path.basename(self.__jpg_name)
def get_jpg_size(self) -> int:
return self.__jpg_size
def set_jpg_percent(self, jpg_percent: float):
self.__jpg_percent = jpg_percent
def get_jpg_percent(self) -> float:
return self.__jpg_percent
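    # Illustrative usage sketch (paths are hypothetical, not from the original source):
    #
    #     f = File("/tmp/photo.png")
    #     f.set_jpg("/tmp/photo.jpg")
    #     f.set_jpg_percent(round(f.get_jpg_size() / f.get_size() * 100, 2))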
``` |
{
"source": "jordic/fastapi_iam",
"score": 3
} |
#### File: fastapi_iam/auth/extractors.py
```python
from fastapi.requests import Request
import base64
async def get_extractors(iam, request: Request):
user = None
for extractor in iam.get_security_policy().extractors:
user = await extractor(request).extract_token()
if user:
break
return user
# This part is from guillotina https://github.com/plone/guillotina
class BasePolicy:
name = "<FILL IN>"
def __init__(self, request):
self.request = request
async def extract_token(self):
"""
Extracts token from request.
This will be a dictionary including something like {id, password},
depending on the auth policy to authenticate user against
"""
        raise NotImplementedError()
class BearerAuthPolicy(BasePolicy):
name = "bearer"
async def extract_token(self):
header_auth = self.request.headers.get("AUTHORIZATION")
if header_auth is not None:
schema, _, encoded_token = header_auth.partition(" ")
if schema.lower() == "bearer":
return {"type": "bearer", "token": encoded_token.strip()}
class BasicAuthPolicy(BasePolicy):
name = "basic"
async def extract_token(self, value=None):
if value is None:
header_auth = self.request.headers.get("AUTHORIZATION")
else:
header_auth = value
if header_auth is not None:
schema, _, encoded_token = header_auth.partition(" ")
if schema.lower() == "basic":
try:
token = base64.b64decode(encoded_token).decode("utf-8")
except Exception: # pragma: no cover
# could be unicode, could be binascii generic,
# should just be ignored if we can't decode
return
userid, _, password = token.partition(":")
return {
"type": "basic",
"id": userid.strip(),
"token": password.strip(),
}
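    # Illustrative decoding example (header value is made up, not from the original
    # source): for "Authorization: Basic dXNlcjpzZWNyZXQ=" the base64 payload
    # decodes to "user:secret", so extract_token() would return
    # {"type": "basic", "id": "user", "token": "secret"}.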
```
#### File: fastapi_iam/auth/hasher.py
```python
from ..utils import run_in_threadpool
from functools import lru_cache
import argon2
ph = argon2.PasswordHasher()
class ArgonPasswordHasher:
algorithm = "argon2"
async def hash_password(self, password):
if isinstance(password, str):
password = password.encode("utf-8")
hashed_password = await run_in_threadpool(ph.hash, password)
return hashed_password
async def check_password(self, token, password) -> bool:
return await run_in_threadpool(
self.argon2_password_validator, token, password
)
@lru_cache(100)
def argon2_password_validator(self, token, password):
try:
return ph.verify(token, password)
except (
argon2.exceptions.InvalidHash,
argon2.exceptions.VerifyMismatchError,
):
return False
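    # Minimal usage sketch (illustrative only; must be awaited inside an event loop):
    #
    #     hasher = ArgonPasswordHasher()
    #     stored = await hasher.hash_password("s3cret")
    #     assert await hasher.check_password(stored, "s3cret") is True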
```
#### File: fastapi_iam/fastapi_iam/__init__.py
```python
from . import auth
from . import interfaces
from . import views
from .initialize import initialize_db
from .provider import set_provider
from .services import pg
from .views import admin
from fastapi import APIRouter
from fastapi_asyncpg import configure_asyncpg
from functools import partial
import logging
import typing
logger = logging.getLogger("fastapi_iam")
default_settings = {
"db_schema": "",
"jwt_expiration": 6 * 60 * 60, # expiratoin in seconds
"jwt_algorithm": "HS256",
"jwt_secret_key": "XXXXX",
"cookie_domain": None,
"session_expiration": 60 * 60 * 24 * 360, # one year
"rotate_refresh_tokens": True,
"db_pool": None,
"services": {
interfaces.IUsersStorage: pg.UserStorage,
interfaces.ISessionStorage: pg.SessionStorage,
interfaces.IGroupsStorage: pg.GroupStorage,
},
"default_service_factory": pg.pg_service_factory,
"admin_routes": True,
"refresh_token_secret_key": "xxxxx", # just in case using secure cookie tokens
}
def configure_iam(
settings: typing.Dict[str, typing.Any],
*,
security_policy=auth.PersistentSecurityPolicy,
fastapi_asyncpg: configure_asyncpg = None,
):
if "jwt_secret_key" not in settings:
logger.warning("INSECURE SECRET KEY, provide a new one")
defaults = default_settings.copy()
defaults.update(settings)
iam = IAM(
defaults,
fastapi_asyncpg=fastapi_asyncpg,
security_policy=security_policy,
)
set_provider(iam)
return iam
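# Minimal wiring sketch (illustrative, not from the original source; assumes a
# FastAPI app instance `app` and a configured fastapi_asyncpg instance `db`):
#
#     iam = configure_iam({"jwt_secret_key": "change-me"}, fastapi_asyncpg=db)
#     app.include_router(iam.router, prefix="/auth")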
class IAM:
def __init__(
self,
settings,
*,
fastapi_asyncpg=None,
security_policy=None,
api_router_cls=APIRouter,
):
self.router = api_router_cls()
self.settings = settings
self.db = fastapi_asyncpg
self.security_policy = security_policy
self.services = settings["services"]
self.services_factory = {}
self.initialize_iam_db = partial(initialize_db, settings)
self.setup_routes()
def set_asyncpg(self, db):
self.db = db
def setup_routes(self):
self.router.add_api_route("/status", views.status)
self.router.add_api_route("/login", views.login, methods=["POST"])
self.router.add_api_route(
"/logout", views.logout, methods=["POST", "GET"]
)
self.router.add_api_route("/renew", views.renew, methods=["POST"])
self.router.add_api_route("/whoami", views.whoami)
if self.settings["admin_routes"] is True:
admin.setup_routes(self.router)
@property
def pool(self):
return self.db.pool
def get_security_policy(self):
return self.security_policy(self)
def get_service(self, service_type):
assert service_type in self.services
factory = self.settings["default_service_factory"]
if service_type in self.services_factory:
factory = self.services_factory[service_type]
return factory(self, self.services[service_type])
```
#### File: services/pg/base.py
```python
import asyncpg
class BaseRepository:
def __init__(self, db: asyncpg.Connection, schema: str = None):
self.db = db
self._schema = schema
@property
def schema(self):
return f"{self._schema}." if self._schema else ""
```
#### File: fastapi_iam/fastapi_iam/testing.py
```python
from functools import partial
async def login(client, username, password) -> "Client":
res = await client.post(
"/auth/login",
form={"username": username, "password": password},
)
assert res.status_code == 200
return Client(client, res.json()["access_token"])
def auth_header(token):
return {"Authorization": f"Bearer {token}"}
class Client:
def __init__(self, client, access_token):
self.client = client
self.token = access_token
def __getattr__(self, name):
func = getattr(self.client, name)
return partial(func, headers=auth_header(self.token))
```
#### File: fastapi_iam/fastapi_iam/utils.py
```python
from collections.abc import MutableMapping
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Any
import asyncio
def merge_dicts(d1: dict, d2: dict) -> dict:
"""
Update two dicts of dicts recursively,
if either mapping has leaves that are non-dicts,
the second's leaf overwrites the first's.
"""
for k, v in d1.items():
if k in d2:
# this next check is the only difference!
if all(isinstance(e, MutableMapping) for e in (v, d2[k])):
d2[k] = merge_dicts(v, d2[k])
if isinstance(v, list):
d2[k].extend(v)
# we could further check types and merge as appropriate here.
d3 = d1.copy()
d3.update(d2)
return d3
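# Illustrative example of the recursive merge (values are made up):
#
#     merge_dicts({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3})
#     -> {"a": {"x": 1, "y": 2}, "b": 3}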
async def run_in_threadpool(func, *args, **kwargs):
curr = partial(func, *args, **kwargs)
loop = asyncio.get_running_loop()
with ThreadPoolExecutor() as pool:
return await loop.run_in_executor(pool, curr)
def resolve_dotted_name(name: str) -> Any:
"""
import the provided dotted name
>>> resolve_dotted_name('guillotina.interfaces.IRequest')
<InterfaceClass guillotina.interfaces.IRequest>
:param name: dotted name
"""
if not isinstance(name, str):
return name # already an object
names = name.split(".")
used = names.pop(0)
found = __import__(used)
for n in names:
used += "." + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def maybe_resolve(item: Any):
if isinstance(item, str):
return resolve_dotted_name(item)
return item
```
#### File: fastapi_iam/tests/test_admin.py
```python
from fastapi_iam import testing
import pytest
pytestmark = pytest.mark.asyncio
async def test_admin_routes(users):
client, iam = users
# tc is a customized test client that injects the correct headers
tc = await testing.login(client, "<EMAIL>", "<PASSWORD>")
res = await tc.get("/auth/whoami")
assert res.status_code == 200
assert res.json()["email"] == "<EMAIL>"
res = await tc.get("/auth/users")
assert res.status_code == 403
logged = await testing.login(client, "<EMAIL>", "<PASSWORD>")
res = await logged.get("/auth/users")
assert res.status_code == 200
assert res.json()["total"] == 3
async def test_create_user(users):
client, _ = users
logged = await testing.login(client, "<EMAIL>", "<PASSWORD>")
# create a new user
new_user = {
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_staff": True,
"is_admin": True,
"is_active": True,
}
resp = await logged.post("/auth/users", json=new_user)
assert resp.status_code == 201
res = await logged.get("/auth/users")
assert res.json()["total"] == 4
new_ = await testing.login(client, "<EMAIL>", "1234")
resp = await new_.get("/auth/users")
assert resp.json()["total"] == 4
inactive = new_user.copy()
inactive.update({"is_active": False, "email": "<EMAIL>"})
resp = await logged.post("/auth/users", json=inactive)
res = await logged.get("/auth/users?is_active=false")
assert res.json()["total"] == 2
async def test_create_group(users):
client, _ = users
logged = await testing.login(client, "<EMAIL>", "<PASSWORD>")
res = await logged.post("/auth/groups", json={"name": "group1"})
assert res.status_code == 201
res = await logged.get("/auth/groups")
assert res.json() == ["group1"]
async def test_update_user(users):
client, _ = users
logged = await testing.login(client, "<EMAIL>", "<PASSWORD>")
res = await logged.get("/auth/users?q=inactive%")
assert res.status_code == 200
user_id = res.json()["items"][0]["user_id"]
res = await logged.patch(
f"/auth/users/{user_id}", json={"is_active": True, "password": "<PASSWORD>"}
)
assert res.status_code == 200
assert res.json()["email"] == "<EMAIL>"
updated = await testing.login(client, "<EMAIL>", "<PASSWORD>")
res = await updated.get("/auth/whoami")
assert res.status_code == 200
# add a group
await logged.post("/auth/groups", json={"name": "group1"})
res = await logged.patch(
f"/auth/users/{user_id}", json={"groups": ["group1"]}
)
assert "group1" in res.json()["groups"]
```
#### File: fastapi_iam/tests/test_login.py
```python
import pytest
import jwt
pytestmark = pytest.mark.asyncio
def auth_header(token):
return {"Authorization": f"Bearer {token}"}
async def test_login(users):
client, ins = users
res = await client.post(
"/auth/login",
form={"username": "<EMAIL>", "password": "<PASSWORD>"},
)
assert res.status_code == 200
assert "access_token" in res.json()
token = res.json()["access_token"]
    # ensure the token can be decoded
validated = jwt.decode(
token,
ins.settings["jwt_secret_key"],
algorithms=ins.settings["jwt_algorithm"],
)
assert validated["email"] == "<EMAIL>"
assert "staff" in validated["principals"]
assert validated["is_admin"] is False
assert "refresh" in res.cookies
# todo: verify cookie max-age, domain, httponly
res = await client.post(
"/auth/login", form={"username": "invalid", "password": "<PASSWORD>"}
)
assert res.status_code == 400
res = await client.get("/auth/whoami")
assert res.status_code == 200
assert res.json()["username"] == "anonymous"
res = await client.get("/auth/whoami", headers=auth_header(token))
assert res.status_code == 200
user = res.json()
assert user["email"] == "<EMAIL>"
assert "password" not in user
assert user["is_admin"] is False
# ensure we can logout
res = await client.get("/auth/logout", headers=auth_header(token))
assert res.status_code == 200
res = await client.get("/auth/whoami", headers=auth_header(token))
assert res.status_code == 403
# TODO check cookie logout is present
# we don't fail if there's no cookie
res = await client.post("/auth/logout")
async def test_invalid_token(users):
client, _ = users
res = await client.get(
"/auth/whoami", headers={"Authorization": "Bearer XXX"}
)
assert res.status_code == 403
async def test_disable_user(users):
client, iam = users
res = await client.post(
"/auth/login",
form={"username": "<EMAIL>", "password": "<PASSWORD>"},
)
assert res.status_code == 200
assert "access_token" in res.json()
token = res.json()["access_token"]
res = await client.get("/auth/whoami", headers=auth_header(token))
assert res.status_code == 200
user_id = res.json()["user_id"]
async with iam.pool.acquire() as db:
await db.execute(
"update users set is_active=false where user_id=$1", user_id
)
res = await client.post(
"/auth/login",
form={"username": "<EMAIL>", "password": "<PASSWORD>"},
)
assert res.status_code == 412
async def test_refresh_token(users):
client, iam = users
# change expiration time to be negative
iam.settings["jwt_expiration"] = -60
res = await client.post(
"/auth/login",
form={"username": "<EMAIL>", "password": "<PASSWORD>"},
)
assert res.status_code == 200
access_token = res.json()["access_token"]
try:
_ = jwt.decode(
access_token,
iam.settings["jwt_secret_key"],
algorithms=iam.settings["jwt_algorithm"],
)
except jwt.exceptions.ExpiredSignatureError:
pass
# keep the refresh cookie for later use
refresh_token = res.cookies["refresh"]
# we are not authenticated
res = await client.get("/auth/whoami", headers=auth_header(access_token))
assert res.status_code == 403
headers = {"refresh": f"{refresh_token}"}
iam.settings["jwt_expiration"] = 60 * 60 * 24
renew = await client.post("/auth/renew", cookies=headers)
assert renew.status_code == 200
nt = renew.json()["access_token"]
res = await client.get("/auth/whoami", headers=auth_header(nt))
assert res.status_code == 200
```
#### File: fastapi_iam/tests/test_migrations.py
```python
import pytest
pytestmark = pytest.mark.asyncio
async def testing_migrations(conn):
val = await conn.fetchval("SELECT value from users_version")
assert val == 1
```
#### File: fastapi_iam/tests/test_storage.py
```python
from fastapi_asyncpg import sql
from fastapi_iam.services.pg import UserStorage
from fastapi_iam.services.pg import GroupStorage
from fastapi_iam import models
import pytest
pytestmark = pytest.mark.asyncio
user1 = models.UserCreate(email="<EMAIL>", password="<PASSWORD>")
groups = ["admin", "staff", "mkt"]
async def test_base_model_service_storage(conn):
repo = UserStorage(conn)
await repo.create(user1)
assert await sql.count(conn, "users") == 1
user = await repo.by_email("<EMAIL>")
assert user.password == "<PASSWORD>"
grepo = GroupStorage(conn)
# add groups to user
for group in groups:
await grepo.add_group(group)
assert await sql.count(conn, "groups") == 3
assert set(await grepo.get_groups()) == set(groups)
user = await repo.update_groups(user, [groups[0], groups[1]])
assert len(user.groups) == 2
assert set(user.groups) == set(groups[:2])
user = await repo.update_groups(user, groups)
assert set(user.groups) == set(groups)
user = await repo.update_groups(user, ["xxxxx"])
assert len(user.groups) == 0
async def test_users_search(users):
_, ins = users
storage = UserStorage(ins.pool)
result = await storage.search()
assert result["total"] == 3
emails = set([r.email for r in result["items"]])
assert emails == {"<EMAIL>", "<EMAIL>", "<EMAIL>"}
result = await storage.search(q="test%")
assert result["total"] == 1
assert result["items"][0].email == "<EMAIL>"
result = await storage.search(is_active=False)
assert result["total"] == 1
assert result["items"][0].email == "<EMAIL>"
result = await storage.search(is_admin=True, is_active=True)
assert result["total"] == 1
assert result["items"][0].email == "<EMAIL>"
result = await storage.search(is_staff=True)
assert result["total"] == 3
``` |
{
"source": "jordic/guillotina",
"score": 2
} |
#### File: guillotina/commands/apigen.py
```python
from guillotina.commands import Command
from guillotina.content import load_cached_schema
from guillotina.documentation.generate import process_command_file
class APIGenCommand(Command):
description = 'Generate APIDoc data'
hide = True
def get_parser(self):
parser = super(APIGenCommand, self).get_parser()
parser.add_argument('-i', '--input', nargs='?', help='Input filename')
parser.add_argument('-e', '--endpoint', nargs='?',
default='http://localhost:8080', help='Guillotina Endpoint')
parser.add_argument('-o', '--output', nargs='?',
default='./', help='Output path')
return parser
async def run(self, arguments, settings, app):
load_cached_schema()
process_command_file(
arguments.input,
arguments.endpoint,
arguments.output
)
```
#### File: guillotina/interfaces/security.py
```python
from guillotina.i18n import MessageFactory
from guillotina.schema import Text
from guillotina.schema import TextLine
from zope.interface import Attribute
from zope.interface import Interface
import copyreg
_ = MessageFactory('guillotina')
Public = 'guillotina.Public' # constant to check for always allowed permission
# These are the "setting" values returned by several methods defined
# in these interfaces. The implementation may move to another
# location in the future, so this should be the preferred module to
# import these from.
class PermissionSetting:
"""PermissionSettings should be considered as immutable.
They can be compared by identity. They are identified by
their name.
"""
def __new__(cls, name, description=None):
"""Keep a dict of PermissionSetting instances, indexed by
name. If the name already exists in the dict, return that
instance rather than creating a new one.
"""
instances = cls.__dict__.get('_z_instances')
if instances is None:
cls._z_instances = instances = {}
it = instances.get(name)
if it is None:
instances[name] = it = object.__new__(cls)
it._init(name, description)
return it
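    # Illustrative consequence of the instance cache above: constructing a
    # setting twice with the same name yields the very same object, e.g.
    # PermissionSetting('Allow') is PermissionSetting('Allow') -> True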
def _init(self, name, description):
self.__name = name
self.__description = description
def get_description(self):
return self.__description
def get_name(self):
return self.__name
def __str__(self):
return 'PermissionSetting: %s' % self.__name
__repr__ = __str__
# register PermissionSettings to be symbolic constants by identity,
# even when pickled and unpickled.
copyreg.constructor(PermissionSetting)
copyreg.pickle(PermissionSetting,
PermissionSetting.get_name,
PermissionSetting)
Allow = PermissionSetting(
'Allow', 'Explicit allow setting for permissions')
Deny = PermissionSetting(
'Deny', 'Explicit deny setting for permissions')
AllowSingle = PermissionSetting(
'AllowSingle', 'Explicit allow and not inherit permission')
Unset = PermissionSetting(
'Unset', 'Unset constant that denotes no setting for permission')
class IGroups(Interface): # pylint: disable=E0239
"""A group Utility search."""
class IRole(Interface): # pylint: disable=E0239
"""A role object."""
id = TextLine(
title='Id',
description='Id as which this role will be known and used.',
readonly=True,
required=True)
title = TextLine(
title='Title',
description='Provides a title for the role.',
required=True)
description = Text(
title='Description',
description='Provides a description for the role.',
required=False)
class IPrincipalRoleMap(Interface): # pylint: disable=E0239
"""Mappings between principals and roles."""
def get_principals_for_role(self, role_id): # noqa: N805
"""Get the principals that have been granted a role.
        Return the list of (principal id, setting) tuples for principals that
        have been assigned to or removed from the role.
If no principals have been assigned this role,
then the empty list is returned.
"""
def get_roles_for_principal(self, principal_id): # noqa: N805
"""Get the roles granted to a principal.
Return the list of (role id, setting) assigned or removed from
this principal.
If no roles have been assigned to
this principal, then the empty list is returned.
"""
def get_setting(self, role_id, principal_id, default=Unset): # noqa: N805
"""Return the setting for this principal, role combination
"""
def get_principals_and_roles(self):
"""Get all settings.
Return all the principal/role combinations along with the
setting for each combination as a sequence of tuples with the
role id, principal id, and setting, in that order.
"""
class IPrincipalRoleManager(IPrincipalRoleMap):
"""Management interface for mappings between principals and roles."""
def assign_role_to_principal(self, role_id, principal_id): # noqa: N805
"""Assign the role to the principal."""
def assign_role_to_principal_no_inherit(self, role_id, principal_id): # noqa: N805
"""Assign the role to the principal."""
def remove_role_from_principal(self, role_id, principal_id): # noqa: N805
"""Remove a role from the principal."""
def unset_role_for_principal(self, role_id, principal_id): # noqa: N805
"""Unset the role for the principal."""
class IRolePermissionMap(Interface): # pylint: disable=E0239
"""Mappings between roles and permissions."""
def get_permissions_for_role(self, role_id): # noqa: N805
"""Get the premissions granted to a role.
Return a sequence of (permission id, setting) tuples for the given
role.
If no permissions have been granted to this
role, then the empty list is returned.
"""
def get_roles_for_permission(self, permission_id): # noqa: N805
"""Get the roles that have a permission.
Return a sequence of (role id, setting) tuples for the given
permission.
If no roles have been granted this permission, then the empty list is
returned.
"""
def get_setting(self, permission_id, role_id, default=Unset): # noqa: N805
"""Return the setting for the given permission id and role id
If there is no setting, Unset is returned
"""
def get_roles_and_permissions(self):
"""Return a sequence of (permission_id, role_id, setting) here.
The settings are returned as a sequence of permission, role,
setting tuples.
If no principal/role assertions have been made here, then the empty
list is returned.
"""
class IRolePermissionManager(IRolePermissionMap):
"""Management interface for mappings between roles and permissions."""
def grant_permission_to_role(self, permission_id, role_id): # noqa: N805
"""Bind the permission to the role.
"""
def grant_permission_to_role_no_inherit(self, role_id, principal_id): # noqa: N805
"""Assign the role to the principal without local inherit."""
def deny_permission_to_role(self, permission_id, role_id): # noqa: N805
"""Deny the permission to the role
"""
def unset_permission_from_role(self, permission_id, role_id): # noqa: N805
"""Clear the setting of the permission to the role.
"""
class IPrincipalPermissionMap(Interface): # pylint: disable=E0239
"""Mappings between principals and permissions."""
def get_principals_for_permission(self, permission_id): # noqa: N805
"""Get the principas that have a permission.
Return the list of (principal_id, setting) tuples that describe
security assertions for this permission.
If no principals have been set for this permission, then the empty
list is returned.
"""
def get_permissions_for_principal(self, principal_id): # noqa: N805
"""Get the permissions granted to a principal.
Return the list of (permission, setting) tuples that describe
security assertions for this principal.
If no permissions have been set for this principal, then the empty
list is returned.
"""
def get_setting(self, permission_id, principal_id, default=Unset): # noqa: N805
"""Get the setting for a permission and principal.
Get the setting (Allow/Deny/Unset) for a given permission and
principal.
"""
def get_principals_and_permissions(self):
"""Get all principal permission settings.
Get the principal security assertions here in the form
        of a list of three-tuples containing
(permission id, principal id, setting)
"""
class IPrincipalPermissionManager(IPrincipalPermissionMap):
"""Management interface for mappings between principals and permissions."""
def grant_permission_to_principal(self, permission_id, principal_id): # noqa: N805
"""Assert that the permission is allowed for the principal.
"""
def grant_permission_to_principal_no_inherit(self, role_id, principal_id): # noqa: N805
"""Assign the role to the principal without local inherit."""
def deny_permission_to_principal(self, permission_id, principal_id): # noqa: N805
"""Assert that the permission is denied to the principal.
"""
def unset_permission_for_principal(self, permission_id, principal_id): # noqa: N805
"""Remove the permission (either denied or allowed) from the
principal.
"""
class IGrantInfo(Interface): # pylint: disable=E0239
"""Get grant info needed for checking access
"""
def principal_permission_grant(self, principal, permission): # noqa: N805
"""Return the principal-permission grant if any
The return value is one of Allow, Deny, or Unset
"""
def get_roles_for_permission(self, permission): # noqa: N805
"""Return the role grants for the permission
The role grants are an iterable of role, setting tuples, where
setting is either Allow or Deny.
"""
def get_roles_for_principal(self, principal): # noqa: N805
"""Return the role grants for the principal
The role grants are an iterable of role, setting tuples, where
setting is either Allow or Deny.
"""
class IInteraction(Interface): # pylint: disable=E0239
"""A representation of an interaction between some actors and the system.
"""
participations = Attribute("""An iterable of participations.""")
def add(self, participation): # noqa: N805
"""Add a participation."""
def remove(self, participation): # noqa: N805
"""Remove a participation."""
def check_permission(self, permission, object): # noqa: N805
"""Return whether security context allows permission on object.
Arguments:
permission -- A permission name
object -- The object being accessed according to the permission
"""
class IPermission(Interface): # pylint: disable=E0239
"""A permission object."""
id = TextLine(
title=_('Id'),
description=_('Id as which this permission will be known and used.'),
readonly=True,
required=True)
title = TextLine(
title=_('Title'),
description=_('Provides a title for the permission.'),
required=True)
description = Text(
title=_('Description'),
description=_('Provides a description for the permission.'),
required=False)
class IPrincipal(Interface): # pylint: disable=E0239
"""Principals are security artifacts that execute actions in a security
environment.
The most common examples of principals include user and group objects.
It is likely that IPrincipal objects will have associated views
used to list principals in management interfaces. For example, a
system in which other meta-data are provided for principals might
extend IPrincipal and register a view for the extended interface
that displays the extended information. We'll probably want to
define a standard view name (e.g. 'inline_summary') for this
purpose.
"""
id = TextLine(
title=_('Id'),
description=_('The unique identification of the principal.'),
required=True,
readonly=True)
class IParticipation(Interface): # pylint: disable=E0239
interaction = Attribute('The interaction')
principal = Attribute('The authenticated principal')
class ISecurityPolicy(Interface): # pylint: disable=E0239
def __call__(self, participation=None): # noqa: N805
"""Creates a new interaction for a given request.
If participation is not None, it is added to the new interaction.
"""
```
#### File: guillotina/utils/misc.py
```python
from aiohttp.web import Request
from collections import MutableMapping
from functools import partial
from guillotina import glogging
from guillotina.component import get_utility
from guillotina.exceptions import RequestNotFound
from guillotina.interfaces import IApplication
from guillotina.interfaces import IRequest
from guillotina.profile import profilable
from hashlib import sha256 as sha
import aiotask_context
import asyncio
import inspect
import random
import string
import time
import types
try:
random = random.SystemRandom() # type: ignore
using_sys_random = True
except NotImplementedError:
using_sys_random = False
RANDOM_SECRET = random.randint(0, 1000000)
logger = glogging.getLogger('guillotina')
def strings_differ(string1: str, string2: str) -> bool:
"""Check whether two strings differ while avoiding timing attacks.
This function returns True if the given strings differ and False
if they are equal. It's careful not to leak information about *where*
they differ as a result of its running time, which can be very important
to avoid certain timing-related crypto attacks:
http://seb.dbzteam.org/crypto/python-oauth-timing-hmac.pdf
"""
if len(string1) != len(string2):
return True
invalid_bits = 0
for a, b in zip(string1, string2):
invalid_bits += a != b
return invalid_bits != 0
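# Illustrative behaviour (values are examples only):
#
#     strings_differ("abc", "abc") -> False  (equal)
#     strings_differ("abc", "abd") -> True   (every position is still compared)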
def get_random_string(length: int = 30,
allowed_chars: str = string.ascii_letters + string.digits) -> str:
"""
Heavily inspired by Plone/Django
Returns a securely generated random string.
"""
if not using_sys_random:
# do our best to get secure random without sysrandom
seed_value = "%s%s%s" % (random.getstate(), time.time(), RANDOM_SECRET)
random.seed(sha(seed_value.encode('utf-8')).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def merge_dicts(d1: dict, d2: dict) -> dict:
"""
Update two dicts of dicts recursively,
if either mapping has leaves that are non-dicts,
the second's leaf overwrites the first's.
"""
# in Python 2, use .iteritems()!
for k, v in d1.items():
if k in d2:
# this next check is the only difference!
if all(isinstance(e, MutableMapping) for e in (v, d2[k])):
d2[k] = merge_dicts(v, d2[k])
if isinstance(v, list):
d2[k].extend(v)
# we could further check types and merge as appropriate here.
d3 = d1.copy()
d3.update(d2)
return d3
async def apply_coroutine(func: types.FunctionType, *args, **kwargs) -> object:
"""
Call a function with the supplied arguments.
If the result is a coroutine, await it.
"""
result = func(*args, **kwargs)
if asyncio.iscoroutine(result):
return await result
return result
def loop_apply_coroutine(loop, func: types.FunctionType, *args, **kwargs) -> object:
"""
Call a function with the supplied arguments.
    If the function is a coroutine function, run it to completion on the supplied loop.
"""
if asyncio.iscoroutinefunction(func):
future = asyncio.ensure_future(
func(*args, **kwargs), loop=loop)
loop.run_until_complete(future)
return future.result()
else:
return func(*args, **kwargs)
@profilable
def get_current_request() -> IRequest:
"""
    Return the current request by heuristically looking it up from the stack
"""
try:
task_context = aiotask_context.get('request')
if task_context is not None:
return task_context
except (ValueError, AttributeError, RuntimeError):
pass
# fallback
frame = inspect.currentframe()
while frame is not None:
request = getattr(frame.f_locals.get('self'), 'request', None)
if request is not None:
return request
elif isinstance(frame.f_locals.get('request'), Request):
return frame.f_locals['request']
frame = frame.f_back
raise RequestNotFound(RequestNotFound.__doc__)
def lazy_apply(func, *call_args, **call_kwargs):
'''
    Apply arguments in the order they appear in the function signature,
    skipping any argument that was not provided.
    call_args are applied positionally when the signature accepts them;
    otherwise, values are pulled from call_kwargs by parameter name.
'''
sig = inspect.signature(func)
args = []
kwargs = {}
for idx, param_name in enumerate(sig.parameters):
param = sig.parameters[param_name]
if param.kind == inspect.Parameter.KEYWORD_ONLY:
if param.name in call_kwargs:
args.append(call_kwargs.pop(param.name))
continue
if param.kind == inspect.Parameter.VAR_KEYWORD:
kwargs.update(call_kwargs) # this will be the last iteration...
continue
if param.kind == inspect.Parameter.POSITIONAL_ONLY:
if len(call_args) >= (idx + 1):
args.append(call_args[idx])
elif param.name in call_kwargs:
args.append(call_kwargs.pop(param.name))
else:
if param.name in call_kwargs:
kwargs[param.name] = call_kwargs.pop(param.name)
elif (param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and
len(call_args) >= (idx + 1)):
args.append(call_args[idx])
return func(*args, **kwargs)
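# Illustrative example (handler, req and user are hypothetical names):
#
#     def handler(request, user=None): ...
#     lazy_apply(handler, req, user=user, unknown_kwarg=1)
#     # -> calls handler(req, user=user); kwargs the signature cannot accept are dropped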
def to_str(value):
if isinstance(value, bytes):
value = value.decode('utf-8')
return value
def clear_conn_statement_cache(conn):
try:
conn._con._stmt_cache.clear()
except Exception: # pragma: no cover
try:
conn._stmt_cache.clear()
except Exception:
pass
def list_or_dict_items(val):
if isinstance(val, list):
new_val = []
for item in val:
new_val.extend([(k, v) for k, v in item.items()])
return new_val
return [(k, v) for k, v in val.items()]
async def run_async(func, *args, **kwargs):
root = get_utility(IApplication, name='root')
loop = asyncio.get_event_loop()
func = partial(func, *args, **kwargs)
return await loop.run_in_executor(root.executor, func)
def safe_unidecode(val):
if isinstance(val, str):
# already decoded
return val
for codec in ('utf-8', 'windows-1252', 'latin-1'):
try:
return val.decode(codec)
except UnicodeDecodeError:
pass
return val.decode('utf-8', errors='replace')
``` |
{
"source": "jordic/guillotina_rediscache",
"score": 2
} |
#### File: guillotina_rediscache/guillotina_rediscache/utility.py
```python
from guillotina import app_settings
from guillotina import configure
from guillotina.profile import profilable
from guillotina_rediscache import cache
from guillotina_rediscache import serialize
from guillotina_rediscache.interfaces import IRedisChannelUtility
import aioredis
import asyncio
import logging
import pickle
logger = logging.getLogger('guillotina_rediscache')
@configure.utility(provides=IRedisChannelUtility)
class RedisChannelUtility:
def __init__(self, settings=None, loop=None):
self._loop = loop
self._settings = {}
self._ignored_tids = []
self._pool = None
self._redis = None
@profilable
async def initialize(self, app=None):
settings = app_settings['redis']
while True:
try:
self._pool = await cache.get_redis_pool(self._loop)
self._redis = aioredis.Redis(self._pool)
res = await self._redis.subscribe(settings['updates_channel'])
ch = res[0]
while (await ch.wait_message()):
try:
msg = serialize.loads(await ch.get())
await self.invalidate(msg)
except (TypeError, pickle.UnpicklingError):
pass
except (asyncio.CancelledError, RuntimeError):
# task cancelled, let it die
return
except Exception:
                logger.warning(
'Error subscribing to redis changes. Waiting before trying again',
exc_info=True)
await asyncio.sleep(5)
async def finalize(self, app):
settings = app_settings['redis']
if self._redis is not None:
try:
await self._redis.unsubscribe(settings['updates_channel'])
await cache.close_redis_pool()
except (asyncio.CancelledError, RuntimeError):
# task cancelled, let it die
return
@profilable
async def invalidate(self, data):
assert isinstance(data, dict)
assert 'tid' in data
assert 'keys' in data
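        # Expected shape of an invalidation message (illustrative values):
        #   {"tid": 42, "keys": ["<cache-key>", ...], "push": {"<cache-key>": <serialized ob>}}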
if data['tid'] in self._ignored_tids:
# on the same thread, ignore this sucker...
self._ignored_tids.remove(data['tid'])
return
mem_cache = cache.get_memory_cache()
for key in data['keys']:
if key in mem_cache:
del mem_cache[key]
for cache_key, ob in data.get('push', {}).items():
mem_cache[cache_key] = ob
def ignore_tid(self, tid):
# so we don't invalidate twice...
self._ignored_tids.append(tid)
```
#### File: guillotina_rediscache/measures/serialize.py
```python
from guillotina_rediscache import serialize
import time
ITERATIONS = 100000
# ----------------------------------------------------
# Measure performance of serializing data
#
# Lessons:
# - pickle is MUCH faster than json
# ----------------------------------------------------
async def runit():
print(f'Test content serialization')
start = time.time()
for _ in range(ITERATIONS):
blah = serialize.dumps({
'dlsfkds': 'dslfkdsf',
'dslfks': 'sdlfkjds',
'state': b'X' * ITERATIONS
})
serialize.loads(blah)
end = time.time()
print(f'Done with {ITERATIONS} in {end - start} seconds')
async def run():
await runit()
```
#### File: guillotina_rediscache/measures/speed_create.py
```python
from guillotina.content import create_content_in_container
from guillotina.transactions import get_tm
from guillotina.transactions import get_transaction
from guillotina.utils import get_current_request
import time
import uuid
ITERATIONS = 100
# ----------------------------------------------------
# Measure performance of caching with content creation
#
# Lessons:
# - asyncio.ensure_future is okay
# - close cleanup done in future helps request performance and improves
# close performance from around 0.04 -> 0.005
# - releasing connection back to pool is slow (all sync and closes connection, why?)
# We should not be manually managing this queue. Let aioredis manage queue
# - 5-10% improvement overall performance on transactions
# ----------------------------------------------------
async def run_create(container):
request = get_current_request()
txn = get_transaction(request)
tm = get_tm(request)
print(f'Test content create')
start = time.time()
for _ in range(ITERATIONS):
id_ = uuid.uuid4().hex
await create_content_in_container(container, 'Item', id_)
await tm.commit(txn=txn)
await tm.begin(request=request)
end = time.time()
print(f'Done with {ITERATIONS} in {end - start} seconds')
async def run(container):
await run_create(container)
``` |
{
"source": "jordiclariana/gmail-yaml-filters",
"score": 2
} |
#### File: gmail_yaml_filters/tests/test_xml.py
```python
from __future__ import unicode_literals
import pytest
from gmail_yaml_filters.ruleset import RuleSet
from gmail_yaml_filters.main import ruleset_to_xml
NS = {'apps': 'http://schemas.google.com/apps/2006'}
def sample_rule(name):
return {
        'from': '<EMAIL>'.format(name),
'trash': True,
}
@pytest.fixture
def ruleset():
return RuleSet.from_object([sample_rule('alice'), sample_rule('🐶')])
def test_ruleset_to_xml(ruleset):
"""
A hideous, basic, but working integration test for turning rules into XML.
"""
xml = ruleset_to_xml(ruleset, pretty_print=False)
assert xml.startswith("<?xml version='1.0' encoding='utf8'?>")
assert '<apps:property name="from" value="<EMAIL>"/><apps:property name="shouldTrash" value="true"/></entry>' in xml
assert '<apps:property name="from" value="<EMAIL>"/><apps:property name="shouldTrash" value="true"/></entry>' in xml
def test_ruleset_with_empty_rule():
"""
Tests that we don't generate rules without any actions.
"""
xml = ruleset_to_xml(RuleSet.from_object([{'from': 'alice'}]))
assert '<entry>' not in xml
``` |
{
"source": "jordiclive/ControlPrefixes",
"score": 3
} |
#### File: examples/control/extract.py
```python
from rake_nltk import Rake
import nltk
r = Rake()
# Uses stopwords for English from NLTK, and all punctuation characters by
# default
def get_keywords(inp):
r.extract_keywords_from_text(inp)
result = r.get_ranked_phrases()
return result
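# Illustrative call (exact phrases depend on RAKE's ranking; output is indicative only):
#
#     get_keywords("The quick brown fox jumps over the lazy dog")
#     -> e.g. ['quick brown fox jumps', 'lazy dog']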
swap = {'“':'\"', '”':'\"', '’':'\''}
if __name__ == '__main__':
filename = '/u/scr/xlisali/contrast_LM/data_api/dataset/matching_debug_0.txt'
outname = '/u/scr/xlisali/contrast_LM/data_api/dataset/matching_debug_0_kw.txt'
line_lst = []
key_lst = []
out_handle = open(outname, 'w')
with open(filename, 'r') as f:
for line in f :
line = line.strip().split('||')[1]
# print(line)
# print('here')
line = line.split()
for i, word in enumerate(line):
if word in swap:
line[i] = swap[word]
print(line)
line = ' '.join(line)
result = get_keywords(line)
line_lst.append(line)
key_lst.append(result[:3])
out_handle.write('{}||{}\n'.format(line, result[:3]))
out_handle.close()
# print(line_lst)
# nltk.tokenize.sent_tokenize(line_lst[0])
# r.extract_keywords_from_sentences(line_lst)
# final = r.get_ranked_phrases()
# # final = get_keywords(line_lst)
# print(len(final), len(line_lst))
# print(final[0], len(final), len(final[0]))
```
#### File: transformers/examples/lightning_base.py
```python
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
BartForConditionalGeneration,
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from prefixTuning import PrefixTuning
logger = logging.getLogger(__name__)
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"GEC": AutoModelForSeq2SeqLM,
"translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class OurModelCheckPoint(pl.callbacks.ModelCheckpoint):
# def __init__(self, filepath: Optional[str] = None,
# monitor: Optional[str] = None,
# verbose: bool = False,
# save_last: Optional[bool] = None,
# save_top_k: Optional[int] = None,
# save_weights_only: bool = False,
# mode: str = "auto",
# period: int = 1,
# prefix: str = "",
# dirpath: Optional[Union[str, Path]] = None,
# filename: Optional[str] = None,):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def save_checkpoint(self, trainer, pl_module):
print('saving checkpoint now')
10/0
return
def _save_model(self, filepath: str, trainer, pl_module):
print('saving models now/..')
print('try calling the pl_module save')
pl_module.on_save_checkpoint(None, filepath)
return
class PrefixTransformer(pl.LightningModule):
def __init__(
self,
hparams: argparse.Namespace,
num_labels=None,
mode="base",
config=None,
tokenizer=None,
seq2seq_model=None,
**config_kwargs
):
"""Initialize a model, tokenizer and config."""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
print('the cache dir is {}'.format(cache_dir))
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
else:
self.config: PretrainedConfig = config
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
# print(self.hparams.preseqlen)
self.config.preseqlen = self.hparams.preseqlen
self.config.use_prefix = True
self.seq2seq_model_type = MODEL_MODES[mode]
if seq2seq_model is None:
self.seq2seq_model = BartForConditionalGeneration.from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
else:
self.seq2seq_model = seq2seq_model
config_prefix = AutoConfig.from_pretrained(self.hparams.model_name_or_path, cache_dir=cache_dir)
self.model_type = config_prefix.model_type
if self.hparams.optim_prefix == 'yes':
optim_prefix_bool = True
elif self.hparams.optim_prefix == 'no':
optim_prefix_bool = False
else:
assert False, "model_args.optim_prefix should be either yes or no"
print(self.model_type)
config_prefix._my_arg_tune_mode = self.hparams.tuning_mode
config_prefix._my_arg_task_mode = self.hparams.task_mode
config_prefix._my_arg_control = True
config_prefix.train_weights = False
config_prefix.optim_prefix = optim_prefix_bool
config_prefix.preseqlen = self.hparams.preseqlen
config_prefix.use_infix = (self.hparams.format_mode == 'infix')
config_prefix.format_mode = self.hparams.format_mode
config_prefix.prefix_dropout = self.hparams.prefix_dropout
config_prefix.vocab_size = len(self.tokenizer)
config_prefix.lowdata = ('lowdata' in self.hparams.output_dir)
if config_prefix.lowdata and self.hparams.use_lowdata_token == 'yes':
config_prefix.lowdata_token = self.tokenizer([self.hparams.lowdata_token],
add_prefix_space=True)['input_ids'] # return_tensors='np',
print(self.hparams.lowdata_token)
print(config_prefix.lowdata_token)
print(self.tokenizer.pad_token_id)
        # additional prefix-model hyperparameters
config_prefix.mid_dim = self.hparams.mid_dim
# print(config_prefix)
if self.hparams.prefixModel_name_or_path is not None:
print('loading from {}'.format(hparams.prefixModel_name_or_path))
self.model = PrefixTuning.from_pretrained(self.hparams.prefixModel_name_or_path,
from_tf=bool(".ckpt" in self.hparams.prefixModel_name_or_path),
cache_dir=cache_dir,
config=config_prefix,
model_gpt2=self.seq2seq_model)
else:
self.model = PrefixTuning(config_prefix, self.seq2seq_model)
def load_hf_checkpoint(self, *args, **kwargs):
        assert False, 'load_hf_checkpoint should not be needed here'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
optimizer = Adafactor(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
)
else:
optimizer = AdamW(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
"""The number of total training steps that will be run. Used for lr scheduler purposes."""
num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
dataset_size = len(self.train_loader.dataset)
return (dataset_size / effective_batch_size) * self.hparams.max_epochs
def setup(self, mode):
if mode == "fit":
self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError("You must implement this for your task")
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@pl.utilities.rank_zero_only
def save_checkpoint(self, trainer) -> None:
        print('Saving the checkpoint.')
return
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any], filepath=None) -> None:
# if filepath is not None:
# save_path = filepath[:-5]
# else:
# save_path = self.output_dir.joinpath("checkpoint-hello")
save_path = self.output_dir.joinpath("checkpoint-curr_best")
print('the suggested save_path is {}, saving to {}'.format(filepath[:-5], save_path))
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
print('SAVING TO checkpoint {}'.format(save_path))
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--prefixModel_name_or_path",
default=None,
type=str,
help="Path to pretrained prefix model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--prefix_mode",
default='activation',
type=str,
help="embedding or activation",
)
parser.add_argument(
"--preseqlen",
default=1,
type=int,
help="the length of the prefix.",
)
parser.add_argument(
"--optim_prefix",
default='yes',
type=str,
help="use the task specific optimization of the prefix.",
)
parser.add_argument(
"--tuning_mode",
default='prefixtune',
type=str,
help="Could be prefixtune or finetune",
)
parser.add_argument(
"--prefix_dropout",
default=0.0,
type=float,
help="the dropout rate for our prefix model.",
)
parser.add_argument(
"--use_dropout",
default='no',
type=str,
help="whether to dropout the main model during training. ",
)
parser.add_argument(
"--mid_dim",
default=512,
type=int,
help="the dimension of the intermediate layer.",
)
# parser.add_argument(
# "--task_mode",
# default='GEC',
# type=int,
# help="the default task, or dataset name. ",
# )
parser.add_argument(
"--format_mode",
default='cat',
type=str,
help="whether to look at the input again, including [infix, cat, peek, nopeek]",
)
parser.add_argument(
"--use_lowdata_token",
default='yes',
type=str,
help="whether or not to use the lowdata token, ",
)
parser.add_argument(
"--lowdata_token",
default='<PASSWORD>',
type=str,
help="the low data token to use. ",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default='/vol/bitbucket/jc1619/xsum',
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--encoder_layerdrop",
type=float,
help="Encoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--decoder_layerdrop",
type=float,
help="Decoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--dropout",
type=float,
help="Dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--attention_dropout",
type=float,
help="Attention dropout probability (Optional). Goes into model.config",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler",
default="linear",
choices=arg_to_scheduler_choices,
metavar=arg_to_scheduler_metavar,
type=str,
help="Learning rate scheduler",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
parser.add_argument("--train_batch_size", default=10, type=int)
parser.add_argument("--eval_batch_size", default=10, type=int)
parser.add_argument("--adafactor", action="store_true")
class BaseTransformer(pl.LightningModule):
def __init__(
self,
hparams: argparse.Namespace,
num_labels=None,
mode="base",
config=None,
tokenizer=None,
model=None,
**config_kwargs
):
"""Initialize a model, tokenizer and config."""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(hparams)
self.step_count = 0
self.output_dir = Path(self.hparams.output_dir)
cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
self.config = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
**({"num_labels": num_labels} if num_labels is not None else {}),
cache_dir=cache_dir,
**config_kwargs,
)
else:
self.config: PretrainedConfig = config
extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams, p, None):
assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
setattr(self.config, p, getattr(self.hparams, p))
if tokenizer is None:
self.tokenizer = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
cache_dir=cache_dir,
)
else:
self.tokenizer: PreTrainedTokenizer = tokenizer
self.config.preseqlen = -1
self.config.use_prefix = False
self.model_type = MODEL_MODES[mode]
if model is None:
self.model = self.model_type.from_pretrained(
self.hparams.model_name_or_path,
from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
config=self.config,
cache_dir=cache_dir,
)
else:
self.model = model
def load_hf_checkpoint(self, *args, **kwargs):
        # assert False, 'unclear why the model would need to be loaded here.'
self.model = self.model_type.from_pretrained(*args, **kwargs)
def get_lr_scheduler(self):
get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
scheduler = get_schedule_func(
self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def configure_optimizers(self):
"""Prepare optimizer and schedule (linear warmup and decay)"""
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
optimizer = Adafactor(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
)
else:
optimizer = AdamW(
optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
)
self.opt = optimizer
scheduler = self.get_lr_scheduler()
return [optimizer], [scheduler]
def test_step(self, batch, batch_nb):
return self.validation_step(batch, batch_nb)
def test_epoch_end(self, outputs):
return self.validation_end(outputs)
@property
def total_steps(self) -> int:
"""The number of total training steps that will be run. Used for lr scheduler purposes."""
num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores
effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
dataset_size = len(self.train_loader.dataset)
return (dataset_size / effective_batch_size) * self.hparams.max_epochs
def setup(self, mode):
if mode == "fit":
self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
def get_dataloader(self, type_path, batch_size, shuffle=False):
raise NotImplementedError("You must implement this for your task")
def train_dataloader(self):
return self.train_loader
def val_dataloader(self):
return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)
def test_dataloader(self):
return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
def _feature_file(self, mode):
return os.path.join(
self.hparams.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
str(self.hparams.max_seq_length),
),
)
@pl.utilities.rank_zero_only
def on_save_checkpoint(self, checkpoint: Dict[str, Any], filepath=None) -> None:
save_path = self.output_dir.joinpath("checkpoint-curr_best")
print('the suggested save_path is {}, saving to {}'.format(filepath[:-5], save_path))
# save_path = self.output_dir.joinpath("best_tfmr")
self.model.config.save_step = self.step_count
self.model.save_pretrained(save_path)
self.tokenizer.save_pretrained(save_path)
@staticmethod
def add_model_specific_args(parser, root_dir):
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default='/vol/bitbucket/jc1619/xsum',
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--encoder_layerdrop",
type=float,
help="Encoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--decoder_layerdrop",
type=float,
help="Decoder layer dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--dropout",
type=float,
help="Dropout probability (Optional). Goes into model.config",
)
parser.add_argument(
"--attention_dropout",
type=float,
help="Attention dropout probability (Optional). Goes into model.config",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler",
default="linear",
choices=arg_to_scheduler_choices,
metavar=arg_to_scheduler_metavar,
type=str,
help="Learning rate scheduler",
)
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
parser.add_argument("--train_batch_size", default=32, type=int)
parser.add_argument("--eval_batch_size", default=32, type=int)
parser.add_argument("--adafactor", action="store_true")
class LoggingCallback(pl.Callback):
def on_batch_end(self, trainer, pl_module):
lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lrs)
def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Validation results *****")
metrics = trainer.callback_metrics
# Log results
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
rank_zero_info("***** Test results *****")
metrics = trainer.callback_metrics
# Log and save results to file
output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(metrics):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
# TODO(SS): allow all pl args? parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O2",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument(
"--gradient_accumulation_steps",
dest="accumulate_grad_batches",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
)
def generic_train(
model,
args: argparse.Namespace,
early_stopping_callback=False,
logger=True, # can pass WandbLogger() here
extra_callbacks=[],
checkpoint_callback=None,
logging_callback=None,
**extra_train_kwargs
):
pl.seed_everything(args.seed)
# init model
odir = Path(model.hparams.output_dir)
odir.mkdir(exist_ok=True)
# add custom checkpoints
# if checkpoint_callback is None:
# checkpoint_callback = pl.callbacks.ModelCheckpoint(
# filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
# )
#get_checkpoint_callback(args.output_dir, model.val_metric, args.save_top_k, lower_is_better)
# OLD VERSION
# checkpoint_callback = OurModelCheckPoint(filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1)
# monitor_var = args.monitor_var
checkpoint_callback = OurModelCheckPoint(filepath=args.output_dir, prefix="checkpoint", monitor="val_rouge2", mode="max", save_top_k=1)
# checkpoint_callback = OurModelCheckPoint(
# filepath=os.path.join(args.output_dir, exp),
# monitor=f"val_{metric}",
# mode="min" if "loss" in metric else "max",
# save_top_k=save_top_k,
# period=0, # maybe save a checkpoint every time val is run, not just end of epoch.
# )
if logging_callback is None:
logging_callback = LoggingCallback()
train_params = {}
# TODO: remove with PyTorch 1.6 since pl uses native amp
if args.fp16:
train_params["precision"] = 16
train_params["amp_level"] = args.fp16_opt_level
if args.gpus > 1:
train_params["distributed_backend"] = "ddp"
train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
# train_params['progress_bar_refresh_rate'] = 0
check_every_n_epochs = 1 #int (50*args.train_batch_size / 100) # want to check every 50 steps.
print('the max number of epochs is {}'.format(args.max_epochs))
print('early stopping', early_stopping_callback)
print('checkpoint_callback', checkpoint_callback)
print('logging', logging_callback)
trainer = pl.Trainer.from_argparse_args(
args,
max_epochs=args.max_epochs,
weights_summary=None,
callbacks=[logging_callback] + extra_callbacks,
logger=logger,
# check_val_every_n_epoch=check_every_n_epochs,
checkpoint_callback=checkpoint_callback,
early_stop_callback=early_stopping_callback,
**train_params,
)
print('args.do_train:', args.do_train)
if args.do_train:
trainer.fit(model)
return trainer
```
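For reference, a minimal driver sketch showing how `add_generic_args`, `PrefixTransformer.add_model_specific_args` and `generic_train` from the file above are meant to be wired together. The `GECModule` class and the `finetune` module it is imported from are assumptions for illustration (any `PrefixTransformer` subclass that implements `get_dataloader`, `training_step` and `validation_step` would do); this is a sketch, not part of the original code.
```python
import argparse
import os

from lightning_base import add_generic_args, generic_train
from finetune import GECModule  # hypothetical PrefixTransformer subclass


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    # add_model_specific_args mutates the parser in place (it does not return it)
    GECModule.add_model_specific_args(parser, os.getcwd())
    # add_generic_args does not register --gpus, but generic_train reads args.gpus
    parser.add_argument("--gpus", type=int, default=1)
    args = parser.parse_args()

    model = GECModule(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        trainer.test(model)


if __name__ == "__main__":
    main()
```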
#### File: examples/seq2seq/topic_url_process.py
```python
import sys, os
from collections import Counter
import numpy as np
def process_url(url):
url = url.strip('/')
keywords = url.split('/')[-3:]
# print('old', keywords)
if keywords[-1].isdigit():
if 'www' in keywords[0]:
del keywords[0]
keywords = keywords[:-1]
else:
detailed_topic = keywords[-1].split('-')[:-1]
if 'www' in keywords[0]:
del keywords[0]
# print(detailed_topic, keywords)
if len(detailed_topic) > 0:
keywords = keywords[:-1] + [detailed_topic[0]]
else:
keywords = keywords[:-1]
# print(tuple(keywords))
return tuple([x for x in keywords if x ])
def count_topic(topic_lst):
count = Counter()
for topic in topic_lst:
count[topic] += 1
print(count)
    # gather topics that have count > 10
topic_selected_lst = []
for key, val in count.items():
if val > 10:
topic_selected_lst.append(key)
print(key, val)
def get_file_handle():
pass
if __name__ == '__main__':
topic_lst = []
with open('xsum_topic/{}.topic'.format(sys.argv[1]), 'r') as f:
for line in f:
line = line.strip()
topic = process_url(line)
topic_lst.append(topic)
# count_topic(topic_lst)
# two options.
# 1. Train on (news, *) and eval on (sports)
# 2. Train on (news, uk), (news, business), (news, world) and eval on (news, other*)
option = 2
if option == 1:
data_dir = 'xsum_topic-news-sports2'
train_path_src = os.path.join(data_dir, 'train.source')
train_path_tgt = os.path.join(data_dir, 'train.target')
train_path_topic = os.path.join(data_dir, 'train.topic')
dev_path_src = os.path.join(data_dir, 'val.source')
dev_path_tgt = os.path.join(data_dir, 'val.target')
dev_path_topic = os.path.join(data_dir, 'val.topic')
test_path_src = os.path.join(data_dir, 'test.source')
test_path_tgt = os.path.join(data_dir, 'test.target')
test_path_topic = os.path.join(data_dir, 'test.topic')
source_path = 'xsum_topic/{}.source'.format(sys.argv[1])
source_lst = []
with open(source_path, 'r') as f:
for line in f :
source_lst.append(line)
target_path = 'xsum_topic/{}.target'.format(sys.argv[1])
target_lst = []
with open(target_path, 'r') as f:
for line in f:
target_lst.append(line)
assert len(target_lst) == len(source_lst)
assert len(target_lst) == len(topic_lst)
max_num = None
if sys.argv[3] == 'train':
out_source = open(train_path_src, 'w') # train
out_target = open(train_path_tgt, 'w')
out_topic = open(train_path_topic, 'w')
print('writing to train')
elif sys.argv[3] == 'test':
out_source = open(test_path_src, 'w') # test
out_target = open(test_path_tgt, 'w')
out_topic = open(test_path_topic, 'w')
print('writing to test')
max_num = 8000
elif sys.argv[3] == 'val':
out_source = open(dev_path_src, 'w') # dev
out_target = open(dev_path_tgt, 'w')
out_topic = open(dev_path_topic, 'w')
print('writing to val')
final_lst_topic = []
final_lst_source = []
final_lst_target = []
for topic, src, tgt in zip(topic_lst, source_lst, target_lst):
if topic[0] == sys.argv[2]:
if max_num is None:
out_topic.write(str(topic) + '\n')
out_source.write(src)
out_target.write(tgt)
else:
final_lst_topic.append(str(topic))
final_lst_source.append(src)
final_lst_target.append(tgt)
if max_num is not None:
assert len(final_lst_topic) == len(final_lst_target)
assert len(final_lst_topic) == len(final_lst_source)
print('the max number is {}'.format(max_num))
cand_lst = np.random.choice(len(final_lst_topic), max_num, replace=False)
for cand in cand_lst:
out_topic.write(final_lst_topic[cand] + '\n')
out_source.write(final_lst_source[cand])
out_target.write(final_lst_target[cand])
# elif topic[0] == 'sport':
# out_topic_val.write(str(topic) + '\n')
# out_source_val.write(src)
# out_target_val.write(tgt)
out_source.close()
out_topic.close()
out_target.close()
# out_topic_val.close()
# out_source_val.close()
# out_target_val.close()
elif option == 2:
data_dir = 'xsum_news2'
train_path_src = os.path.join(data_dir, 'train.source')
train_path_tgt = os.path.join(data_dir, 'train.target')
train_path_topic = os.path.join(data_dir, 'train.topic')
dev_path_src = os.path.join(data_dir, 'val.source')
dev_path_tgt = os.path.join(data_dir, 'val.target')
dev_path_topic = os.path.join(data_dir, 'val.topic')
test_path_src = os.path.join(data_dir, 'test.source')
test_path_tgt = os.path.join(data_dir, 'test.target')
test_path_topic = os.path.join(data_dir, 'test.topic')
source_path = 'xsum_topic/{}.source'.format(sys.argv[1])
source_lst = []
with open(source_path, 'r') as f:
for line in f :
source_lst.append(line)
target_path = 'xsum_topic/{}.target'.format(sys.argv[1])
target_lst = []
with open(target_path, 'r') as f:
for line in f:
target_lst.append(line)
assert len(target_lst) == len(source_lst)
assert len(target_lst) == len(topic_lst)
max_num = None
if sys.argv[3] == 'train':
out_source = open(train_path_src, 'w') # train
out_target = open(train_path_tgt, 'w')
out_topic = open(train_path_topic, 'w')
print('writing to train')
elif sys.argv[3] == 'test':
out_source = open(test_path_src, 'w') # test
out_target = open(test_path_tgt, 'w')
out_topic = open(test_path_topic, 'w')
print('writing to test')
max_num = 8000
elif sys.argv[3] == 'val':
out_source = open(dev_path_src, 'w') # dev
out_target = open(dev_path_tgt, 'w')
out_topic = open(dev_path_topic, 'w')
print('writing to val')
final_lst_topic = []
final_lst_source = []
final_lst_target = []
for topic, src, tgt in zip(topic_lst, source_lst, target_lst):
if topic[0] == 'news':
if topic in [('news', 'uk'), ('news', 'business'), ('news', 'world'),]:
if sys.argv[2] == 'yes':
if max_num is None:
out_topic.write(str(topic) + '\n')
out_source.write(src)
out_target.write(tgt)
else:
final_lst_topic.append(str(topic))
final_lst_source.append(src)
final_lst_target.append(tgt)
else:
if sys.argv[2] == 'no':
if max_num is None:
out_topic.write(str(topic) + '\n')
out_source.write(src)
out_target.write(tgt)
else:
final_lst_topic.append(str(topic))
final_lst_source.append(src)
final_lst_target.append(tgt)
# elif topic[0] == 'sport':
# out_topic_val.write(str(topic) + '\n')
# out_source_val.write(src)
# out_target_val.write(tgt)
if max_num is not None:
assert len(final_lst_topic) == len(final_lst_target)
assert len(final_lst_topic) == len(final_lst_source)
print('the max number is {}'.format(max_num))
cand_lst = np.random.choice(len(final_lst_topic), max_num, replace=False)
for cand in cand_lst:
out_topic.write(final_lst_topic[cand] + '\n')
out_source.write(final_lst_source[cand])
out_target.write(final_lst_target[cand])
out_source.close()
out_topic.close()
out_target.close()
# out_topic_val.close()
# out_source_val.close()
# out_target_val.close()
```
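A quick sanity check for `process_url` from the script above; the example URLs are assumed BBC/XSum-style article links rather than values taken from the original data.
```python
# Illustrative only: the URL formats below are assumptions about the XSum source links.
from topic_url_process import process_url

# a purely numeric trailing segment is dropped
assert process_url('https://www.bbc.co.uk/sport/football/123456') == ('sport', 'football')
# a slug like 'uk-12345678' keeps its first token as the sub-topic; the 'www...' host is removed
assert process_url('https://www.bbc.co.uk/news/uk-12345678') == ('news', 'uk')
```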
#### File: examples/text-generation/qualitative_read_lowdata.py
```python
import os, sys
import glob
from collections import defaultdict
def list_to_line(lst, line):
for name in lst:
# print(name)
line_result = open(name, "r").readlines()[line]
print(line_result)
print()
return
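# Maps (model/task tag, low-data training-set size) to the '<size>st=<max_steps>'
# setting whose result files should be read; runs with any other step count are
# skipped by the filtering loop below.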
hyperparam_dict = {('data2textprefixtunet=table', 100): '100st=200',
('data2textfinetunet=table', 100): '100st=100',
('data2textprefixtunet=table', 50): '50st=200',
('data2textfinetunet=table', 50): '50st=100',
('data2textprefixtunet=table', 200): '200st=200',
('data2textfinetunet=table', 200): '200st=200',
('data2textprefixtunet=table', 500): '500st=400',
('data2textfinetunet=table', 500): '500st=400',
}
file_lst = glob.glob('e2e_results_conv/*lowdata*t=table-to-text:*beam')
print(file_lst)
name_dict_pt = defaultdict(list)
name_dict_ft = defaultdict(list)
for name in file_lst:
if 'finetune' not in name and 'prefixtune' not in name:
continue
if 'Alowdata' not in name and 'Blowdata' not in name and 'Clowdata'not in name and 'Dlowdata' not in name \
and 'Elowdata' not in name and 'Flowdata' not in name and 'Glowdata' not in name :
continue
base_name = os.path.basename(name)
base_name = base_name.split('_')
# print(base_name)
train_label = base_name[14] # seeded dataset
    model_name = base_name[0] + base_name[-3].split('-')[0]  # table or summary
steps = base_name[15] # max_steps
seed = base_name[10] # seeded training.
train_num = int(base_name[15].split('st=')[0])
if (model_name, train_num) not in hyperparam_dict:
continue
else:
if steps != hyperparam_dict[(model_name, train_num)] :
continue
if train_label != str(0):
continue
print(train_label, model_name, steps, seed, train_num)
if base_name[0] == 'data2textfinetune':
name_dict_ft[train_num].append(name)
else:
name_dict_pt[train_num].append(name)
# print(name_dict)
list_to_line(['e2e_results_conv/data2textfinetune_y_51_act_cat_b=10-e=5_d=0.0_'
'u=no_lr=5e-05_w=0.0_s=200_r=n_m=512_Clowdata_4_50st=100_ev=50_ws'
'=0_t=table-to-text:-checkpoint-50_valid_src'], 300)
print(50, '-'*20)
list_to_line(name_dict_pt[50], 300)
print(100, '-'*20)
list_to_line(name_dict_pt[100], 300)
print(200, '-'*20)
list_to_line(name_dict_pt[200], 300)
print(500, '-'*20)
list_to_line(name_dict_pt[500], 300)
print('-'*300)
print(50, '-'*20)
list_to_line(name_dict_ft[50], 300)
print(100, '-'*20)
list_to_line(name_dict_ft[100], 300)
print(200, '-'*20)
list_to_line(name_dict_ft[200], 300)
print(500, '-'*20)
list_to_line(name_dict_ft[500], 300)
#
# result_dict[model_name][train_num][steps].append(
# (float(bleu), float(nist), float(meteor), float(rougel), float(cider)))
# print(result_dict)
# print(result_dict['data2textfinetune_n_20'][100])
# prone down hyperparam for data building.
```
#### File: examples/text-generation/run_compose.py
```python
import argparse
import logging
import numpy as np
import torch
import json
from transformers import (
CTRLLMHeadModel,
CTRLTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
TransfoXLLMHeadModel,
TransfoXLTokenizer,
XLMTokenizer,
XLMWithLMHeadModel,
XLNetLMHeadModel,
XLNetTokenizer,
BertForMaskedLM, BertModel,
BertTokenizer, BertTokenizerFast, AutoConfig,
)
import sys, os
sys.path.insert(1, '/u/scr/xlisali/contrast_LM/transformers/examples/control')
from train_control import PrefixTuning, PrefixEmbTuning
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"ctrl": (CTRLLMHeadModel, CTRLTokenizer),
"openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"xlnet": (XLNetLMHeadModel, XLNetTokenizer),
"transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer),
"xlm": (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by <NAME>
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, <NAME>, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
#
# Functions to prepare models' input
#
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
if args.temperature > 0.7:
logger.info("CTRL typically works better with lower temperatures (and lower top_k).")
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)
if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):
logger.info("WARNING! You are not starting your generation from a control code so you won't get good results")
return prompt_text
def prepare_xlm_input(args, model, tokenizer, prompt_text):
# kwargs = {"language": None, "mask_token_id": None}
# Set the language
use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb
if hasattr(model.config, "lang2id") and use_lang_emb:
available_languages = model.config.lang2id.keys()
if args.xlm_language in available_languages:
language = args.xlm_language
else:
language = None
while language not in available_languages:
language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ")
model.config.lang_id = model.config.lang2id[language]
# kwargs["language"] = tokenizer.lang2id[language]
# TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers
# XLM masked-language modeling (MLM) models need masked token
# is_xlm_mlm = "mlm" in args.model_name_or_path
# if is_xlm_mlm:
# kwargs["mask_token_id"] = tokenizer.mask_token_id
return prompt_text
def prepare_xlnet_input(args, _, tokenizer, prompt_text):
prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX
prompt_text = prefix + prompt_text
return prompt_text
def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX
prompt_text = prefix + prompt_text
return prompt_text
PREPROCESSING_FUNCTIONS = {
"ctrl": prepare_ctrl_input,
"xlm": prepare_xlm_input,
"xlnet": prepare_xlnet_input,
"transfo-xl": prepare_transfoxl_input,
}
def read_e2e_files(path, tokenizer):
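    # Each input line is expected to look like '<linearized table>||<reference text>';
    # returns {source + ' ' + bos_token: [reference, ...]}.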
file_dict = {}
with open(path, 'r') as f:
for line in f:
src, tgt = line.strip().split('||')
src = src + ' {}'.format(tokenizer.bos_token)
if src not in file_dict:
file_dict[src] = []
file_dict[src].append(tgt)
return file_dict
def read_webnlg_files(path, tokenizer):
file_dict = {}
with open(path) as f:
lines_dict = json.load(f)
full_rela_lst = []
full_src_lst = []
# full_tgt_lst = []
total_count = 0
for i, example in enumerate(lines_dict['entries']):
sents = example[str(i + 1)]['lexicalisations']
triples = example[str(i + 1)]['modifiedtripleset']
rela_lst = []
temp_triples = ''
for j, tripleset in enumerate(triples):
subj, rela, obj = tripleset['subject'], tripleset['property'], tripleset['object']
rela_lst.append(rela)
if i > 0:
temp_triples += ' | '
temp_triples += '{} : {} : {}'.format(subj, rela, obj)
temp_triples = ' {} {}'.format(temp_triples, tokenizer.bos_token)
for sent in sents:
if True: #sent["comment"] == 'good'
if (temp_triples,tuple(rela_lst)) not in file_dict:
file_dict[(temp_triples,tuple(rela_lst))] = []
full_src_lst.append(temp_triples)
full_rela_lst.append(tuple(rela_lst))
file_dict[(temp_triples,tuple(rela_lst))].append(sent["lex"])
print(len(file_dict), len(full_src_lst))
assert len(full_rela_lst) == len(full_src_lst)
assert len(full_rela_lst) == len(file_dict)
return file_dict
def read_triples_files(path, tokenizer):
file_dict = {}
with open(path) as f:
lines_dict = json.load(f)
full_rela_lst = []
full_src_lst = []
for example in lines_dict:
rela_lst = []
temp_triples = ''
for i, tripleset in enumerate(example['tripleset']):
subj, rela, obj = tripleset
rela = rela.lower()
rela_lst.append(rela)
if i > 0:
temp_triples += ' | '
temp_triples += '{} : {} : {}'.format(subj, rela, obj)
temp_triples = ' {} {}'.format(temp_triples, tokenizer.bos_token)
for sent in example['annotations']:
if (temp_triples, tuple(rela_lst)) not in file_dict:
file_dict[(temp_triples, tuple(rela_lst))] = []
full_src_lst.append(temp_triples)
full_rela_lst.append(tuple(rela_lst))
file_dict[(temp_triples, tuple(rela_lst))].append(sent['text'])
print(len(file_dict), len(full_src_lst))
assert len(full_rela_lst) == len(full_src_lst)
assert len(full_rela_lst) == len(file_dict)
return file_dict
def write_e2e_corr(prompt_lst, file_dict, corr_path):
with open(corr_path, 'w') as f:
for x in prompt_lst:
for line in file_dict[x]:
print(line, file=f)
print('', file=f)
return
def write_e2e_src(prompt_lst, corr_path):
with open(corr_path, 'w') as f:
for x in prompt_lst:
print(x, file=f)
return
def get_emb(sent_lst, word_lst, num_layer=1):
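    # Returns (full_score, edited_sent, chosen_word):
    #   full_score  - BERT hidden states at the (first) occurrence of each target word,
    #                 taken from the last `num_layer` layers; one row per kept sentence
    #   edited_sent - each kept sentence as a token list: [word, '[BOS]', ..., '[', word, ']', ..., '[EOS]']
    #   chosen_word - the target word for each kept sentence
    # Sentences whose word offsets cannot be matched to the BERT tokenization are dropped.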
# load bert
tokenizer_bert = BertTokenizerFast.from_pretrained('bert-large-uncased')
model = BertModel.from_pretrained('bert-large-uncased', return_dict=True).cuda()
for param in model.parameters():
param.requires_grad = False
device = model.device
edited_sent = []
chosen_word = []
with torch.no_grad():
computed_ = 0
mid_ = 300
full_score = []
while computed_ < len(sent_lst):
temp_sent = sent_lst[computed_:computed_ + mid_]
temp_word = word_lst[computed_:computed_ + mid_]
temp_input = tokenizer_bert(temp_sent, return_tensors="pt", padding=True,
is_split_into_words=False, return_offsets_mapping=True, add_special_tokens=True)
input_ids = temp_input["input_ids"]
# print(temp_input.keys())
mask_input = temp_input['attention_mask']
bsz, seqlen = input_ids.shape
# print(input_ids.shape)
cand_idx = tokenizer_bert(temp_word, add_special_tokens=False)['input_ids']
# print(cand_idx)
# if BPE has multiple subwords.
cand_idx = torch.tensor([i[-1] for i in cand_idx]) # bsz
# print(cand_idx)
cand_idx2 = cand_idx.unsqueeze(1).expand(bsz, seqlen)
mask = (input_ids == cand_idx2)
# print(mask.sum(dim=1))
# print(mask.nonzero())
            # what if the occurrence of a subword is not in the primary word?
            # if it has multiple occurrences, only the first one is taken.
mask = (mask.cumsum(dim=1) == 1) & mask
# print(mask)
# print(mask.sum(dim=1))
# print(mask.nonzero())
mask_idx = mask.nonzero()
# print(input_ids.shape)
edit_temp = []
keep_mask = []
word_temp = []
for i, (sent1, word1) in enumerate(zip(temp_sent, temp_word)):
                # TODO: could check against the offsets and make final changes!
temp_idx1 = temp_input["offset_mapping"][i][mask_idx[i, 1]]
# print(word1, sent1)
# print(sent1[temp_idx1[0]:temp_idx1[1]])
sent1 = sent1.split()
widx = sent1.index(word1)
by_tokenl = sum([len(l) + 1 for l in sent1[:widx]])
by_tokenr = sum([len(l) + 1 for l in sent1[:widx + 1]]) - 1
# print(by_tokenl, by_tokenr, temp_idx1)
if by_tokenl != temp_idx1[0].item() and by_tokenr != temp_idx1[1].item():
# print('dangerous')
# print(sent1, word1, by_tokenl, by_tokenr, temp_idx1)
# simple option: delete it form input_ids
keep_mask.append(False)
continue
else:
keep_mask.append(True)
new_sent = [word1, '[BOS]'] + sent1[:widx] + ['[', sent1[widx], ']'] + sent1[widx + 1:] + ['[EOS]']
assert len(new_sent) == len(sent1) + 5
edit_temp.append(new_sent)
word_temp.append(word1)
keep_mask = torch.tensor(keep_mask)
# print(keep_mask.shape, input_ids.shape, mask.shape, 'hi')
input_ids = input_ids[keep_mask]
mask = mask[keep_mask]
mask_input = mask_input[keep_mask]
# print(input_ids.shape, mask.shape, len(edit_temp))
assert input_ids.size(0) == len(edit_temp)
edited_sent += edit_temp
chosen_word += word_temp
# print(len(edited_sent), len(chosen_word))
outputs = model(input_ids.to(device), attention_mask=mask_input.to(device), output_hidden_states=True)
if num_layer > 1:
all_hidden_states = outputs.hidden_states
selected_all_hidden_states = [ii[mask] for ii in all_hidden_states[-num_layer:]]
# print([ii.shape for ii in selected_all_hidden_states])
hidden_layer = torch.stack(selected_all_hidden_states, dim=1)
# print(hidden_layer.shape, selected_all_hidden_states[0].shape)
# print('all hidden', selected_all_hidden_states.shape)
else:
last_hidden_states = outputs.last_hidden_state
hidden_layer = last_hidden_states[mask].unsqueeze(1)
computed_ += mid_
full_score.append(hidden_layer.cpu())
full_score = torch.cat(full_score, dim=0)
return full_score, edited_sent, chosen_word
def adjust_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
def read_doc_for_embmatch(file_name, num_layer):
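    # Each line of file_name is expected to look like '<word>||<sentence>'.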
word_lst = []
sent_lst = []
with open(file_name, 'r') as f:
for line in f:
word, sent = line.strip().split('||')
word_lst.append(word)
sent_lst.append(sent)
emb_match, sent_cleaned_lst, chosen_word = get_emb(sent_lst, word_lst, num_layer=num_layer)
prompt_text_lst = [word + ' [BOS]' for word in chosen_word]
return prompt_text_lst, emb_match.split(1), sent_cleaned_lst
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
required=False,
help="Path to pre-trained tokenizer or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--prefixModel_name_or_path",
default=None,
type=str,
required=False,
help="Path to pre-trained PrefixTuning Model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--prefixModel_name_or_path2",
default=None,
type=str,
required=False,
help="Path to the second pre-trained PrefixTuning Model or shortcut name selected in the list: " + ", ".join(
MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path2",
default=None,
type=str,
required=False,
help="Path to the second pre-trained Model or shortcut name selected in the list: " + ", ".join(
MODEL_CLASSES.keys()),
)
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--task_mode", type=str, default="embMatch")
parser.add_argument("--control_mode", type=str, default="yes")
parser.add_argument("--prefix_mode", type=str, default="activation")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument(
"--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
)
parser.add_argument("--k", type=int, default=0)
parser.add_argument("--p", type=float, default=0.9)
parser.add_argument("--alpha1", type=float, default=0.9)
parser.add_argument("--alpha2", type=float, default=0.9)
parser.add_argument("--split_file", type=str, default='compose2')
parser.add_argument("--tuning_mode", type=str, default="finetune", help="prefixtune or finetune")
parser.add_argument("--format_mode", type=str, default="peek", help="peek, cat, nopeek, or infix")
parser.add_argument("--optim_prefix", type=str, default="no", help="optim_prefix")
parser.add_argument("--preseqlen", type=int, default=5, help="preseqlen")
parser.add_argument("--prefix", type=str, default="", help="Text added prior to input.")
parser.add_argument("--control_dataless", type=str, default="no", help="control dataless mode")
parser.add_argument("--padding_text", type=str, default="", help="Deprecated, the use of `--prefix` is preferred.")
parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
logger.warning(
"device: %s, n_gpu: %s, 16-bits training: %s",
args.device,
args.n_gpu,
args.fp16,
)
set_seed(args)
# Initialize the model and tokenizer
if args.tuning_mode == 'finetune':
print(args.tuning_mode, args.model_name_or_path)
print(args.model_name_or_path2)
        assert args.model_name_or_path2 is not None, "model_name_or_path2 should not be None because we are composing two models."
try:
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
except KeyError:
raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)")
if args.model_name_or_path:
print('loading the trained tokenizer')
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
elif args.tokenizer_name:
print('loading from the init tokenizer')
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name)
print(len(tokenizer), tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
config = AutoConfig.from_pretrained(args.model_name_or_path)
print(config)
model = model_class.from_pretrained(args.model_name_or_path, config=config)
config2 = AutoConfig.from_pretrained(args.model_name_or_path2)
print(config2)
model2 = model_class.from_pretrained(args.model_name_or_path2, config=config2)
model.to(args.device)
model2.to(args.device)
elif args.tuning_mode == 'prefixtune':
print('loading from PrefixTuning.', args.prefixModel_name_or_path,)
print('loading the second model from PrefixTuning.', args.prefixModel_name_or_path2, )
if args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
            assert False, 'should not init config from scratch.'
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
try:
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
except KeyError:
raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)")
if args.model_name_or_path:
print('loading the trained tokenizer')
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
elif args.tokenizer_name:
print('loading from the init tokenizer')
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name)
# TODAYFIX.
config._my_arg_tune_mode = args.tuning_mode
config._my_arg_task_mode = args.task_mode
model = model_class.from_pretrained(args.model_name_or_path, config=config)
model.to(args.device)
print(len(tokenizer), tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
# TODO LISA
add_pad = False
if args.model_name_or_path == 'gpt2-medium':
if args.task_mode == 'dataless':
print(args.tuning_mode, 'dataless setting, so no new tokens at all.')
print('We do not add special tokens to the tokenizer, instead, we just finetune on <|endoftext|>')
print(tokenizer.eos_token_id)
print(tokenizer.eos_token)
print(tokenizer.pad_token_id)
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.pad_token, tokenizer.pad_token_id)
elif add_pad:
                print('extending the size of word embeddings to include the [PAD] token')
num_added_tokens = tokenizer.add_special_tokens(
{'pad_token': '[PAD]'})
embedding_layer = model.resize_token_embeddings(len(tokenizer))
else:
print(tokenizer.eos_token_id)
print(tokenizer.eos_token)
print(tokenizer.pad_token_id)
tokenizer.pad_token = tokenizer.eos_token
print(tokenizer.pad_token, tokenizer.pad_token_id)
########################################3
print(len(tokenizer), tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token)
gpt2 = model
# config._my_arg_task_mode = args.task_mode
# config._my_arg_control = True
# config.train_weights = 'no'
print(config)
if args.optim_prefix == 'yes':
optim_prefix_bool = True
elif args.optim_prefix == 'no':
optim_prefix_bool = False
else:
assert False, "model_args.optim_prefix should be either yes or no"
if args.prefixModel_name_or_path is not None:
config = AutoConfig.from_pretrained(args.prefixModel_name_or_path, )
print(config)
if args.prefix_mode == 'embedding':
model = PrefixEmbTuning.from_pretrained(
args.prefixModel_name_or_path,
from_tf=bool(".ckpt" in args.prefixModel_name_or_path, ),
config=config,
model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=args.preseqlen,
use_infix=(args.format_mode == 'infix')
)
elif args.prefix_mode == 'activation':
model = PrefixTuning.from_pretrained(
args.prefixModel_name_or_path,
from_tf=bool(".ckpt" in args.prefixModel_name_or_path, ),
config=config,
model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=args.preseqlen,
use_infix=(args.format_mode == 'infix')
)
model.to(args.device)
else:
assert False, "prefixModel_name_or_path is NONE."
if args.prefixModel_name_or_path2 is not None:
config2 = AutoConfig.from_pretrained(args.prefixModel_name_or_path2, )
print(config2)
if args.prefix_mode == 'embedding':
model2 = PrefixEmbTuning.from_pretrained(
args.prefixModel_name_or_path2,
from_tf=bool(".ckpt" in args.prefixModel_name_or_path2, ),
config=config2,
model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=args.preseqlen,
use_infix=(args.format_mode == 'infix')
)
elif args.prefix_mode == 'activation':
model2 = PrefixTuning.from_pretrained(
args.prefixModel_name_or_path2,
from_tf=bool(".ckpt" in args.prefixModel_name_or_path2, ),
config=config2,
model_gpt2=gpt2, optim_prefix=optim_prefix_bool, preseqlen=args.preseqlen,
use_infix=(args.format_mode == 'infix')
)
model2.to(args.device)
else:
assert False, "prefixModel_name_or_path2 is NONE."
# DEBUG
# if args.model_name_or_path == 'gpt2-medium':
# num_added_tokens = tokenizer.add_special_tokens(
# {'pad_token': '[PAD]', 'bos_token': '[BOS]', 'eos_token': '[EOS]'})
# embedding_layer = model.resize_token_embeddings(len(tokenizer))
# if not args.model_name_or_path == 'gpt2-medium':
# # num_added_tokens = tokenizer.add_special_tokens(
# # {'pad_token': '[PAD]', 'bos_token': '[BOS]', 'eos_token': '[EOS]'})
# embedding_layer = model.resize_token_embeddings(len(tokenizer)-3)
#
#
#
# model1_param = list(model.parameters())
# model2 = model_class.from_pretrained('gpt2-medium')
# model2_param = list(model2.parameters())
# print(len(model1_param), len(model2_param))
# for i, j in zip(model1_param, model2_param):
# print(torch.abs(i.mean()-j.mean()), end=' ')
if args.fp16:
model.half()
args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)
logger.info(args)
if args.task_mode == 'topic':
# ["world","sports","business","science"]
# prompt_text_lst = [
# "Topic music [BOS]",
# "Topic sci-fi [BOS]",
# "Topic humor [BOS]",
# "Topic happy [BOS]",
# "Topic novel [BOS]",
# "Topic stories [BOS]",
# "Topic fiction [BOS]",
# "Topic news [BOS]",
# "Topic arts [BOS]",
# "Topic technology [BOS]",
# "Topic science [BOS]",
# "Topic sports [BOS]",
# "Topic world [BOS]",
# "Topic business [BOS]",
#
# ]
# prompt_text_lst = [
# "Topic technology [BOS] The president",
# "Topic science [BOS] The president",
# "Topic sports [BOS] The president",
# "Topic world [BOS] The president",
# "Topic business [BOS] The president",
# ]
# prompt_text_lst = [
# "Topic technology: The president",
# "Topic science: The president",
# "Topic sports: The president",
# "Topic world: The president",
# "Topic business: The president",
# ]
prompt_text_lst = [
"Topic science{}The president".format(tokenizer.bos_token),
"Topic sports{}The president".format(tokenizer.bos_token),
"Topic world{}The president".format(tokenizer.bos_token),
"Topic business{}The president".format(tokenizer.bos_token),
"Topic technology{}".format(tokenizer.bos_token),
"Topic science{}".format(tokenizer.bos_token),
"Topic sports{}".format(tokenizer.bos_token),
"Topic world{}".format(tokenizer.bos_token),
"Topic business{}".format(tokenizer.bos_token),
"Topic science{}Once upon a time,".format(tokenizer.bos_token),
"Topic sports{}Once upon a time,".format(tokenizer.bos_token),
"Topic world{}Once upon a time,".format(tokenizer.bos_token),
"Topic business{}Once upon a time,".format(tokenizer.bos_token),
"Topic technology{}Last week".format(tokenizer.bos_token),
"Topic science{}Last week".format(tokenizer.bos_token),
"Topic sports{}Last week".format(tokenizer.bos_token),
"Topic world{}Last week".format(tokenizer.bos_token),
"Topic business{}Last week".format(tokenizer.bos_token),
]
decode_mode = 'nucleus'
QUICK_CHECK = True
elif args.task_mode == 'length':
# prompt_text_lst = [
# "length 10 [BOS]",
# "length 20 [BOS]",
# "length 30 [BOS]",
# "length 40 [BOS]",
# ]
prompt_text_lst = [
"length 0 [BOS]",
"length 1 [BOS]",
"length 2 [BOS]",
"length 3 [BOS]",
"length 4 [BOS]",
"length 0 [BOS] Once upon a time, ",
"length 1 [BOS] Once upon a time, ",
"length 2 [BOS] Once upon a time, ",
"length 3 [BOS] Once upon a time, ",
"length 4 [BOS] Once upon a time, ",
]
elif args.task_mode == 'keyword':
prompt_text_lst = [
"Keyword bank [BOS]",
"Keyword nice [BOS]",
"keyword simulating [BOS]",
"Keyword necessity [BOS]",
"Keyword positive [BOS]",
"Keyword science [BOS]",
"Keyword bank [BOS] Once upon a time, ",
"Keyword nice [BOS] Once upon a time, ",
"keyword simulating [BOS] Once upon a time, ",
"Keyword necessity [BOS] Once upon a time, ",
"Keyword positive [BOS] Once upon a time, ",
"Keyword science [BOS] Once upon a time, ",
]
elif args.task_mode == 'data2text':
QUICK_CHECK = False
if QUICK_CHECK:
prompt_text_lst = [
"name : <NAME> | Type : coffee shop | area : city centre {}".format(tokenizer.bos_token),
"name : <NAME> | Type : coffee shop | customer rating : 5 out of 5 {}".format(tokenizer.bos_token),
"name : <NAME> | Type : pub | food : Chinese | area : city centre | family friendly : no {}".format(tokenizer.bos_token),
"name : <NAME> | Type : restaurant | food : Chinese | area : city centre | family friendly : yes | near : Rainbow Vegetarian Café {}".format(tokenizer.bos_token),
"name : Giraffe | Type : restaurant | food : Fast food | area : riverside | family friendly : no | near : Rainbow Vegetarian Café {}".format(tokenizer.bos_token),
"name : <NAME> | Type : coffee shop | customer rating : 1 out of 5 | family friendly : yes | near : Avalon {}".format(tokenizer.bos_token),
"name : <NAME> | Type : restaurant | food : Chinese | price : high | customer rating : 1 out of 5 | area : city centre | family friendly : no {}".format(tokenizer.bos_token),
"name : <NAME> | Type : restaurant | food : English | price : moderate | area : riverside | family friendly : yes | near : Raja Indian Cuisine {}".format(tokenizer.bos_token),
]
decode_mode = 'beam'
else:
# TODO.LISA
# test_path = '/u/scr/xlisali/e2e_data/contain_near_Type_src1_test.txt'
test_path = '/u/scr/xlisali/e2e_data/contain_near_Type_no_area_src1_test.txt'
# test_path = '/u/scr/xlisali/e2e_data/src1_test.txt'
prompt_text_dict = read_e2e_files(test_path, tokenizer)
if args.prefixModel_name_or_path is not None:
temp = os.path.basename(args.prefixModel_name_or_path)
else:
temp = os.path.basename(args.model_name_or_path)
# print(prompt_text_dict)
prompt_text_lst = list(prompt_text_dict.keys())
split_file = args.split_file
decode_mode = 'beam'
curr_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/' \
'text-generation/e2e_results_new' \
'/{}_{}_{}_{}_{}'.format(temp, args.alpha1, args.alpha2, split_file, decode_mode)
print(curr_dir)
gold_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/text-generation/e2e_results_new/{}_{}_{}'.format(temp,
split_file,
'gold')
print(gold_dir)
write_e2e_corr(prompt_text_lst, prompt_text_dict, gold_dir)
src_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/text-generation/e2e_results_new/{}_{}_{}'.format(temp,
split_file,
'src')
write_e2e_src(prompt_text_lst, src_dir)
out_handle = open(curr_dir, 'w')
elif args.task_mode == 'webnlg' or args.task_mode == 'triples':
QUICK_CHECK = False
if args.task_mode == 'webnlg':
test_path = "/u/scr/xlisali/WebNLG/webnlg-dataset/release_v2/json/webnlg_release_v2_test.json"
prompt_text_dict = read_webnlg_files(test_path, tokenizer)
elif args.task_mode == 'triples':
test_path = "/u/scr/xlisali/DART/dart/data/v1.1.1/dart-v1.1.1-full-test.json"
prompt_text_dict = read_triples_files(test_path, tokenizer)
if QUICK_CHECK:
prompt_text_pair = list(prompt_text_dict.keys())[:20]
prompt_text_lst, prompt_rela_lst = zip(*prompt_text_pair)
decode_mode = 'beam'
else:
prompt_text_pair = list(prompt_text_dict.keys())
prompt_text_lst, prompt_rela_lst = zip(*prompt_text_pair)
if args.prefixModel_name_or_path is not None:
temp = os.path.basename(args.prefixModel_name_or_path)
else:
temp = os.path.basename(args.model_name_or_path)
# print(prompt_text_dict)
split_file = 'test'
decode_mode = 'beam'
curr_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/text-generation/e2e_results_new/{}_{}_{}'.format(
temp, split_file, decode_mode)
print(curr_dir)
gold_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/text-generation/e2e_results_new/{}_{}_{}'.format(
temp,
split_file,
'gold')
print(gold_dir)
write_e2e_corr(prompt_text_pair, prompt_text_dict, gold_dir)
src_dir = '/u/scr/xlisali/contrast_LM/transformers/examples/text-generation/e2e_results_new/{}_{}_{}'.format(
temp,
split_file,
'src')
write_e2e_src(prompt_text_pair, src_dir)
out_handle = open(curr_dir, 'w')
if args.control_mode == 'yes':
print('processing control codes')
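        # take the word right after the task keyword (e.g. "technology" in "Topic technology<BOS>Last week")
        # as the control word, keeping a leading space -- presumably so the BPE tokenizer treats it as a
        # mid-sentence word rather than a sentence-initial one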
control_words = [[' '+ii.split()[1].split(tokenizer.bos_token)[0] ] for ii in prompt_text_lst]
print(control_words)
control_codes = tokenizer(control_words, add_special_tokens=True, truncation=True,
is_split_into_words=True)["input_ids"]
# prompt_text = args.prompt if args.prompt else input("Model prompt >>> ")
for prompt_idx, prompt_text in enumerate(prompt_text_lst):
# Different models need different input formatting and/or extra arguments
requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys()
if requires_preprocessing:
prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)
preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)
if model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
tokenizer_kwargs = {"add_space_before_punct_symbol": True}
else:
tokenizer_kwargs = {}
encoded_prompt = tokenizer.encode(
preprocessed_prompt_text, add_special_tokens=False, return_tensors="pt", **tokenizer_kwargs
)
else:
prefix = args.prefix if args.prefix else args.padding_text
encoded_prompt = tokenizer.encode(prefix + prompt_text, add_special_tokens=False, return_tensors="pt")
encoded_prompt = encoded_prompt.to(args.device)
if encoded_prompt.size()[-1] == 0:
input_ids = None
else:
input_ids = encoded_prompt
if args.control_mode == 'yes' and args.control_dataless != 'yes':
control_code = torch.LongTensor(control_codes[prompt_idx]).to(model.device).unsqueeze(0).expand(args.num_return_sequences, -1)
# print(control_code)
# print(control_code.shape)
# print(input_ids)
# DEBUG
# control_code = None
else:
control_code = None
# for param in model.base_model.parameters():
# print(param.requires_grad)
# if args.control_dataless == 'yes':
if args.tuning_mode == 'prefixtune':
if args.task_mode == 'embMatch':
control_code = emb_match[prompt_idx].to(model.device)
elif args.task_mode == 'keyword':
control_code = torch.LongTensor(control_codes[prompt_idx]).to(model.device).unsqueeze(0)
print(control_code)
elif args.task_mode == 'topic':
control_code = torch.LongTensor(control_codes[prompt_idx]).to(model.device).unsqueeze(0)
print(control_code)
elif args.task_mode == 'data2text':
src = prompt_text_lst[prompt_idx].split()[:-1]
# print(src)
src = ' '.join(src)
catl = src.split('|')
cat = [cc.split(':')[0].strip() for cc in catl]
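                    # keep only the slot names (name, Type, area, ...) of the "key : value | ..." record;
                    # when the prefix model was trained in 'cat' mode, these slot names alone are tokenized
                    # and used as the control code below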
# print(cat)
src_cat = tokenizer(cat, add_special_tokens=True, truncation=True, is_split_into_words=True)['input_ids']
src = tokenizer(src, add_special_tokens=True, truncation=True, is_split_into_words=False)['input_ids']
mode = None
if 'cat2' in args.prefixModel_name_or_path or 'cat' in args.prefixModel_name_or_path:
mode = 'cat'
elif 'nopeek' in args.prefixModel_name_or_path or 'nop' in args.prefixModel_name_or_path:
mode = 'nopeek'
elif 'peek' in args.prefixModel_name_or_path or 'pee' in args.prefixModel_name_or_path:
mode = 'peek'
elif 'prefixtune15' in args.prefixModel_name_or_path:
mode = 'instruction_based'
# assert False, "prefixtune20 shouldn't be processed here."
else:
if args.format_mode == 'infix':
mode = 'infix'
else:
assert False, "Given that it's in prefix tuning mode, need to specify a valid prefix mode, " \
"(cat, nopeek, peek)"
print(mode)
if mode == 'cat':
cc = src_cat
elif mode == 'peek' or mode == 'nopeek':
cc = src
elif mode == 'infix':
cc = src
# print('control code is ', cc)
if mode == 'nopeek' or mode == 'infix':
input_pp = tokenizer.bos_token
encoded_prompt = tokenizer(input_pp, add_special_tokens=True, truncation=True, return_tensors="pt", is_split_into_words=False)['input_ids'].to(model.device)
input_ids = encoded_prompt
if mode in ['cat', 'peek', 'nopeek', 'infix']:
control_code = torch.LongTensor(cc).to(model.device).unsqueeze(0)
elif mode == 'instruction_based':
control_code = None
else:
assert False, "invalid mode type."
# TODO.LISA
if config.optim_prefix:
control_code = None
elif args.task_mode == 'webnlg' or args.task_mode == 'triples':
src = prompt_text_lst[prompt_idx].split()[:-1]
print(src)
cat = prompt_rela_lst[prompt_idx]
print(cat)
src_cat = tokenizer(cat, add_special_tokens=True, truncation=True, is_split_into_words=True)['input_ids']
src = tokenizer(src, add_special_tokens=True, truncation=True, is_split_into_words=False)['input_ids']
mode = None
if 'cat2' in args.prefixModel_name_or_path or 'cat' in args.prefixModel_name_or_path:
mode = 'cat'
elif 'nopeek' in args.prefixModel_name_or_path or 'nop' in args.prefixModel_name_or_path:
mode = 'nopeek'
elif 'peek' in args.prefixModel_name_or_path or 'pee' in args.prefixModel_name_or_path:
mode = 'peek'
elif 'tune_y_' in args.prefixModel_name_or_path or config.optim_prefix:
mode = 'instruction_based'
# assert False, "prefixtune20 shouldn't be processed here."
else:
if args.format_mode == 'infix':
mode = 'infix'
else:
assert False, "Given that it's in prefix tuning mode, need to specify a valid prefix mode, " \
"(cat, nopeek, peek)"
print(mode)
if mode == 'cat':
cc = src_cat
elif mode == 'peek' or mode == 'nopeek':
cc = src
elif mode == 'infix':
cc = src
# print('control code is ', cc)
if mode == 'nopeek' or mode == 'infix':
input_pp = tokenizer.bos_token
encoded_prompt = tokenizer(input_pp, add_special_tokens=True, truncation=True, return_tensors="pt", is_split_into_words=False)['input_ids'].to(model.device)
input_ids = encoded_prompt
if mode in ['cat', 'peek', 'nopeek', 'infix']:
control_code = torch.LongTensor(cc).to(model.device).unsqueeze(0)
elif mode == 'instruction_based':
control_code = None
else:
assert False, "invalid mode type."
# TODO.LISA
if config.optim_prefix:
control_code = None
else:
control_code = None
print('control code is None')
if args.format_mode != 'infix':
print(config.optim_prefix, optim_prefix_bool)
print(control_code)
prompt1 = model.get_prompt(control_code, gpt2=gpt2, bsz=1)
prompt2 = model2.get_prompt(control_code, gpt2=gpt2, bsz=1)
else:
print(control_code)
prompt1 = model.get_prompt(control_code, None, gpt2=gpt2, bsz=1)
prompt2 = model2.get_prompt(control_code, None, gpt2=gpt2, bsz=1)
assert len(prompt1) == len(prompt2)
if False : # generally bad.
prompt1 = [torch.stack([x[0] * args.alpha1, x[1]], dim=0) for x in prompt1] #past_key, past_value
prompt2 = [torch.stack([x[0] * args.alpha2, x[1]], dim=0) for x in prompt2]
prompt = [torch.cat([x1, x2], dim=3) for (x1, x2) in zip(prompt1, prompt2)]
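            # split_file selects how the two prefixes are combined: compose2 scales each prefix by its
            # alpha and concatenates them along the prefix-length axis (dim=3 of the stacked key/value
            # tensors); compose0 and compose1 simply keep prompt1 or prompt2 unchanged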
elif split_file == 'compose2':
prompt = [torch.cat([args.alpha1 * x1, args.alpha2 * x2], dim=3) for (x1, x2) in zip(prompt1, prompt2)]
elif split_file == 'compose0':
prompt = prompt1
elif split_file == 'compose1':
prompt = prompt2
assert len(prompt) == len(prompt1)
print(prompt[0].shape, prompt1[0].shape, prompt2[0].shape)
# prompt1 = [x.expand(-1, args.num_return_sequences , -1, -1, -1) for x in prompt1]
# prompt2 = [x.expand(-1, args.num_return_sequences, -1, -1, -1) for x in prompt2]
prompt = [x.expand(-1, args.num_return_sequences, -1, -1, -1) for x in prompt]
# assert emb_match_temp is None
# assert control_code is None
print(input_ids, decode_mode)
if decode_mode == 'nucleus':
output_sequences = gpt2.generate(
input_ids=input_ids,
emb_match=None,
control_code=None,
past_key_values=prompt,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=0.5,
repetition_penalty=args.repetition_penalty,
do_sample=True,
# num_beams=5,
num_return_sequences=5,
)
elif decode_mode == 'beam':
output_sequences = gpt2.generate(
input_ids=input_ids,
emb_match=None,
control_code=None,
past_key_values=prompt,
max_length=args.length + len(encoded_prompt[0]),
min_length=5,
temperature=args.temperature,
top_k=args.k,
top_p=0.5,
repetition_penalty=args.repetition_penalty,
do_sample=False,
num_beams=5,
bad_words_ids=[[628], [198]] if True else None,
num_return_sequences=1,
)
print(output_sequences)
elif args.tuning_mode == 'finetune':
print(decode_mode)
if decode_mode == 'nucleus':
output_sequences = model.generate(
input_ids=input_ids,
emb_match=emb_match_temp,
control_code=control_code,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=0.5,
repetition_penalty=args.repetition_penalty,
do_sample=True,
num_beams=5,
num_return_sequences=1,
)
elif decode_mode == 'beam':
output_sequences = model.generate(
input_ids=input_ids,
emb_match=emb_match_temp,
control_code=control_code,
max_length=args.length + len(encoded_prompt[0]),
temperature=args.temperature,
top_k=args.k,
top_p=0.5,
repetition_penalty=args.repetition_penalty,
do_sample=False,
num_beams=5,
num_return_sequences=1,
)
# output_sequences = model.generate(
# input_ids=input_ids,
# emb_match=emb_match_temp,
# control_code=control_code,
# max_length=args.length + len(encoded_prompt[0]),
# temperature=args.temperature,
# top_k=args.k,
# top_p=args.p,
# repetition_penalty=args.repetition_penalty,
# do_sample=True,
# num_return_sequences=args.num_return_sequences,
# )
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
if QUICK_CHECK:
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1))
# args.stop_token = tokenizer.eos_token
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
# Remove all text after the stop token
text = text[: text.find(args.stop_token) if args.stop_token else None]
# Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
total_sequence = (
prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :]
)
generated_sequences.append(total_sequence)
print(total_sequence)
else:
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1))
# args.stop_token = tokenizer.eos_token
generated_sequence = generated_sequence.tolist()
# Decode text
text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
print(text)
text_output = text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
idx = text_output.find(tokenizer.eos_token)
if idx >= 0:
text_output = text_output[:idx]
print(text_output.strip(), file=out_handle)
# # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
# total_sequence = (
# prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)):]
# )
#
# generated_sequences.append(total_sequence)
# print(total_sequence)
print()
# return generated_sequences
if __name__ == "__main__":
main()
```
#### File: transformers/webnlg/multi_prefix_tuning2.py
```python
import torch
from transformers import PretrainedBartModel
from torch import nn
from partial_embed import PartiallyFixedEmbedding
class PrefixTuning(PretrainedBartModel):
"""Classification Head for transformer encoders"""
def __init__(self, config, preseqlen=5):
super().__init__(config)
print('under the PrefixTuning model')
self.match_n_layer = config.num_layers # 6
self.match_n_head = config.num_attention_heads # 12
self.n_embd = config.d_model # 768 512
self.match_n_embd = self.n_embd // self.match_n_head # 64
if hasattr(config, 'new_token_len'):
self.new_token_len = config.new_token_len
else:
self.new_token_len = 3
self.es = PartiallyFixedEmbedding(torch.rand(1,1024),self.new_token_len)
if hasattr(config, 'preseqlen'):
self.preseqlen = config.preseqlen
        else:  # fall back to the constructor argument when config carries no preseqlen
self.preseqlen = preseqlen
if hasattr(config, '_my_arg_task_mode'):
self.task_mode = config._my_arg_task_mode # GEC
else:
self.task_mode = 'underspecified'
assert False, 'the task is underspecified'
self.format_mode = 'cat'
if hasattr(config, 'prefix_dropout'):
self.prefix_dropout = config.prefix_dropout
self.dropout = nn.Dropout(self.prefix_dropout)
else:
self.prefix_dropout = 0.0
if hasattr(config, 'mid_dim'):
self.mid_dim = config.mid_dim
else:
self.mid_dim = 800
self.use_encoder_prefix = True
self.use_cross_prefix = True
if hasattr(config, 'm_prefix_len'):
self.m_prefix_len = config.m_prefix_len
print('M_Prefix_LEN')
else:
self.m_prefix_len = 0
if self.m_prefix_len > 0:
self.get_prompt = self.get_prompt_multiple_prefix
self.categories = ['cats']
self.new_token_len = 6
self.input_tokens = torch.arange(self.preseqlen+(self.m_prefix_len * self.new_token_len)).long()
self.wte = nn.Embedding(self.preseqlen+(self.m_prefix_len * self.new_token_len), self.n_embd)
self.control_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim), # 1024 x 800
nn.Tanh(), #800 x 12 * 2 * 1024
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd))
if self.use_encoder_prefix:
self.wte_enc = nn.Embedding(self.preseqlen+(self.m_prefix_len * self.new_token_len), self.n_embd)
self.control_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd))
if self.use_cross_prefix:
self.wte2 = nn.Embedding(self.preseqlen+(self.m_prefix_len * self.new_token_len), self.n_embd)
self.control_trans2 = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd))
total_param = 0
for name, param in self.named_parameters():
#print(param.shape)
total_param += param.numel()
print('Base Total Param is {}'.format(total_param))
self.categories = ['cats']
self.new_token_len = [6]
total_param = 0
for name, param in self.named_parameters():
print(name,param.shape)
total_param += param.numel()
print('total param is {}'.format(total_param))
def get_encoder_output(self, gpt2, temp_input):
return gpt2.model.encoder.forward_with_encoder_past(temp_input).past_key_values
def get_prompt_multiple_prefix(self, conditional_info,bsz=None, sample_size=1):
input_tokens = self.input_tokens.unsqueeze(0).expand(bsz, -1).to(
self.device) # 8 x 200 rows of tensor([[ 0, 1, 2, ..., 197, 198, 199],
temp_control = self.wte(
input_tokens) # self.wte = Embedding(200, 768) , # temp_control is 8, 200, 1024. so 8 repeats of the embedding matrix
past_key_values = self.control_trans(
temp_control) # 8 x preseqlen x 24576
temp_control = self.wte2(input_tokens)
past_key_values2 = self.control_trans2(temp_control)
temp_control = self.wte_enc(input_tokens)
past_key_values_enc = self.control_trans_enc(temp_control)
for category_idx, category in enumerate(self.categories):
idxmap = {i: ((i) * self.m_prefix_len, ((i + 1) * self.m_prefix_len)) for i in
range(self.new_token_len[category_idx])}
cond = list(map(idxmap.get, conditional_info[category].tolist()))
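            # idxmap sends category value v to the row slice [v*m_prefix_len, (v+1)*m_prefix_len) of the
            # category-specific embeddings appended after the shared preseqlen rows; cond holds one such
            # (start, end) pair per batch element, looked up from conditional_info for this category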
past_key_values_multiple = torch.stack([past_key_values[i, self.preseqlen+j[0]:self.preseqlen+j[1], :] for i,j in enumerate(cond)])
past_key_values = torch.cat([past_key_values_multiple, past_key_values[:,:self.preseqlen,:]], dim = 1)
past_key_values_multiple = torch.stack([past_key_values2[i, self.preseqlen+j[0]:self.preseqlen+j[1], :] for i,j in enumerate(cond)])
past_key_values2 = torch.cat([past_key_values_multiple, past_key_values2[:,:self.preseqlen,:]], dim = 1)
past_key_values_multiple = torch.stack([past_key_values_enc[i, self.preseqlen + j[0]:self.preseqlen + j[1], :] for i, j in enumerate(cond)])
past_key_values_enc = torch.cat([past_key_values_multiple, past_key_values_enc[:, :self.preseqlen, :]], dim = 1)
if sample_size > 1:
past_key_values = torch.cat(sample_size*[past_key_values])
bsz, seqlen, _ = past_key_values.shape
past_key_values = past_key_values.view(bsz, seqlen, self.match_n_layer * 2, self.match_n_head,
self.match_n_embd) # 16, 200, 12, 12, 64 (12*64 = 768, for bart base)
past_key_values = self.dropout(past_key_values) # no dropout
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
2)
if sample_size > 1:
past_key_values2 = torch.cat(sample_size * [past_key_values2])
past_key_values2 = past_key_values2.view(bsz, seqlen, self.match_n_layer * 2, self.match_n_head,
self.match_n_embd)
past_key_values2 = self.dropout(past_key_values2)
past_key_values2 = past_key_values2.permute([2, 0, 3, 1, 4]).split(2)
bsz_enc, seqlen, _ = past_key_values_enc.shape
past_key_values_enc = past_key_values_enc.view(bsz_enc, seqlen, self.match_n_layer * 2, self.match_n_head,
self.match_n_embd)
past_key_values_enc = self.dropout(past_key_values_enc)
past_key_values_enc = past_key_values_enc.permute([2, 0, 3, 1, 4]).split(2)
result = []
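        # assemble one dict per decoder layer: 'self' carries the decoder self-attention prefix,
        # 'encoder_decoder' the cross-attention prefix and 'encoder' the encoder self-attention prefix,
        # each as precomputed prev_key / prev_value tensors of shape (bsz, n_head, prefix_len, head_dim)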
for i, key_val in enumerate(past_key_values):
temp_dict = {'self': {"prev_key": key_val[0].contiguous(),
"prev_value": key_val[1].contiguous(),
}}
if self.use_cross_prefix:
key_val2 = past_key_values2[i]
temp_dict['encoder_decoder'] = {"prev_key": key_val2[0].contiguous(),
"prev_value": key_val2[1].contiguous(),
}
if self.use_encoder_prefix:
key_val_enc = past_key_values_enc[i]
temp_dict['encoder'] = {"prev_key": key_val_enc[0].contiguous(),
"prev_value": key_val_enc[1].contiguous(),
}
result.append(temp_dict)
return result
def forward(self,
input_ids=None,
frozen_model=None,
past_key_values=None,
conditional_info = None,
**kwargs,
):
#{"input_ids": batch, "labels": labels, 'src_attn': src_attn, 'tgt_attn':tgt_attn, 'src':src}
bsz = input_ids.shape[0]
past_key_values_prompt = self.get_prompt_multiple_prefix(conditional_info,bsz=bsz)
if past_key_values is not None:
assert False, "Past key values"
else:
past_key_values = past_key_values_prompt
if frozen_model is None:
assert False, "Didn't specify frozen model"
output = frozen_model(input_ids=input_ids,
past_key_values=past_key_values, **kwargs)
return output
if __name__ == '__main__':
from utils2 import pickle_load, pickle_save
from finetune_2 import PrefixSummarizationModule
from transformers.modeling_bart import shift_tokens_right
args = pickle_load('/Users/jordi/Desktop/Master/prefix_tuning/transformers/GEC/args_m_prefix_T5_2.pkl')
args.m_prefix_len = 2
batch = pickle_load('/Users/jordi/Desktop/Master/prefix_tuning/transformers/GEC/b1_m_prefix_T5_2.pkl')
model = PrefixSummarizationModule(args)
pad_token_id = model.tokenizer.pad_token_id
src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
tgt_ids = batch["labels"]
decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
out = model(src_ids, attention_mask = src_mask, decoder_input_ids = decoder_input_ids, use_cache = False,
use_prefix = True,conditional_info={'cats':batch['cats']})
``` |
{
"source": "jordiclive/GEM-metrics",
"score": 3
} |
#### File: GEM-metrics/gem_metrics/ngrams.py
```python
from typing import Dict, List, Tuple
from .metric import ReferencelessMetric
from .texts import Predictions
import numpy as np
from nltk import ngrams
class NGramStats(ReferencelessMetric):
"""Ngram basic statistics and entropy, working with tokenized & lowercased data (+ variant excluding punctuation):
- data length (total number of words)
- mean instance length (number of words)
- distinct-N (ratio of distinct N-grams / total number of N-grams)
- vocab_size-N (total number of distinct N-grams)
- unique-N (number of N-grams that only occur once in the whole data)
- entropy-N (Shannon entropy over N-grams)
- cond-entropy-N (language model style conditional entropy -- N-grams conditioned on N-1-grams)
All these are computed for 1,2,3-grams (conditional entropy only for 2,3).
Based on:
https://github.com/evanmiltenburg/NLG-diversity/blob/main/diversity.py
https://github.com/tuetschek/e2e-stats/blob/master/nlg_dataset_stats.py
"""
def support_caching(self):
# NGramStats is corpus-level, so individual examples can't be aggregated.
return False
def compute(self, cache, predictions: Predictions) -> Dict:
results = {}
for data_id, data in [
("", predictions.list_tokenized_lower),
("-nopunct", predictions.list_tokenized_lower_nopunct),
]:
lengths = [len(inst) for inst in data]
results[f"total_length{data_id}"] = sum(lengths)
results[f"mean_pred_length{data_id}"] = np.mean(lengths)
results[f"std_pred_length{data_id}"] = np.std(lengths)
results[f"median_pred_length{data_id}"] = np.median(lengths)
results[f"min_pred_length{data_id}"] = min(lengths)
results[f"max_pred_length{data_id}"] = max(lengths)
last_ngram_freqs = (
None # for conditional entropy, we need lower-level n-grams
)
for N in [1, 2, 3]:
ngram_freqs, uniq_ngrams, ngram_len = self._ngram_stats(data, N)
results[f"distinct-{N}{data_id}"] = (
len(ngram_freqs) / ngram_len if ngram_len > 0 else 0
)
results[f"vocab_size-{N}{data_id}"] = len(ngram_freqs)
results[f"unique-{N}{data_id}"] = uniq_ngrams
results[f"entropy-{N}{data_id}"] = self._entropy(ngram_freqs)
if last_ngram_freqs:
results[f"cond_entropy-{N}{data_id}"] = self._cond_entropy(
ngram_freqs, last_ngram_freqs
)
last_ngram_freqs = ngram_freqs
return results
def _ngram_stats(self, data: List[List[str]], N: int) -> Tuple[Dict, int, int]:
"""Return basic ngram statistics, as well as a dict of all ngrams and their freqsuencies."""
ngram_freqs = {} # ngrams with frequencies
ngram_len = 0 # total number of ngrams
for inst in data:
for ngram in ngrams(inst, N):
ngram_freqs[ngram] = ngram_freqs.get(ngram, 0) + 1
ngram_len += 1
# number of unique ngrams
uniq_ngrams = len([val for val in ngram_freqs.values() if val == 1])
return ngram_freqs, uniq_ngrams, ngram_len
def _entropy(self, ngram_freqs: Dict) -> float:
"""Shannon entropy over ngram frequencies"""
total_freq = sum(ngram_freqs.values())
return -sum(
[
freq / total_freq * np.log2(freq / total_freq)
for freq in ngram_freqs.values()
]
)
def _cond_entropy(self, joint: Dict, ctx: Dict) -> float:
"""Conditional/next-word entropy (language model style), using ngrams (joint) and n-1-grams (ctx)."""
total_joint = sum(joint.values())
total_ctx = sum(ctx.values())
# H(y|x) = - sum_{x,y} p(x,y) log_2 p(y|x)
# p(y|x) = p(x,y) / p(x)
return -sum(
[
freq
/ total_joint
* np.log2((freq / total_joint) / (ctx[ngram[:-1]] / total_ctx))
for ngram, freq in joint.items()
]
)
```
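A minimal sketch of driving `NGramStats` directly, assuming the metric needs no constructor arguments and reads only the two tokenized attributes used above (the real `Predictions` class from `gem_metrics.texts` provides them, so a `SimpleNamespace` stand-in is enough here):
```python
from types import SimpleNamespace
from gem_metrics.ngrams import NGramStats

# two toy "predictions", already tokenized and lowercased
toks = [["the", "cat", "sat"], ["the", "dog", "sat", "down"]]
preds = SimpleNamespace(list_tokenized_lower=toks, list_tokenized_lower_nopunct=toks)

stats = NGramStats().compute(cache=None, predictions=preds)
print(stats["total_length"], stats["distinct-1"], stats["entropy-1"])
```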
#### File: GEM-metrics/tests/test_ter.py
```python
import unittest
import gem_metrics.ter
from tests.test_referenced import TestReferencedMetric
class TestTer(TestReferencedMetric, unittest.TestCase):
def setUp(self):
super().setUp()
self.metric = gem_metrics.ter.TER()
self.true_results_basic = {"ter": 47.5}
self.true_results_identical_pred_ref = {"ter": 0.0}
self.true_results_mismatched_pred_ref = {"ter": 100.0}
self.true_results_empty_pred = {"ter": 100.0}
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jordidh/joc-escriu-nom",
"score": 3
} |
#### File: jordidh/joc-escriu-nom/escriu.py
```python
import sys
import os
import subprocess
import pyttsx
from gtts import gTTS
import readchar
from PIL import Image
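# Typing game: the program speaks each letter of the target name aloud (Catalan TTS via gTTS,
# played with VLC) and shows a reward image (bb8.png) or a penalty image (darkvader.jpg) after
# every keypress until the whole name has been typed correctly.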
ordinals = ["primera", "segona", "tercera", "quarta", "cinquena", "sisena", "setena", "vuitena", "novena", "desena"]
def digues(frase):
tts = gTTS(text=frase, lang='ca')
tts.save("frase.mp3")
os.system("vlc --play-and-exit --quiet frase.mp3")
return
def ordinal(numero):
if numero < 10:
return ordinals[numero]
else:
return "següent"
print("JDH 27/08/2016 Bogotà")
print("Programa per ensenyar al Martí i a la Laia a escriure el seu nom (o un altre) en un teclat d'ordinador")
print("Command Line Arguments:" + str(len(sys.argv)))
print("Command Line Arguments List:" + str(sys.argv))
if len(sys.argv) != 2:
print("Per funcionar s'ha de cridar el programa amb un nom. Per exemple: $ python joc-escriu-nom.py MARTI")
sys.exit()
nom = sys.argv[1].upper()
if len(nom) < 2:
print("El nom ha de ser de 2 lletres o mes")
sys.exit()
index = 0
nomEscrit = ""
#imgOK = Image.open("bb8.png")
#imgKO = Image.open("darkvader.jpg")
digues("Escriu " + nom)
while index < len(nom):
print("Has d'escriure \"" + nom + "\" i has escrit \"" + nomEscrit + "\". Escriu una lletra:")
print(nom + " -> " + nomEscrit)
digues("Busca la lletra " + nom[index])
keyPressed = readchar.readchar().upper()
if keyPressed == "EXIT":
sys.exit()
if len(keyPressed) > 1:
#imgKO.show()
p = subprocess.Popen(["display", "darkvader.jpg"])
digues("Només has d'escriure una lletra. Torna-ho a provar")
p.kill()
#imgKO.close()
else:
if nom[index] != keyPressed:
#imgKO.show()
p = subprocess.Popen(["display", "darkvader.jpg"])
digues("NO. Has escrit la " + keyPressed + ". Torna-ho a provar")
p.kill()
#imgKO.close()
else:
if index < (len(nom) - 1):
nomEscrit = nomEscrit + keyPressed
index = index + 1
#digues("Perfecte. Ara escriu la " + ordinal(index) + " lletra")
else:
#imgOK.show()
p = subprocess.Popen(["display", "bb8.png"])
digues("Ja has acabat. Ho has fet molt bé")
p.kill()
#imgOK.close()
index = index + 1
print("Fi del joc. Torna-ho a provar amb un altre nom")
#digues("Fi del jòc. Si vols, torna-ho a provar amb un altre nòm")
``` |
{
"source": "jordifierro/abidria-api",
"score": 3
} |
#### File: abidria-api/experiences/entities.py
```python
class Experience:
def __init__(self, title, description, author_id,
author_username=None, id=None, picture=None, is_mine=False, is_saved=False):
self._id = id
self._title = title
self._description = description
self._picture = picture
self._author_id = author_id
self._author_username = author_username
self._is_mine = is_mine
self._is_saved = is_saved
@property
def id(self):
return self._id
@property
def title(self):
return self._title
@property
def description(self):
return self._description
@property
def picture(self):
return self._picture
@property
def author_id(self):
return self._author_id
@property
def author_username(self):
return self._author_username
@property
def is_mine(self):
return self._is_mine
@property
def is_saved(self):
return self._is_saved
def builder(self):
return Experience.Builder(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Builder:
def __init__(self, experience):
self._id = experience.id
self._title = experience.title
self._description = experience.description
self._picture = experience.picture
self._author_id = experience.author_id
self._author_username = experience.author_username
self._is_mine = experience.is_mine
self._is_saved = experience.is_saved
def id(self, id):
self._id = id
return self
def title(self, title):
self._title = title
return self
def description(self, description):
self._description = description
return self
def picture(self, picture):
self._picture = picture
return self
def author_id(self, author_id):
self._author_id = author_id
return self
def author_username(self, author_username):
self._author_username = author_username
return self
def is_mine(self, is_mine):
self._is_mine = is_mine
return self
def is_saved(self, is_saved):
self._is_saved = is_saved
return self
def build(self):
return Experience(id=self._id, title=self._title, description=self._description,
picture=self._picture, author_id=self._author_id,
author_username=self._author_username, is_mine=self._is_mine,
is_saved=self._is_saved)
```
#### File: abidria-api/experiences/models.py
```python
from django.db import models
from stdimage.models import StdImageField
from stdimage.utils import UploadToUUID, pre_delete_delete_callback, pre_save_delete_callback
from people.models import ORMPerson
class ORMExperience(models.Model):
title = models.CharField(max_length=30, blank=False)
description = models.TextField(blank=True)
picture = StdImageField(upload_to=UploadToUUID(path='experiences'),
variations={'large': (1280, 1280),
'medium': (640, 640),
'small': (320, 320)},
blank=True)
author = models.ForeignKey(ORMPerson, null=True, on_delete=models.CASCADE)
class Meta:
verbose_name = 'Experience'
verbose_name_plural = 'Experiences'
def __str__(self):
return self.title
models.signals.post_delete.connect(pre_delete_delete_callback, sender=ORMExperience)
models.signals.pre_save.connect(pre_save_delete_callback, sender=ORMExperience)
class ORMSave(models.Model):
person = models.ForeignKey(ORMPerson, on_delete=models.CASCADE)
experience = models.ForeignKey(ORMExperience, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = 'Save'
verbose_name_plural = 'Saves'
def __str__(self):
return "{} - {}".format(str(self.person), str(self.experience))
```
#### File: abidria-api/people/interactors.py
```python
from abidria.exceptions import EntityDoesNotExistException, ConflictException, NoLoggedException, \
InvalidEntityException
from people.entities import Person
class CreateGuestPersonAndReturnAuthTokenInteractor:
def __init__(self, client_secret_key_validator, person_repo, auth_token_repo):
self.client_secret_key_validator = client_secret_key_validator
self.person_repo = person_repo
self.auth_token_repo = auth_token_repo
def set_params(self, client_secret_key):
self.client_secret_key = client_secret_key
return self
def execute(self):
self.client_secret_key_validator.validate(client_secret_key=self.client_secret_key)
created_guest_person = self.person_repo.create_guest_person()
return self.auth_token_repo.create_auth_token(person_id=created_guest_person.id)
class AuthenticateInteractor:
def __init__(self, auth_token_repo):
self.auth_token_repo = auth_token_repo
def set_params(self, access_token):
self.access_token = access_token
return self
def execute(self):
try:
auth_token = self.auth_token_repo.get_auth_token(access_token=self.access_token)
return auth_token.person_id
except EntityDoesNotExistException:
return None
class RegisterUsernameAndEmailInteractor:
def __init__(self, person_validator, person_repo, confirmation_token_repo, mailer_service):
self.person_validator = person_validator
self.person_repo = person_repo
self.confirmation_token_repo = confirmation_token_repo
self.mailer_service = mailer_service
def set_params(self, logged_person_id, username, email):
self.logged_person_id = logged_person_id
self.username = username
self.email = email
return self
def execute(self):
if self.logged_person_id is None:
raise NoLoggedException()
person = self.person_repo.get_person(id=self.logged_person_id)
if person.is_email_confirmed:
raise ConflictException(source='person', code='already_registered', message='Person already registered')
updated_person = Person(id=person.id, is_registered=True,
username=self.username, email=self.email, is_email_confirmed=False)
self.person_validator.validate(updated_person)
updated_person = self.person_repo.update_person(updated_person)
self.confirmation_token_repo.delete_confirmation_tokens(person_id=updated_person.id)
confirmation_token = self.confirmation_token_repo.create_confirmation_token(person_id=updated_person.id)
self.mailer_service.send_ask_confirmation_mail(confirmation_token=confirmation_token,
username=updated_person.username, email=updated_person.email)
return updated_person
class ConfirmEmailInteractor:
def __init__(self, person_repo, confirmation_token_repo):
self.person_repo = person_repo
self.confirmation_token_repo = confirmation_token_repo
def set_params(self, logged_person_id, confirmation_token):
self.logged_person_id = logged_person_id
self.confirmation_token = confirmation_token
return self
def execute(self):
if self.logged_person_id is None:
raise NoLoggedException()
try:
person_id = self.confirmation_token_repo.get_person_id(confirmation_token=self.confirmation_token)
except EntityDoesNotExistException:
raise InvalidEntityException(source='confirmation_token', code='invalid',
message='Invalid confirmation token')
if person_id != self.logged_person_id:
raise InvalidEntityException(source='confirmation_token', code='invalid',
message='Invalid confirmation token')
self.confirmation_token_repo.delete_confirmation_tokens(person_id=person_id)
person = self.person_repo.get_person(id=self.logged_person_id)
updated_person = Person(id=person.id, is_registered=person.is_registered,
username=person.username, email=person.email,
is_email_confirmed=True)
updated_person = self.person_repo.update_person(updated_person)
return updated_person
``` |
{
"source": "jordifierro/pachatary-api",
"score": 3
} |
#### File: pachatary-api/experiences/entities.py
```python
class Experience:
def __init__(self, title, description,
id=None, author_id=None, author_profile=None,
picture=None, is_mine=False, is_saved=False,
saves_count=0, share_id=None):
self._id = id
self._title = title
self._description = description
self._picture = picture
self._author_id = author_id
self._author_profile = author_profile
self._is_mine = is_mine
self._is_saved = is_saved
self._saves_count = saves_count
self._share_id = share_id
@property
def id(self):
return self._id
@property
def title(self):
return self._title
@property
def description(self):
return self._description
@property
def picture(self):
return self._picture
@property
def author_id(self):
return self._author_id
@property
def author_profile(self):
return self._author_profile
@property
def is_mine(self):
return self._is_mine
@property
def is_saved(self):
return self._is_saved
@property
def saves_count(self):
return self._saves_count
@property
def share_id(self):
return self._share_id
def builder(self):
return Experience.Builder(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Builder:
def __init__(self, experience):
self._id = experience.id
self._title = experience.title
self._description = experience.description
self._picture = experience.picture
self._author_profile = experience.author_profile
self._author_id = experience.author_id
self._is_mine = experience.is_mine
self._is_saved = experience.is_saved
self._saves_count = experience.saves_count
self._share_id = experience.share_id
def id(self, id):
self._id = id
return self
def title(self, title):
self._title = title
return self
def description(self, description):
self._description = description
return self
def picture(self, picture):
self._picture = picture
return self
def author_profile(self, author_profile):
self._author_profile = author_profile
return self
def author_id(self, author_id):
self._author_id = author_id
return self
def is_mine(self, is_mine):
self._is_mine = is_mine
return self
def is_saved(self, is_saved):
self._is_saved = is_saved
return self
def saves_count(self, saves_count):
self._saves_count = saves_count
return self
def share_id(self, share_id):
self._share_id = share_id
return self
def build(self):
return Experience(id=self._id, title=self._title, description=self._description,
picture=self._picture, author_profile=self._author_profile,
author_id=self._author_id, is_mine=self._is_mine, is_saved=self._is_saved,
saves_count=self._saves_count, share_id=self._share_id)
```
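The builder above is how callers derive modified copies of an otherwise read-only `Experience`; a small usage sketch (field values are made up for illustration):
```python
from experiences.entities import Experience

experience = Experience(title='Lake trip', description='Kayaking weekend', author_id='7')
shared = experience.builder().share_id('Ab3xYz90').saves_count(1).build()

assert shared.title == 'Lake trip' and shared.share_id == 'Ab3xYz90'
assert experience.share_id is None  # the original instance is left untouched
```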
#### File: pachatary-api/experiences/interactors.py
```python
import random
from enum import Enum
from pachatary.exceptions import ConflictException, BlockedContentException
from experiences.entities import Experience
class GetExperiencesInteractor:
MAX_PAGINATE_LIMIT = 20
def __init__(self, experience_repo, get_profile_interactor, permissions_validator):
self.experience_repo = experience_repo
self.get_profile_interactor = get_profile_interactor
self.permissions_validator = permissions_validator
def set_params(self, saved, username, logged_person_id, limit, offset):
self.saved = saved
self.username = username
self.logged_person_id = logged_person_id
self.limit = limit
self.offset = offset
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
if self.limit > GetExperiencesInteractor.MAX_PAGINATE_LIMIT:
self.limit = GetExperiencesInteractor.MAX_PAGINATE_LIMIT
if self.saved:
result = self.experience_repo.get_saved_experiences(limit=self.limit, offset=self.offset,
logged_person_id=self.logged_person_id)
else:
if self.username == 'self':
target_person_id = self.logged_person_id
else:
target_person_id = self.get_profile_interactor.set_params(
username=self.username, logged_person_id=self.logged_person_id).execute().person_id
result = self.experience_repo.get_person_experiences(limit=self.limit, offset=self.offset,
logged_person_id=self.logged_person_id,
target_person_id=target_person_id)
result.update({"next_limit": self.limit})
return result
class SearchExperiencesInteractor:
MAX_PAGINATION_LIMIT = 20
def __init__(self, experience_repo, block_repo, permissions_validator):
self.experience_repo = experience_repo
self.block_repo = block_repo
self.permissions_validator = permissions_validator
def set_params(self, word, location, logged_person_id, limit, offset):
self.word = word
self.location = location
self.logged_person_id = logged_person_id
self.limit = limit
self.offset = offset
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
if self.limit > SearchExperiencesInteractor.MAX_PAGINATION_LIMIT:
self.limit = SearchExperiencesInteractor.MAX_PAGINATION_LIMIT
result = self.experience_repo.search_experiences(self.logged_person_id,
word=self.word, location=self.location,
limit=self.limit, offset=self.offset)
blocked_people = self.block_repo.get_blocked_people(person_id=self.logged_person_id)
if len(blocked_people) > 0:
filtered_experiences = [x for x in result['results'] if x.author_id not in blocked_people]
result.update({'results': filtered_experiences})
result.update({'next_limit': self.limit})
return result
class CreateNewExperienceInteractor:
def __init__(self, experience_repo, experience_validator, permissions_validator):
self.experience_repo = experience_repo
self.experience_validator = experience_validator
self.permissions_validator = permissions_validator
def set_params(self, title, description, logged_person_id):
self.title = title
self.description = description
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id,
wants_to_create_content=True)
experience = Experience(title=self.title, description=self.description, author_id=self.logged_person_id)
self.experience_validator.validate_experience(experience)
return self.experience_repo.create_experience(experience)
class ModifyExperienceInteractor:
def __init__(self, experience_repo, experience_validator, permissions_validator):
self.experience_repo = experience_repo
self.experience_validator = experience_validator
self.permissions_validator = permissions_validator
def set_params(self, id, title, description, logged_person_id):
self.id = id
self.title = title
self.description = description
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id,
has_permissions_to_modify_experience=self.id)
experience = self.experience_repo.get_experience(id=self.id, logged_person_id=self.logged_person_id)
new_title = self.title if self.title is not None else experience.title
new_description = self.description if self.description is not None else experience.description
updated_experience = experience.builder().title(new_title).description(new_description).build()
self.experience_validator.validate_experience(updated_experience)
return self.experience_repo.update_experience(updated_experience, logged_person_id=self.logged_person_id)
class UploadExperiencePictureInteractor:
def __init__(self, experience_repo, permissions_validator):
self.experience_repo = experience_repo
self.permissions_validator = permissions_validator
def set_params(self, experience_id, picture, logged_person_id):
self.experience_id = experience_id
self.picture = picture
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id,
has_permissions_to_modify_experience=self.experience_id)
return self.experience_repo.attach_picture_to_experience(experience_id=self.experience_id, picture=self.picture)
class SaveUnsaveExperienceInteractor:
class Action(Enum):
SAVE = 1
UNSAVE = 2
def __init__(self, experience_repo, permissions_validator, get_experience_interactor):
self.experience_repo = experience_repo
self.permissions_validator = permissions_validator
self.get_experience_interactor = get_experience_interactor
def set_params(self, action, experience_id, logged_person_id):
self.action = action
self.experience_id = experience_id
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
experience = self.get_experience_interactor.set_params(experience_id=self.experience_id,
logged_person_id=self.logged_person_id).execute()
if experience.author_id == self.logged_person_id:
raise ConflictException(source='experience', code='self_save',
message='You cannot save your own experiences')
if self.action is SaveUnsaveExperienceInteractor.Action.SAVE:
self.experience_repo.save_experience(person_id=self.logged_person_id, experience_id=self.experience_id)
elif self.action is SaveUnsaveExperienceInteractor.Action.UNSAVE:
self.experience_repo.unsave_experience(person_id=self.logged_person_id, experience_id=self.experience_id)
return True
class GetOrCreateExperienceShareIdInteractor:
def __init__(self, experience_repo, permissions_validator, id_generator, get_experience_interactor):
self.experience_repo = experience_repo
self.permissions_validator = permissions_validator
self.id_generator = id_generator
self.get_experience_interactor = get_experience_interactor
def set_params(self, experience_id, logged_person_id):
self.experience_id = experience_id
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
experience = self.get_experience_interactor.set_params(experience_id=self.experience_id,
logged_person_id=self.logged_person_id).execute()
if experience.share_id is not None:
return experience.share_id
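        # draw random 8-character ids until the repository accepts one: a ConflictException signals that
        # the freshly generated share_id already exists, so the loop simply retries with a new draw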
updated_with_share_id = False
while not updated_with_share_id:
try:
share_id = self.id_generator.generate()
experience = experience.builder().share_id(share_id).build()
experience = self.experience_repo.update_experience(experience)
updated_with_share_id = True
except ConflictException:
pass
return experience.share_id
class IdGenerator:
LENGTH = 8
CHOICES = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
def generate(self):
return ''.join(random.choice(IdGenerator.CHOICES) for _ in range(IdGenerator.LENGTH))
class GetExperienceInteractor:
def __init__(self, experience_repo, block_repo, permissions_validator):
self.experience_repo = experience_repo
self.block_repo = block_repo
self.permissions_validator = permissions_validator
def set_params(self, experience_id=None, experience_share_id=None, logged_person_id=None):
self.experience_id = experience_id
self.experience_share_id = experience_share_id
self.logged_person_id = logged_person_id
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
if self.experience_id is not None:
experience = self.experience_repo.get_experience(id=self.experience_id,
logged_person_id=self.logged_person_id)
elif self.experience_share_id is not None:
experience = self.experience_repo.get_experience(share_id=self.experience_share_id,
logged_person_id=self.logged_person_id)
if self.block_repo.block_exists(creator_id=self.logged_person_id, target_id=experience.author_id):
raise BlockedContentException
else:
return experience
class FlagExperienceInteractor:
def __init__(self, experience_repo, permissions_validator, get_experience_interactor):
self.experience_repo = experience_repo
self.permissions_validator = permissions_validator
self.get_experience_interactor = get_experience_interactor
def set_params(self, logged_person_id, experience_id, reason):
self.logged_person_id = logged_person_id
self.experience_id = experience_id
self.reason = reason
return self
def execute(self):
self.permissions_validator.validate_permissions(logged_person_id=self.logged_person_id)
self.get_experience_interactor.set_params(experience_id=self.experience_id,
logged_person_id=self.logged_person_id).execute()
return self.experience_repo.flag_experience(person_id=self.logged_person_id,
experience_id=self.experience_id,
reason=self.reason)
```
#### File: pachatary-api/people/basic_factories.py
```python
from .repositories import PersonRepo, BlockRepo
from .validators import PersonPermissionsValidator
def create_person_repo():
return PersonRepo()
def create_block_repo():
return BlockRepo()
def create_person_permissions_validator():
return PersonPermissionsValidator(person_repo=create_person_repo())
```
#### File: pachatary-api/people/services.py
```python
from django.template.loader import get_template
from django.core import mail
from django.conf import settings
class MailerService:
PUBLIC_EMAIL_CONFIRMATION_PATH = '/redirects/people/me/email-confirmation'
PUBLIC_LOGIN_EMAIL_PATH = '/redirects/people/me/login'
def send_ask_confirmation_mail(self, confirmation_token, email, username):
url = '{}{}?token={}'.format(settings.PUBLIC_DOMAIN,
MailerService.PUBLIC_EMAIL_CONFIRMATION_PATH, confirmation_token)
context_params = {'username': username, 'confirmation_url': url}
plain_text_message = get_template('ask_confirmation_email.txt').render(context_params)
html_message = get_template('ask_confirmation_email.html').render(context_params)
subject, origin_email, target_email = 'Pachatary account confirmation', settings.EMAIL_HOST_ORIGIN, email
mail.send_mail(subject,
plain_text_message,
origin_email, [target_email, ],
html_message=html_message,
fail_silently=False)
def send_login_mail(self, login_token, email, username):
url = '{}{}?token={}'.format(settings.PUBLIC_DOMAIN, MailerService.PUBLIC_LOGIN_EMAIL_PATH, login_token)
context_params = {'username': username, 'login_url': url}
plain_text_message = get_template('login_email.txt').render(context_params)
html_message = get_template('login_email.html').render(context_params)
subject, origin_email, target_email = 'Pachatary login', settings.EMAIL_HOST_ORIGIN, email
mail.send_mail(subject,
plain_text_message,
origin_email, [target_email, ],
html_message=html_message,
fail_silently=False)
```
#### File: people/tests/unit_test_interactors.py
```python
from mock import Mock, call
from pachatary.exceptions import InvalidEntityException, EntityDoesNotExistException, ConflictException, \
NoLoggedException
from people.entities import Person, AuthToken
from people.interactors import CreateGuestPersonAndReturnAuthTokenInteractor, AuthenticateInteractor, \
RegisterUsernameAndEmailInteractor, ConfirmEmailInteractor, LoginEmailInteractor, LoginInteractor, \
BlockInteractor
from profiles.entities import Profile
from experiences.entities import Experience
from experiences.interactors import SaveUnsaveExperienceInteractor
class TestCreateGuestPersonAndReturnAuthToken:
def test_creates_guest_person_and_returns_auth_token(self):
TestCreateGuestPersonAndReturnAuthToken._ScenarioMaker() \
.given_a_client_secret_key() \
.given_a_client_secret_key_validator_that_accepts_that_key() \
.given_a_person_repo_that_returns_a_person() \
.given_an_auth_token_repo_that_returns_a_token() \
.when_execute_interactor() \
.then_result_should_be_that_token() \
.then_client_secret_key_should_be_validated() \
.then_person_repo_create_guest_person_should_be_called() \
.then_create_auth_token_should_be_called_with_returned_person_id()
def test_invalid_client_secret_key_returns_invalid_entity_exception_and_doesnt_create_person(self):
TestCreateGuestPersonAndReturnAuthToken._ScenarioMaker() \
.given_a_client_secret_key() \
.given_a_client_secret_key_validator_that_doesnt_accept_that_key() \
.given_a_person_repo_that_returns_a_person() \
.given_an_auth_token_repo_that_returns_a_token() \
.when_execute_interactor() \
.then_should_raise_invalid_entity_exception() \
.then_client_secret_key_should_be_validated() \
.then_person_repo_create_guest_person_should_not_be_called() \
.then_create_auth_token_should_not_be_called()
class _ScenarioMaker:
def __init__(self):
self.person = None
self.auth_token = None
self.person_repo = None
self.auth_token_repo = None
self.result = None
self.client_secret_key = None
self.client_secret_key_validator = None
def given_a_client_secret_key(self):
self.client_secret_key = "scrt"
return self
def given_a_client_secret_key_validator_that_accepts_that_key(self):
self.client_secret_key_validator = Mock()
self.client_secret_key_validator.validate.return_value = True
return self
def given_a_client_secret_key_validator_that_doesnt_accept_that_key(self):
self.client_secret_key_validator = Mock()
self.client_secret_key_validator.validate.side_effect = InvalidEntityException(
source='client_secret_key',
code='invalid',
message='Invalid client secret key')
return self
def given_a_person_repo_that_returns_a_person(self):
self.person = Person(id='3')
self.person_repo = Mock()
self.person_repo.create_guest_person.return_value = self.person
return self
def given_an_auth_token_repo_that_returns_a_token(self):
self.auth_token = AuthToken(person_id='3', access_token='A', refresh_token='R')
self.auth_token_repo = Mock()
self.auth_token_repo.create_auth_token.return_value = self.auth_token
return self
def when_execute_interactor(self):
try:
interactor = CreateGuestPersonAndReturnAuthTokenInteractor(
client_secret_key_validator=self.client_secret_key_validator,
person_repo=self.person_repo,
auth_token_repo=self.auth_token_repo)
self.result = interactor.set_params(client_secret_key=self.client_secret_key).execute()
except Exception as e:
self.error = e
return self
def then_result_should_be_that_token(self):
assert self.result == self.auth_token
return self
def then_should_raise_invalid_entity_exception(self):
assert type(self.error) is InvalidEntityException
assert self.error.source == 'client_secret_key'
assert self.error.code == 'invalid'
return self
def then_client_secret_key_should_be_validated(self):
self.client_secret_key_validator.validate.assert_called_once_with(client_secret_key=self.client_secret_key)
return self
def then_person_repo_create_guest_person_should_be_called(self):
self.person_repo.create_guest_person.assert_called_once()
return self
def then_person_repo_create_guest_person_should_not_be_called(self):
self.person_repo.create_guest_person.assert_not_called()
return self
def then_create_auth_token_should_be_called_with_returned_person_id(self):
self.auth_token_repo.create_auth_token.assert_called_once_with(person_id=self.person.id)
return self
def then_create_auth_token_should_not_be_called(self):
self.auth_token_repo.create_auth_token.assert_not_called()
return self
class TestAuthenticateInteractor:
def test_correct_access_token_returns_person_id(self):
TestAuthenticateInteractor.ScenarioMaker() \
.given_an_access_token() \
.given_an_auth_token() \
.given_an_auth_repo_that_returns_that_auth_token() \
.when_authenticate_interactor_is_executed() \
.then_should_call_repo_get_auth_token_with_access_token() \
.then_should_return_auth_token_person_id()
def test_wrong_access_token_returns_none(self):
TestAuthenticateInteractor.ScenarioMaker() \
.given_an_access_token() \
.given_an_auth_repo_that_raises_entity_does_not_exist() \
.when_authenticate_interactor_is_executed() \
.then_should_return_none()
class ScenarioMaker:
def __init__(self):
self.result = None
self.repo = None
self.access_token = None
self.auth_token = None
def given_an_access_token(self):
self.access_token = '<PASSWORD>'
return self
def given_an_auth_token(self):
self.auth_token = AuthToken(person_id='1', access_token='A', refresh_token='R')
return self
def given_an_auth_repo_that_returns_that_auth_token(self):
self.repo = Mock()
self.repo.get_auth_token.return_value = self.auth_token
return self
def given_an_auth_repo_that_raises_entity_does_not_exist(self):
self.repo = Mock()
self.repo.get_auth_token.side_effect = EntityDoesNotExistException
return self
def when_authenticate_interactor_is_executed(self):
self.result = AuthenticateInteractor(self.repo).set_params(access_token=self.access_token).execute()
return self
def then_should_call_repo_get_auth_token_with_access_token(self):
self.repo.get_auth_token.assert_called_once_with(access_token=self.access_token)
return self
def then_should_return_auth_token_person_id(self):
assert self.result == self.auth_token.person_id
return self
def then_should_return_none(self):
assert self.result is None
return self
class TestRegisterUsernameAndEmailInteractor:
def test_correct_username_and_email_when_profile_doesnt_exist(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.given_a_person_validator_that_returns(True) \
.given_a_person_repo_that_returns_on_get(Person(id='3', email='b', is_email_confirmed=False)) \
.given_a_person_repo_that_returns_on_update(Person(id='4', email='o', is_email_confirmed=False)) \
.given_a_profile_validator_that_returns(True) \
.given_a_profile_repo_that_returns_on_get(False) \
.given_a_confirmation_token_repo_that_returns('KT') \
.when_execute(logged_person_id='1', username='u', email='e') \
.then_should_call_person_repo_get_with(id='1') \
.then_should_call_person_validator_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_profile_repo_get_with(person_id='1', logged_person_id='1') \
.then_should_call_profile_validator_with(Profile(person_id='1', username='u')) \
.then_should_call_profile_repo_create_with(Profile(person_id='1', username='u')) \
.then_should_call_person_repo_update_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_confirmation_token_repo_delete_with(person_id='1') \
.then_should_call_confirmation_token_repo_create_with(person_id='1') \
.then_should_call_mailer_with(confirmation_token='KT', username='u', email='e') \
.then_should_return(True)
def test_correct_username_and_email_when_profile_exists(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.given_a_person_validator_that_returns(True) \
.given_a_person_repo_that_returns_on_get(Person(id='3', email='b', is_email_confirmed=False)) \
.given_a_person_repo_that_returns_on_update(Person(id='4', email='o', is_email_confirmed=False)) \
.given_a_profile_validator_that_returns(True) \
.given_a_profile_repo_that_returns_on_get(Profile(person_id='7', username='p')) \
.given_a_confirmation_token_repo_that_returns('KT') \
.when_execute(logged_person_id='1', username='u', email='e') \
.then_should_call_person_repo_get_with(id='1') \
.then_should_call_person_validator_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_profile_repo_get_with(person_id='1', logged_person_id='1') \
.then_should_call_profile_validator_with(Profile(person_id='7', username='u')) \
.then_should_call_profile_repo_update_with(Profile(person_id='7', username='u')) \
.then_should_call_person_repo_update_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_confirmation_token_repo_delete_with(person_id='1') \
.then_should_call_confirmation_token_repo_create_with(person_id='1') \
.then_should_call_mailer_with(confirmation_token='KT', username='u', email='e') \
.then_should_return(True)
def test_incorrect_email_raises_invalid_entity_exception(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.given_a_person_validator_that_returns(
error=InvalidEntityException(source='e', code='i', message='m')) \
.given_a_person_repo_that_returns_on_get(Person(id='3', email='b', is_email_confirmed=False)) \
.when_execute(logged_person_id='1', username='u', email='e') \
.then_should_call_person_repo_get_with(id='1') \
.then_should_call_person_validator_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_profile_repo_get_with(False) \
.then_should_call_profile_validator_with(False) \
.then_should_call_profile_repo_update_with(False) \
.then_should_call_person_repo_update_with(False) \
.then_should_call_confirmation_token_repo_delete_with(False) \
.then_should_call_confirmation_token_repo_create_with(False) \
.then_should_call_mailer_with(False) \
.then_should_raise(InvalidEntityException(source='e', code='i', message='m'))
def test_incorrect_username_raises_invalid_entity_exception(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.given_a_person_validator_that_returns(True) \
.given_a_person_repo_that_returns_on_get(Person(id='3', email='b', is_email_confirmed=False)) \
.given_a_profile_validator_that_returns(
error=InvalidEntityException(source='u', code='i', message='m')) \
.given_a_profile_repo_that_returns_on_get(False) \
.when_execute(logged_person_id='1', username='u', email='e') \
.then_should_call_person_repo_get_with(id='1') \
.then_should_call_person_validator_with(Person(id='3', email='e', is_email_confirmed=False)) \
.then_should_call_profile_repo_get_with(person_id='1', logged_person_id='1') \
.then_should_call_profile_validator_with(Profile(person_id='1', username='u')) \
.then_should_call_profile_repo_update_with(False) \
.then_should_call_person_repo_update_with(False) \
.then_should_call_confirmation_token_repo_delete_with(False) \
.then_should_call_confirmation_token_repo_create_with(False) \
.then_should_call_mailer_with(False) \
.then_should_raise(InvalidEntityException(source='u', code='i', message='m'))
def test_cannot_register_once_email_is_confirmed(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.given_a_person_validator_that_returns(True) \
.given_a_person_repo_that_returns_on_get(Person(id='3', email='b', is_email_confirmed=True)) \
.when_execute(logged_person_id='1', username='u', email='e') \
.then_should_call_person_repo_get_with(id='1') \
.then_should_call_person_validator_with(False) \
.then_should_call_profile_repo_get_with(False) \
.then_should_call_profile_validator_with(False) \
.then_should_call_profile_repo_update_with(False) \
.then_should_call_person_repo_update_with(False) \
.then_should_call_confirmation_token_repo_delete_with(False) \
.then_should_call_confirmation_token_repo_create_with(False) \
.then_should_call_mailer_with(False) \
.then_should_raise(
ConflictException(source='person', code='already_registered',
message='Person already registered'))
def test_no_logged_person_id_raises_unauthorized(self):
TestRegisterUsernameAndEmailInteractor.ScenarioMaker() \
.when_execute(logged_person_id=None, username='u', email='e') \
.then_should_call_person_repo_get_with(False) \
.then_should_call_person_validator_with(False) \
.then_should_call_profile_repo_get_with(False) \
.then_should_call_profile_validator_with(False) \
.then_should_call_profile_repo_update_with(False) \
.then_should_call_person_repo_update_with(False) \
.then_should_call_confirmation_token_repo_delete_with(False) \
.then_should_call_confirmation_token_repo_create_with(False) \
.then_should_call_mailer_with(False) \
.then_should_raise(NoLoggedException())
class ScenarioMaker:
def __init__(self):
self.person_validator = Mock()
self.person_repo = Mock()
self.profile_validator = Mock()
self.profile_repo = Mock()
self.confirmation_token_repo = Mock()
self.mailer_service = Mock()
def given_a_person_validator_that_returns(self, is_correct=False, error=None):
if not is_correct:
self.person_validator.validate.side_effect = error
else:
self.person_validator.validate.return_value = True
return self
def given_a_person_repo_that_returns_on_get(self, person):
if person is False:
self.person_repo.get_person.side_effect = EntityDoesNotExistException()
else:
self.person_repo.get_person.return_value = person
return self
def given_a_person_repo_that_returns_on_update(self, person):
self.person_repo.update_person.return_value = person
return self
def given_a_profile_validator_that_returns(self, is_correct=False, error=None):
if not is_correct:
self.profile_validator.validate.side_effect = error
else:
self.profile_validator.validate.return_value = True
return self
def given_a_profile_repo_that_returns_on_get(self, profile):
if profile is False:
self.profile_repo.get_profile.side_effect = EntityDoesNotExistException()
else:
self.profile_repo.get_profile.return_value = profile
return self
def given_a_confirmation_token_repo_that_returns(self, confirmation_token):
self.confirmation_token_repo.create_confirmation_token.return_value = confirmation_token
return self
def when_execute(self, logged_person_id, username, email):
try:
self.result = RegisterUsernameAndEmailInteractor(
person_repo=self.person_repo, person_validator=self.person_validator,
profile_repo=self.profile_repo, profile_validator=self.profile_validator,
confirmation_token_repo=self.confirmation_token_repo,
mailer_service=self.mailer_service) \
.set_params(logged_person_id=logged_person_id, username=username, email=email).execute()
except Exception as e:
self.error = e
return self
def then_should_call_person_repo_get_with(self, id):
if id is False:
self.person_repo.get_person.assert_not_called()
else:
self.person_repo.get_person.assert_called_once_with(id=id)
return self
def then_should_call_person_validator_with(self, person):
if person is False:
self.person_validator.validate.assert_not_called()
else:
self.person_validator.validate.assert_called_once_with(person)
return self
def then_should_call_profile_repo_get_with(self, person_id, logged_person_id=None):
if person_id is False:
self.profile_repo.get_profile.assert_not_called()
else:
self.profile_repo.get_profile.assert_called_once_with(person_id=person_id,
logged_person_id=logged_person_id)
return self
def then_should_call_profile_validator_with(self, profile):
if profile is False:
self.profile_validator.validate.assert_not_called()
else:
self.profile_validator.validate.assert_called_once_with(profile)
return self
def then_should_call_profile_repo_create_with(self, profile):
if profile is False:
self.profile_repo.create_profile.assert_not_called()
else:
self.profile_repo.create_profile.assert_called_once_with(profile)
return self
def then_should_call_profile_repo_update_with(self, profile):
if profile is False:
self.profile_repo.update_profile.assert_not_called()
else:
self.profile_repo.update_profile.assert_called_once_with(profile)
return self
def then_should_call_person_repo_update_with(self, person):
if person is False:
self.person_repo.update_person.assert_not_called()
else:
self.person_repo.update_person.assert_called_once_with(person)
return self
def then_should_call_confirmation_token_repo_delete_with(self, person_id):
if person_id is False:
self.confirmation_token_repo.delete_confirmation_tokens.assert_not_called()
else:
self.confirmation_token_repo.delete_confirmation_tokens.assert_called_once_with(person_id=person_id)
return self
def then_should_call_confirmation_token_repo_create_with(self, person_id):
if person_id is False:
self.confirmation_token_repo.create_confirmation_token.assert_not_called()
else:
self.confirmation_token_repo.create_confirmation_token.assert_called_once_with(person_id=person_id)
return self
def then_should_call_mailer_with(self, confirmation_token, username=None, email=None):
if confirmation_token is False:
self.mailer_service.send_ask_confirmation_mail.assert_not_called()
else:
self.mailer_service.send_ask_confirmation_mail.assert_called_once_with(
confirmation_token=confirmation_token, username=username, email=email)
return self
def then_should_return(self, result):
assert self.result == result
return self
def then_should_raise(self, error):
assert self.error == error
return self
class TestConfirmEmailInteractor:
def test_confirm_email_returns_person_confirmed(self):
TestConfirmEmailInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_confirmation_token() \
.given_a_confirmation_token_repo_that_returns_that_confirmation_token() \
.given_an_updated_person() \
.given_a_person() \
.given_a_person_repo_that_returns_those_persons_on_get_and_update() \
.when_confirm_email_interactor_is_executed() \
.then_should_call_confirmation_token_repo_get_person_id_with_confirmation_token() \
.then_should_delete_all_confirmation_tokens_for_that_person() \
.then_should_call_person_repo_get() \
.then_should_call_person_repo_update_with_is_email_confirmed_true() \
.then_should_return_true()
def test_unauthenticated_raises_unauthorized(self):
TestConfirmEmailInteractor.ScenarioMaker() \
.when_confirm_email_interactor_is_executed() \
.then_should_raise_unauthorized() \
.then_should_not_delete_all_confirmation_tokens_for_that_person() \
.then_should_not_call_person_repo_update()
def test_no_confirmation_token_raises_unauthorized(self):
TestConfirmEmailInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_confirmation_token_repo_that_raises_entity_does_not_exist() \
.when_confirm_email_interactor_is_executed() \
.then_should_raise_invalid_params_for_wrong_confirmation_token() \
.then_should_not_delete_all_confirmation_tokens_for_that_person() \
.then_should_not_call_person_repo_update()
def test_not_coincident_person_id_raises_unauthorized(self):
TestConfirmEmailInteractor.ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_confirmation_token_repo_that_returns_another_person_id() \
.when_confirm_email_interactor_is_executed() \
.then_should_raise_invalid_params_for_wrong_confirmation_token() \
.then_should_not_delete_all_confirmation_tokens_for_that_person() \
.then_should_not_call_person_repo_update()
class ScenarioMaker:
def __init__(self):
self.result = None
self.error = None
self.updated_person = None
self.person = None
self.logged_person_id = None
self.confirmation_token = None
self.confirmation_token_repo = Mock()
self.person_repo = Mock()
def given_a_logged_person_id(self):
self.logged_person_id = '2'
return self
def given_a_confirmation_token(self):
self.confirmation_token = 'ABC'
return self
def given_a_confirmation_token_repo_that_returns_that_confirmation_token(self):
self.confirmation_token_repo.get_person_id.return_value = self.logged_person_id
return self
def given_a_confirmation_token_repo_that_returns_another_person_id(self):
self.confirmation_token_repo.get_person_id.return_value = '99'
return self
def given_a_confirmation_token_repo_that_raises_entity_does_not_exist(self):
self.confirmation_token_repo.get_person_id.side_effect = EntityDoesNotExistException()
return self
def given_a_person(self):
self.person = Person(id='4', email='[email protected]', is_email_confirmed=False)
return self
def given_an_updated_person(self):
self.updated_person = Person(id='4', email='[email protected]', is_email_confirmed=True)
return self
def given_a_person_repo_that_returns_those_persons_on_get_and_update(self):
self.person_repo.update_person.return_value = self.updated_person
self.person_repo.get_person.return_value = self.person
return self
def when_confirm_email_interactor_is_executed(self):
try:
interactor = ConfirmEmailInteractor(confirmation_token_repo=self.confirmation_token_repo,
person_repo=self.person_repo)
self.result = interactor.set_params(logged_person_id=self.logged_person_id,
confirmation_token=self.confirmation_token).execute()
except Exception as e:
self.error = e
return self
def then_should_call_confirmation_token_repo_get_person_id_with_confirmation_token(self):
self.confirmation_token_repo.get_person_id \
.assert_called_once_with(confirmation_token=self.confirmation_token)
return self
def then_should_delete_all_confirmation_tokens_for_that_person(self):
self.confirmation_token_repo.delete_confirmation_tokens \
.assert_called_once_with(person_id=self.logged_person_id)
return self
def then_should_call_person_repo_get(self):
self.person_repo.get_person.assert_called_once_with(id=self.logged_person_id)
return self
def then_should_call_person_repo_update_with_is_email_confirmed_true(self):
update_person = Person(id=self.person.id, email=self.person.email, is_email_confirmed=True)
self.person_repo.update_person.assert_called_once_with(update_person)
return self
def then_should_return_true(self):
assert self.result is True
return self
def then_should_raise_unauthorized(self):
assert type(self.error) is NoLoggedException
return self
def then_should_not_delete_all_confirmation_tokens_for_that_person(self):
self.confirmation_token_repo.delete_confirmation_tokens.assert_not_called()
return self
def then_should_not_call_person_repo_update(self):
self.person_repo.update_person.assert_not_called()
return self
def then_should_raise_invalid_params_for_wrong_confirmation_token(self):
assert type(self.error) is InvalidEntityException
assert self.error.source == 'confirmation_token'
assert self.error.code == 'invalid'
assert str(self.error) == 'Invalid confirmation token'
return self
class TestLoginEmailInteractor:
def test_when_email_doesnt_exists(self):
TestLoginEmailInteractor.ScenarioMaker() \
.given_an_email() \
.given_a_person_repo_that_raises_entity_does_not_exist() \
.when_login_email_interactor_executed() \
.then_should_call_get_person_repo_with_the_email() \
.then_should_not_call_login_token_repo() \
.then_should_not_call_mailer_service()
def test_when_email_has_not_been_confirmed(self):
TestLoginEmailInteractor.ScenarioMaker() \
.given_an_email() \
.given_a_person_repo_that_returns_person_without_confirmed_email() \
.when_login_email_interactor_executed() \
.then_should_call_get_person_repo_with_the_email() \
.then_should_not_call_login_token_repo() \
.then_should_not_call_mailer_service()
def test_success(self):
TestLoginEmailInteractor.ScenarioMaker() \
.given_an_email() \
.given_a_person() \
.given_a_person_repo_that_returns_that_person() \
.given_a_profile() \
.given_a_profile_repo_that_returns_that_profile() \
.given_a_login_token() \
.given_a_login_token_repo_that_returns_that_token() \
.when_login_email_interactor_executed() \
.then_should_call_get_person_repo_with_the_email() \
.then_should_call_get_profile_repo_with_the_person_id() \
.then_should_call_delete_login_tokens_with_person_id() \
.then_should_call_create_login_token_with_person_id() \
.then_should_send_mail_with_token_username_to_person_email()
class ScenarioMaker:
def __init__(self):
self.person_repo = Mock()
self.profile_repo = Mock()
self.login_token_repo = Mock()
self.mailer_service = Mock()
def given_an_email(self):
self.email = '<EMAIL>'
return self
def given_a_person(self):
self.person = Person(id='8', email='e', is_email_confirmed=True)
return self
def given_a_profile(self):
self.profile = Profile(person_id=self.person.id, username='u')
return self
def given_a_login_token(self):
self.login_token = '<PASSWORD>'
return self
def given_a_person_repo_that_raises_entity_does_not_exist(self):
self.person_repo.get_person.side_effect = EntityDoesNotExistException()
return self
def given_a_person_repo_that_returns_person_without_confirmed_email(self):
self.person_repo.get_person.return_value = Person(id='5', is_email_confirmed=False)
return self
def given_a_person_repo_that_returns_that_person(self):
self.person_repo.get_person.return_value = self.person
return self
def given_a_profile_repo_that_returns_that_profile(self):
self.profile_repo.get_profile.return_value = self.profile
return self
def given_a_login_token_repo_that_returns_that_token(self):
self.login_token_repo.create_login_token.return_value = self.login_token
return self
def when_login_email_interactor_executed(self):
try:
interactor = LoginEmailInteractor(login_token_repo=self.login_token_repo,
person_repo=self.person_repo,
profile_repo=self.profile_repo,
mailer_service=self.mailer_service)
self.result = interactor.set_params(email=self.email).execute()
except Exception as e:
self.error = e
return self
def then_should_call_get_person_repo_with_the_email(self):
self.person_repo.get_person.assert_called_once_with(email=self.email)
return self
def then_should_call_get_profile_repo_with_the_person_id(self):
self.profile_repo.get_profile.assert_called_once_with(person_id=self.person.id,
logged_person_id=self.person.id)
return self
def then_should_not_call_login_token_repo(self):
self.login_token_repo.delete_login_tokens.assert_not_called()
self.login_token_repo.create_login_token.assert_not_called()
return self
def then_should_not_call_mailer_service(self):
self.mailer_service.send_login_mail.assert_not_called()
return self
def then_should_call_delete_login_tokens_with_person_id(self):
self.login_token_repo.delete_login_tokens.assert_called_once_with(person_id=self.person.id)
return self
def then_should_call_create_login_token_with_person_id(self):
self.login_token_repo.create_login_token.assert_called_once_with(person_id=self.person.id)
return self
def then_should_send_mail_with_token_username_to_person_email(self):
self.mailer_service.send_login_mail.assert_called_once_with(login_token=self.login_token,
username=self.profile.username,
email=self.person.email)
return self
class TestLoginInteractor:
def test_returns_auth_token_and_person(self):
TestLoginInteractor.ScenarioMaker() \
.given_a_login_token() \
.given_a_person_id() \
.given_a_login_token_repo_that_returns_that_person_id() \
.given_a_person() \
.given_a_person_repo_that_returns_that_person() \
.given_an_auth_token() \
.given_an_auth_token_repo_that_returns_that_auth_token() \
.when_login_interactor_is_executed() \
.then_should_call_login_token_repo_get_person_id_with_login_token() \
.then_should_call_login_token_repo_delete_login_token_with_person_id() \
.then_should_call_auth_token_repo_get_auth_token_with_person_id() \
.then_should_return_auth_token()
class ScenarioMaker:
def given_a_login_token(self):
self.login_token = 'tra'
return self
def given_a_person_id(self):
self.person_id = '4'
return self
def given_a_login_token_repo_that_returns_that_person_id(self):
self.login_token_repo = Mock()
self.login_token_repo.get_person_id.return_value = self.person_id
return self
def given_a_person(self):
self.person = Person(id='9', email='e')
return self
def given_a_person_repo_that_returns_that_person(self):
self.person_repo = Mock()
self.person_repo.get_person.return_value = self.person
return self
def given_an_auth_token(self):
self.auth_token = AuthToken('9', 'a', 'r')
return self
def given_an_auth_token_repo_that_returns_that_auth_token(self):
self.auth_token_repo = Mock()
self.auth_token_repo.get_auth_token.return_value = self.auth_token
return self
def when_login_interactor_is_executed(self):
self.result = LoginInteractor(self.person_repo, self.auth_token_repo, self.login_token_repo) \
.set_params(login_token=self.login_token).execute()
return self
def then_should_call_login_token_repo_get_person_id_with_login_token(self):
self.login_token_repo.get_person_id.assert_called_once_with(login_token=self.login_token)
return self
def then_should_call_login_token_repo_delete_login_token_with_person_id(self):
self.login_token_repo.delete_login_tokens.assert_called_once_with(person_id=self.person_id)
return self
def then_should_call_auth_token_repo_get_auth_token_with_person_id(self):
self.auth_token_repo.get_auth_token.assert_called_once_with(person_id=self.person_id)
return self
def then_should_return_auth_token(self):
assert self.result == self.auth_token
return self
class TestBlockInteractor:
def test_block_interactor_raises_not_logged(self):
TestBlockInteractor.ScenarioMaker() \
.given_a_permissions_validator_that_raises_no_logged() \
.when_block(logged_person_id='4', target_username='blocked') \
.then_should_call_permissions_validator('4') \
.then_should_not_call_block() \
.then_should_raise_no_logged_exception()
def test_already_blocked_returns_true(self):
TestBlockInteractor.ScenarioMaker() \
.given_a_permissions_validator_that_validates() \
.given_a_block_repo_that_returns_to_block_exists(True) \
.given_a_profile_repo_that_returns_profile_with_person_id('33') \
.when_block(logged_person_id='4', target_username='blocked') \
.then_should_call_permissions_validator('4') \
.then_should_get_profile_with(username='blocked', logged_person_id='4') \
.then_should_call_block_exists('4', '33') \
.then_should_not_call_block() \
.then_should_return_true()
def test_self_block_raises_conflict_exception(self):
TestBlockInteractor.ScenarioMaker() \
.given_a_permissions_validator_that_validates() \
.given_a_block_repo_that_returns_to_block_exists(False) \
.given_a_profile_repo_that_returns_profile_with_person_id('4') \
.when_block(logged_person_id='4', target_username='myself') \
.then_should_call_permissions_validator('4') \
.then_should_get_profile_with(username='myself', logged_person_id='4') \
.then_should_not_call_block() \
.then_should_raise_conflict_exception()
def test_block_unsaves_target_id_experiences_block_and_returns_true(self):
TestBlockInteractor.ScenarioMaker() \
.given_a_permissions_validator_that_validates() \
.given_a_block_repo_that_returns_to_block_exists(False) \
.given_a_block_repo_that_returns_to_block(True) \
.given_a_experience_repo_that_returns([
Experience('t', 'd', id='11', author_id='9'),
Experience('t', 'd', id='12', author_id='33'),
Experience('t', 'd', id='13', author_id='9'),
Experience('t', 'd', id='14', author_id='33'),
Experience('t', 'd', id='15', author_id='9'),
Experience('t', 'd', id='16', author_id='9')]) \
.given_a_profile_repo_that_returns_profile_with_person_id('33') \
.when_block(logged_person_id='4', target_username='blocked') \
.then_should_call_permissions_validator('4') \
.then_should_call_block_exists('4', '33') \
.then_should_get_saved_experiences('4') \
.then_should_get_profile_with(username='blocked', logged_person_id='4') \
.then_should_unsave('4', ['12', '14']) \
.then_should_call_block('4', '33') \
.then_should_return_true()
class ScenarioMaker:
def __init__(self):
self.permissions_validator = Mock()
self.block_repo = Mock()
self.experience_repo = Mock()
self.profile_repo = Mock()
self.unsave_experience_interactor = Mock()
self.unsave_experience_interactor.set_params.return_value = self.unsave_experience_interactor
def given_a_permissions_validator_that_validates(self):
self.permissions_validator.return_value = True
return self
def given_a_permissions_validator_that_raises_no_logged(self):
self.permissions_validator.validate_permissions.side_effect = NoLoggedException()
return self
def given_a_block_repo_that_returns_to_block_exists(self, exists):
self.block_repo.block_exists.return_value = exists
return self
def given_a_block_repo_that_returns_to_block(self, exists):
self.block_repo.block.return_value = exists
return self
def given_a_profile_repo_that_returns_profile_with_person_id(self, person_id):
self.profile_repo.get_profile.return_value = Profile(person_id=person_id)
return self
def given_a_experience_repo_that_returns(self, experiences):
self.experience_repo.get_saved_experiences.return_value = {'results': experiences, 'next_offset': None}
return self
def when_block(self, logged_person_id, target_username):
try:
self.result = BlockInteractor(permissions_validator=self.permissions_validator,
block_repo=self.block_repo,
experience_repo=self.experience_repo,
profile_repo=self.profile_repo,
save_unsave_experience_interactor=self.unsave_experience_interactor) \
.set_params(logged_person_id=logged_person_id, target_username=target_username).execute()
except Exception as e:
self.error = e
return self
def then_should_call_permissions_validator(self, person_id):
self.permissions_validator.validate_permissions.assert_called_once_with(logged_person_id=person_id)
return self
def then_should_call_block_exists(self, creator_id, target_id):
self.block_repo.block_exists.assert_called_once_with(creator_id=creator_id, target_id=target_id)
return self
def then_should_not_call_block(self):
self.block_repo.block.assert_not_called()
return self
def then_should_get_saved_experiences(self, person_id):
self.experience_repo.get_saved_experiences.assert_called_once_with(logged_person_id=person_id,
offset=0, limit=1000000)
return self
def then_should_unsave(self, person_id, experiences_ids):
self.unsave_experience_interactor.set_params.assert_has_calls(
[call(action=SaveUnsaveExperienceInteractor.Action.UNSAVE,
experience_id=id, logged_person_id=person_id) for id in experiences_ids])
self.unsave_experience_interactor.execute.assert_has_calls([call() for id in experiences_ids])
return self
def then_should_call_block(self, creator_id, target_id):
self.block_repo.block.assert_called_once_with(creator_id=creator_id, target_id=target_id)
return self
def then_should_get_profile_with(self, username, logged_person_id):
self.profile_repo.get_profile.assert_called_once_with(username=username, logged_person_id=logged_person_id)
return self
def then_should_raise_no_logged_exception(self):
assert type(self.error) is NoLoggedException
return self
def then_should_raise_conflict_exception(self):
assert type(self.error) is ConflictException
assert self.error == ConflictException(source='person', code='conflict', message='Cannot block yourself')
return self
def then_should_return_true(self):
assert self.result is True
return self
```
#### File: people/tests/unit_test_views.py
```python
from mock import Mock
from people.entities import AuthToken, Person
from people.views import PeopleView, PersonView, EmailConfirmationView, LoginEmailView, LoginView, BlockView
from people.serializers import serialize_auth_token
class TestPeopleView:
def test_post_returns_auth_token_serialized_and_201(self):
TestPeopleView._ScenarioMaker() \
.given_an_auth_token() \
.given_an_interactor_that_returns_that_auth_token() \
.given_a_client_secret_key() \
.when_post_is_called_with_that_key() \
.then_interactor_receives_that_key() \
.then_response_status_is_201() \
.then_response_body_is_auth_token_serialized()
class _ScenarioMaker:
def __init__(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.auth_token = None
self.client_secret_key = None
self.response = None
def given_an_auth_token(self):
self.auth_token = AuthToken(person_id='2', access_token='A', refresh_token='R')
return self
def given_a_client_secret_key(self):
self.client_secret_key = 'scrt_ky'
return self
def given_an_interactor_that_returns_that_auth_token(self):
self.interactor_mock.execute.return_value = self.auth_token
return self
def when_post_is_called_with_that_key(self):
view = PeopleView(create_guest_person_and_return_auth_token_interactor=self.interactor_mock)
self.body, self.status = view.post(client_secret_key=self.client_secret_key)
return self
def then_interactor_receives_that_key(self):
self.interactor_mock.set_params.assert_called_once_with(client_secret_key=self.client_secret_key)
return self
def then_response_status_is_201(self):
assert self.status == 201
return self
def then_response_body_is_auth_token_serialized(self):
assert self.body == serialize_auth_token(self.auth_token)
return self
class TestPersonView:
def test_patch_returns_person_serialized_and_200(self):
TestPersonView._ScenarioMaker() \
.given_a_username() \
.given_an_email() \
.given_a_logged_person_id() \
.given_a_person() \
.given_an_interactor_that_returns_true() \
.when_patch_is_called_with_that_params() \
.then_interactor_receives_that_params() \
.then_response_status_is_204() \
.then_response_body_should_be_none()
class _ScenarioMaker:
def __init__(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.username = None
self.email = None
self.logged_person_id = None
self.person = None
self.response = None
def given_a_username(self):
self.username = 'usr.nm'
return self
def given_an_email(self):
self.email = '<EMAIL>'
return self
def given_a_logged_person_id(self):
self.logged_person_id = '4'
return self
def given_a_person(self):
self.person = Person(id='8', email='b', is_email_confirmed=False)
return self
def given_an_interactor_that_returns_true(self):
self.interactor_mock.execute.return_value = True
return self
def when_patch_is_called_with_that_params(self):
view = PersonView(register_username_and_email_interactor=self.interactor_mock)
self.body, self.status = view.patch(logged_person_id=self.logged_person_id,
username=self.username, email=self.email)
return self
def then_interactor_receives_that_params(self):
self.interactor_mock.set_params.assert_called_once_with(logged_person_id=self.logged_person_id,
username=self.username, email=self.email)
return self
def then_response_status_is_204(self):
assert self.status == 204
return self
def then_response_body_should_be_none(self):
assert self.body is None
return self
class TestEmailConfirmationView:
def test_post_returns_204(self):
TestEmailConfirmationView._ScenarioMaker() \
.given_a_logged_person_id() \
.given_a_confirmation_token() \
.given_an_interactor_that_returns_true() \
.when_post_is_called_with_that_params() \
.then_interactor_receives_that_params() \
.then_response_status_is_204() \
.then_response_body_should_be_empty()
class _ScenarioMaker:
def __init__(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.logged_person_id = None
self.confirmation_token = None
self.response = None
def given_a_logged_person_id(self):
self.logged_person_id = '4'
return self
def given_a_confirmation_token(self):
self.confirmation_token = 'ABC'
return self
def given_an_interactor_that_returns_true(self):
self.interactor_mock.execute.return_value = True
return self
def when_post_is_called_with_that_params(self):
view = EmailConfirmationView(confirm_email_interactor=self.interactor_mock)
self.body, self.status = view.post(logged_person_id=self.logged_person_id,
confirmation_token=self.confirmation_token)
return self
def then_interactor_receives_that_params(self):
self.interactor_mock.set_params.assert_called_once_with(logged_person_id=self.logged_person_id,
confirmation_token=self.confirmation_token)
return self
def then_response_status_is_204(self):
assert self.status == 204
return self
def then_response_body_should_be_empty(self):
assert self.body is None
return self
class TestLoginEmailView:
def test_post_returns_204(self):
TestLoginEmailView.ScenarioMaker() \
.given_an_email() \
.when_post_is_called_with_that_params() \
.then_interactor_receives_that_params() \
.then_response_status_is_204()
class ScenarioMaker:
def __init__(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.email = None
self.response = None
def given_an_email(self):
self.email = 'e'
return self
def when_post_is_called_with_that_params(self):
view = LoginEmailView(login_email_interactor=self.interactor_mock)
self.body, self.status = view.post(email=self.email)
return self
def then_interactor_receives_that_params(self):
self.interactor_mock.set_params.assert_called_once_with(email=self.email)
return self
def then_response_status_is_204(self):
assert self.status == 204
assert self.body is None
return self
class TestLoginView:
def test_post_returns_200_and_person_and_auth_token(self):
TestLoginView.ScenarioMaker() \
.given_a_login_token() \
.given_an_auth_token() \
.given_an_interactor_that_returns_auth_token() \
.when_post_is_called_with_that_params() \
.then_interactor_receives_that_params() \
.then_response_status_is_200() \
.then_response_content_is_auth_token_serialized()
class ScenarioMaker:
def given_a_login_token(self):
self.login_token = 'e'
return self
def given_an_auth_token(self):
self.auth_token = AuthToken('9', 'a', 'r')
return self
def given_an_interactor_that_returns_auth_token(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.interactor_mock.execute.return_value = self.auth_token
return self
def when_post_is_called_with_that_params(self):
view = LoginView(login_interactor=self.interactor_mock)
self.body, self.status = view.post(token=self.login_token)
return self
def then_interactor_receives_that_params(self):
self.interactor_mock.set_params.assert_called_once_with(login_token=self.login_token)
return self
def then_response_status_is_200(self):
assert self.status == 200
return self
def then_response_content_is_auth_token_serialized(self):
assert self.body == serialize_auth_token(self.auth_token)
return self
class TestBlockView:
def test_block_returns_201(self):
TestBlockView.ScenarioMaker() \
.given_an_interactor_that_returns_true() \
.when_post_is_called_with(logged_person_id='5', username='bloo') \
.then_interactor_receives(logged_person_id='5', username='bloo') \
.then_response_status_is_201()
class ScenarioMaker:
def given_an_interactor_that_returns_true(self):
self.interactor_mock = Mock()
self.interactor_mock.set_params.return_value = self.interactor_mock
self.interactor_mock.execute.return_value = True
return self
def when_post_is_called_with(self, logged_person_id, username):
view = BlockView(block_interactor=self.interactor_mock)
self.body, self.status = view.post(logged_person_id=logged_person_id, username=username)
return self
def then_interactor_receives(self, logged_person_id, username):
self.interactor_mock.set_params.assert_called_once_with(logged_person_id=logged_person_id,
target_username=username)
self.interactor_mock.execute.assert_called_once_with()
return self
def then_response_status_is_201(self):
assert self.status == 201
assert self.body is None
return self
```
#### File: pachatary-api/people/views.py
```python
from pachatary.decorators import serialize_exceptions
from .serializers import serialize_auth_token
class PeopleView:
def __init__(self, create_guest_person_and_return_auth_token_interactor=None):
self.create_guest_person_and_return_auth_token_interactor = create_guest_person_and_return_auth_token_interactor
@serialize_exceptions
def post(self, client_secret_key, logged_person_id=None):
auth_token = self.create_guest_person_and_return_auth_token_interactor \
.set_params(client_secret_key=client_secret_key).execute()
body = serialize_auth_token(auth_token)
status = 201
return body, status
class PersonView:
def __init__(self, register_username_and_email_interactor=None):
self.register_username_and_email_interactor = register_username_and_email_interactor
@serialize_exceptions
def patch(self, logged_person_id, username, email):
self.register_username_and_email_interactor \
.set_params(logged_person_id=logged_person_id, username=username, email=email).execute()
body = None
status = 204
return body, status
class EmailConfirmationView:
def __init__(self, confirm_email_interactor=None):
self.confirm_email_interactor = confirm_email_interactor
@serialize_exceptions
def post(self, logged_person_id, confirmation_token):
self.confirm_email_interactor.set_params(logged_person_id=logged_person_id,
confirmation_token=confirmation_token).execute()
body = None
status = 204
return body, status
class LoginEmailView:
def __init__(self, login_email_interactor=None):
self.login_email_interactor = login_email_interactor
@serialize_exceptions
def post(self, email, logged_person_id=None):
self.login_email_interactor.set_params(email=email).execute()
body = None
status = 204
return body, status
class LoginView:
def __init__(self, login_interactor=None):
self.login_interactor = login_interactor
@serialize_exceptions
def post(self, token, logged_person_id=None):
auth_token = self.login_interactor.set_params(login_token=token).execute()
body = serialize_auth_token(auth_token)
status = 200
return body, status
class BlockView:
def __init__(self, block_interactor=None):
self.block_interactor = block_interactor
@serialize_exceptions
def post(self, username, logged_person_id):
self.block_interactor.set_params(logged_person_id=logged_person_id, target_username=username).execute()
body = None
status = 201
return body, status
```
#### File: pachatary-api/profiles/entities.py
```python
class Profile:
def __init__(self, person_id=None, username=None, bio='', picture=None, is_me=False):
self._person_id = person_id
self._username = username
self._bio = bio
self._picture = picture
self._is_me = is_me
@property
def person_id(self):
return self._person_id
@property
def username(self):
return self._username
@property
def bio(self):
return self._bio
@property
def picture(self):
return self._picture
@property
def is_me(self):
return self._is_me
def builder(self):
return Profile.Builder(self)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
class Builder:
def __init__(self, profile):
self._person_id = profile.person_id
self._username = profile.username
self._bio = profile.bio
self._picture = profile.picture
self._is_me = profile.is_me
def bio(self, bio):
self._bio = bio
return self
def username(self, username):
self._username = username
return self
def build(self):
return Profile(person_id=self._person_id, username=self._username,
bio=self._bio, picture=self._picture, is_me=self._is_me)
```
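The `Profile` entity above exposes read-only properties plus a nested `Builder`, so callers derive modified copies instead of mutating an instance. A minimal sketch of that flow (the values are illustrative, not taken from the repository):
```python
from profiles.entities import Profile

# Build an initial profile, then derive an updated copy without mutating it.
profile = Profile(person_id='1', username='old_name', bio='', is_me=True)
updated = profile.builder().username('new_name').bio('short bio').build()

assert profile.username == 'old_name'       # original instance is untouched
assert updated.username == 'new_name'       # the copy carries the new fields
assert updated.person_id == profile.person_id
```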
#### File: redirects/tests/test_integration.py
```python
from django.conf import settings
from django.test import TestCase, Client
from django.urls import reverse
from experiences.models import ORMExperience
from people.models import ORMPerson
from profiles.models import ORMProfile
class RedirectConfirmEmailTestCase(TestCase):
def test_when_called_redirect_view_redirects_to_apps_url(self):
RedirectConfirmEmailTestCase.ScenarioMaker() \
.when_call_get_email_confirmation() \
.then_response_should_be_a_redirect_to_app_deeplink_with_params()
class ScenarioMaker:
def when_call_get_email_confirmation(self):
client = Client()
self.response = client.get('{}?{}'.format(reverse('email-confirmation-redirect'), 'token=ABXZ'))
return self
def then_response_should_be_a_redirect_to_app_deeplink_with_params(self):
assert self.response.status_code == 302
assert self.response['Location'] == '{}{}?token=ABXZ'.format(settings.APP_DEEPLINK_DOMAIN,
'/people/me/email-confirmation')
return self
class RedirectLoginEmailTestCase(TestCase):
def test_when_called_redirect_view_redirects_to_apps_url(self):
RedirectLoginEmailTestCase.ScenarioMaker() \
.when_call_login_email_redirect() \
.then_response_should_be_a_redirect_to_app_deeplink_with_params()
class ScenarioMaker:
def when_call_login_email_redirect(self):
client = Client()
self.response = client.get('{}?{}'.format(reverse('login-redirect'), 'token=ABXZ'))
return self
def then_response_should_be_a_redirect_to_app_deeplink_with_params(self):
assert self.response.status_code == 302
assert self.response['Location'] == '{}{}?token=ABXZ'.format(settings.APP_DEEPLINK_DOMAIN,
'/people/me/login')
return self
class RedirectExperienceTestCase(TestCase):
def test_when_there_is_a_dynamic_link_wraps_public_domain_url(self):
RedirectExperienceTestCase.ScenarioMaker() \
.given_an_experience_on_db(title='a', description='d', share_id='AsdE43E4', pic='url') \
.given_a_public_domain('http://pachatary.com') \
.given_a_dynamic_link('http://dynamic.link/link={}&other=param') \
.when_call_experience_redirect('AsdE43E4') \
.then_response_should_be_a_redirect_to(
'http://dynamic.link/link=http://pachatary.com/e/AsdE43E4&other=param'
'&st=a&sd=d&si=%2Fmedia%2Furl.small')
def test_when_there_is_no_dynamic_link_returns_deep_link(self):
RedirectExperienceTestCase.ScenarioMaker() \
.given_a_deep_link_domain('pachatary://app') \
.given_a_dynamic_link('') \
.when_call_experience_redirect('AsdE43E4') \
.then_response_should_be_a_redirect_to('pachatary://app/experiences/AsdE43E4')
class ScenarioMaker:
def given_an_experience_on_db(self, title, description, share_id, pic):
orm_person = ORMPerson.objects.create()
ORMProfile.objects.create(person=orm_person, username='u')
experience = ORMExperience.objects.create(title=title, description=description,
share_id=share_id, author=orm_person)
experience.picture = pic
experience.save()
return self
def given_a_public_domain(self, public_domain):
settings.PUBLIC_DOMAIN = public_domain
return self
def given_a_dynamic_link(self, dynamic_link):
settings.DYNAMIC_LINK = dynamic_link
return self
def given_a_deep_link_domain(self, deep_link_domain):
settings.APP_DEEPLINK_DOMAIN = deep_link_domain
return self
def when_call_experience_redirect(self, share_id):
client = Client()
self.response = client.get(reverse('experience-redirect', args=[share_id]))
return self
def then_response_should_be_a_redirect_to(self, url):
assert self.response.status_code == 302
assert self.response['Location'] == url
return self
class RedirectProfileTestCase(TestCase):
def test_when_there_is_a_dynamic_link_wraps_public_domain_url(self):
RedirectProfileTestCase.ScenarioMaker() \
.given_a_profile(username='a_b.c', bio='my info', pic='url') \
.given_a_public_domain('http://pachatary.com') \
.given_a_dynamic_link('http://dynamic.link/link={}&other=param') \
.when_call_profile_redirect('a_b.c') \
.then_response_should_be_a_redirect_to(
'http://dynamic.link/link=http://pachatary.com/p/a_b.c&other=param'
'&st=%40a_b.c&sd=my+info&si=%2Fmedia%2Furl.small')
def test_when_there_is_no_dynamic_link_returns_deep_link(self):
RedirectProfileTestCase.ScenarioMaker() \
.given_a_deep_link_domain('pachatary://app') \
.given_a_dynamic_link('') \
.when_call_profile_redirect('a_b.c') \
.then_response_should_be_a_redirect_to('pachatary://app/profiles/a_b.c')
class ScenarioMaker:
def given_a_profile(self, username, bio, pic):
orm_person = ORMPerson.objects.create()
profile = ORMProfile.objects.create(username=username, bio=bio, person=orm_person)
profile.picture = pic
profile.save()
return self
def given_a_public_domain(self, public_domain):
settings.PUBLIC_DOMAIN = public_domain
return self
def given_a_dynamic_link(self, dynamic_link):
settings.DYNAMIC_LINK = dynamic_link
return self
def given_a_deep_link_domain(self, deep_link_domain):
settings.APP_DEEPLINK_DOMAIN = deep_link_domain
return self
def when_call_profile_redirect(self, username):
client = Client()
self.response = client.get(reverse('profile-redirect', args=[username]))
return self
def then_response_should_be_a_redirect_to(self, url):
assert self.response.status_code == 302
assert self.response['Location'] == url
return self
class RedirectOpenTestCase(TestCase):
def test_when_there_is_a_dynamic_link_wraps_public_domain_url(self):
RedirectOpenTestCase.ScenarioMaker() \
.given_a_public_domain('http://pachatary.com') \
.given_a_dynamic_link('http://dynamic.link/link={}&other=param') \
.when_call_open_redirect() \
.then_response_should_be_a_redirect_to('http://dynamic.link/link=http://pachatary.com/open&other=param')
def test_when_there_is_no_dynamic_link_returns_deep_link(self):
RedirectOpenTestCase.ScenarioMaker() \
.given_a_deep_link_domain('pachatary://app') \
.given_a_dynamic_link('') \
.when_call_open_redirect() \
.then_response_should_be_a_redirect_to('pachatary://app/open')
class ScenarioMaker:
def given_a_public_domain(self, public_domain):
settings.PUBLIC_DOMAIN = public_domain
return self
def given_a_dynamic_link(self, dynamic_link):
settings.DYNAMIC_LINK = dynamic_link
return self
def given_a_deep_link_domain(self, deep_link_domain):
settings.APP_DEEPLINK_DOMAIN = deep_link_domain
return self
def when_call_open_redirect(self):
client = Client()
self.response = client.get(reverse('open-redirect'))
return self
def then_response_should_be_a_redirect_to(self, url):
assert self.response.status_code == 302
assert self.response['Location'] == url
return self
```
#### File: pachatary-api/scenes/validators.py
```python
from pachatary.exceptions import InvalidEntityException, EntityDoesNotExistException
class SceneValidator:
MIN_TITLE_LENGTH = 1
MAX_TITLE_LENGTH = 80
MIN_LATITUDE = -90
MAX_LATITUDE = +90
MIN_LONGITUDE = -180
MAX_LONGITUDE = +180
def __init__(self, experience_repo):
self.experience_repo = experience_repo
def validate_scene(self, scene):
if scene.title is None:
raise InvalidEntityException(source='title', code='empty_attribute', message='Title cannot be empty')
if type(scene.title) is not str:
raise InvalidEntityException(source='title', code='wrong_type', message='Title must be string')
if len(scene.title) < SceneValidator.MIN_TITLE_LENGTH or len(scene.title) > SceneValidator.MAX_TITLE_LENGTH:
raise InvalidEntityException(source='title', code='wrong_size',
message='Title must be between 1 and 80 chars')
if scene.description is not None and type(scene.description) is not str:
raise InvalidEntityException(source='description', code='wrong_type', message='Description must be string')
if scene.latitude is None:
raise InvalidEntityException(source='latitude', code='empty_attribute', message='Latitude cannot be empty')
if not isinstance(scene.latitude, (int, float, complex)):
raise InvalidEntityException(source='latitude', code='wrong_type', message='Latitude must be numeric')
if scene.latitude < SceneValidator.MIN_LATITUDE or scene.latitude > SceneValidator.MAX_LATITUDE:
raise InvalidEntityException(source='latitude', code='wrong_size',
message='Latitude must be between -90 and +90')
if scene.longitude is None:
raise InvalidEntityException(source='longitude', code='empty_attribute',
message='Longitude cannot be empty')
if not isinstance(scene.longitude, (int, float, complex)):
raise InvalidEntityException(source='longitude', code='wrong_type', message='Longitude must be numeric')
if scene.longitude < SceneValidator.MIN_LONGITUDE or scene.longitude > SceneValidator.MAX_LONGITUDE:
raise InvalidEntityException(source='longitude', code='wrong_size',
message='Longitude must be between -180 and +180')
if scene.experience_id is None:
raise InvalidEntityException(source='experience_id', code='empty_attribute',
message='Experience id cannot be empty')
try:
self.experience_repo.get_experience(scene.experience_id)
except EntityDoesNotExistException:
raise InvalidEntityException(source='experience_id', code='does_not_exist',
message='Experience does not exist')
return True
class ScenePermissionsValidator:
def __init__(self, scene_repo, experience_permissions_validator):
self.scene_repo = scene_repo
self.experience_permissions_validator = experience_permissions_validator
def validate_permissions(self, logged_person_id, has_permissions_to_modify_scene):
scene = self.scene_repo.get_scene(id=has_permissions_to_modify_scene)
return self.experience_permissions_validator.validate_permissions(
logged_person_id=logged_person_id,
has_permissions_to_modify_experience=scene.experience_id)
``` |
{
"source": "jordigilh/assisted-test-infra",
"score": 2
} |
#### File: test_infra/helper_classes/infra_env.py
```python
import json
import logging
import os
from typing import List, Optional
import test_infra.utils.waiting
from junit_report import JunitTestCase
from test_infra import consts, utils
from test_infra.assisted_service_api import InventoryClient, models
from test_infra.helper_classes.config import BaseInfraEnvConfig
from test_infra.helper_classes.nodes import Nodes
class InfraEnv:
def __init__(self, api_client: InventoryClient, config: BaseInfraEnvConfig, nodes: Optional[Nodes] = None):
self._config = config
self.nodes = nodes
self.api_client = api_client
try:
infra_env = self._create()
except BaseException:
logging.exception("create")
raise
self._config.infra_env_id = self.id = infra_env.id
def _create(self):
if self._config.ignition_config_override:
ignition_config_override = json.dumps(self._config.ignition_config_override)
else:
ignition_config_override = None
return self.api_client.create_infra_env(
self._config.entity_name.get(),
pull_secret=self._config.pull_secret,
ssh_public_key=self._config.ssh_public_key,
openshift_version=self._config.openshift_version,
cluster_id=self._config.cluster_id,
static_network_config=self._config.static_network_config,
ignition_config_override=ignition_config_override,
proxy=self._config.proxy,
)
def update_config(self, **kwargs):
"""
Note that kwargs can contain values for overriding BaseInfraEnvConfig arguments.
The name (key) of each argument must match one of the BaseInfraEnvConfig arguments.
If a key doesn't exist in the config, a KeyError is raised.
"""
logging.info(f"Updating infra-env {self.id} configurations to {kwargs}")
for k, v in kwargs.items():
if not hasattr(self._config, k):
raise KeyError(f"The key {k} is not present in {self._config.__class__.__name__}")
setattr(self._config, k, v)
def prepare_infraenv(self, **kwargs):
self.update_config(**kwargs)
logging.info(f"Preparing for installation with infra-env configurations: infraenv_config={self._config}")
self.nodes.controller.log_configuration()
if self._config.download_image:
self.download_image(
iso_download_path=self._config.iso_download_path,
)
self.nodes.notify_iso_ready()
self.nodes.start_all()
self.wait_until_hosts_are_discovered(allow_insufficient=True)
@JunitTestCase()
def download_image(self, iso_download_path=None):
iso_download_path = iso_download_path or self._config.iso_download_path
# ensure file path exists before downloading
if not os.path.exists(iso_download_path):
utils.recreate_folder(os.path.dirname(iso_download_path), force_recreate=False)
self.api_client.download_infraenv_image(
infraenv_id=self.id,
image_path=iso_download_path,
)
@JunitTestCase()
def wait_until_hosts_are_discovered(self, nodes_count: int, allow_insufficient=False):
statuses = [consts.NodesStatus.KNOWN_UNBOUND]
if allow_insufficient:
statuses.append(consts.NodesStatus.INSUFFICIENT_UNBOUND)
test_infra.utils.waiting.wait_till_all_infra_env_hosts_are_in_status(
client=self.api_client,
infra_env_id=self.id,
nodes_count=nodes_count,
statuses=statuses,
timeout=consts.NODES_REGISTERED_TIMEOUT,
)
def update_host(self, host_id: str, host_role: Optional[str] = None, host_name: Optional[str] = None):
self.api_client.update_host(infra_env_id=self.id, host_id=host_id, host_role=host_role, host_name=host_name)
def bind_host(self, host_id: str, cluster_id: str) -> None:
self.api_client.bind_host(infra_env_id=self.id, host_id=host_id, cluster_id=cluster_id)
def unbind_host(self, host_id: str) -> None:
self.api_client.unbind_host(infra_env_id=self.id, host_id=host_id)
def delete_host(self, host_id: str) -> None:
self.api_client.deregister_host(infra_env_id=self.id, host_id=host_id)
def get_discovery_ignition(self) -> str:
return self.api_client.get_discovery_ignition(infra_env_id=self.id)
def patch_discovery_ignition(self, ignition_info: str) -> str:
self.api_client.patch_discovery_ignition(infra_env_id=self.id, ignition_info=ignition_info)
def get_details(self) -> models.infra_env.InfraEnv:
return self.api_client.get_infra_env(infra_env_id=self.id)
def update_proxy(self, proxy: models.Proxy) -> None:
self.update_config(proxy=proxy)
infra_env_update_params = models.InfraEnvUpdateParams(proxy=self._config.proxy)
self.api_client.update_infra_env(infra_env_id=self.id, infra_env_update_params=infra_env_update_params)
def select_host_installation_disk(self, host_id: str, disk_paths: List[dict]) -> None:
self.api_client.select_installation_disk(infra_env_id=self.id, host_id=host_id, disk_paths=disk_paths)
``` |
{
"source": "jordij/menorkayak",
"score": 3
} |
#### File: core/templatetags/core_tags.py
```python
from django.template import Library, TemplateSyntaxError
register = Library()
class RomanError(Exception):
pass
class OutOfRangeError(RomanError):
pass
class NotIntegerError(RomanError):
pass
ROMAN_NUMBER_MAP = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def to_roman(n):
"""convert integer to Roman numeral"""
if not isinstance(n, int):
try:
n = int(n)
except ValueError:
raise NotIntegerError("non-integers cannot be converted")
if not (0 < n < 4000):
raise OutOfRangeError("number out of range (must be 1..3999)")
result = ""
for numeral, integer in ROMAN_NUMBER_MAP:
while n >= integer:
result += numeral
n -= integer
return result
@register.filter
def roman_number(value):
"""
Converts a number to its roman value
Example usage::
{{ 2007|roman_number }}
{{ "2007"|roman_number }}
{{ pub_date|date:"Y"|roman_number }}
"""
try:
value = to_roman(value)
except RomanError as e:
raise TemplateSyntaxError("roman_number error: %s" % str(e))
return value
``` |
{
"source": "jordij/nzhuts",
"score": 2
} |
#### File: nzhuts/core/signals.py
```python
from django.db.models.signals import post_save
from django.dispatch import receiver
from nzhuts.core.models import HutPage, HutPageFacility
@receiver(post_save, sender=HutPage)
def post_hut_save(sender, instance, **kwargs):
existing_facilities = instance.facilities.all()
# remove facilities that are no longer present in raw_facilities
for existing_facility in existing_facilities:
if existing_facility.name not in instance.raw_facilities:
instance.facilities.remove(existing_facility)
# add the current facilities
if instance.raw_facilities:
for facility in instance.raw_facilities:
instance.facilities.add(facility)
``` |
{
"source": "JordiManyer/bddc",
"score": 2
} |
#### File: src/libs_v0/bddc.py
```python
import numpy as np
import math
import scipy.sparse.linalg
from scipy.sparse import csr_matrix as csr
from scipy.sparse import bmat
# Class containing all the data that should be distributed per proc
class fineProc():
def __init__(self):
self.nI = 0 # Number of interior nodes
self.nB = 0 # Number of interface nodes
self.nC = 0 # Number of local constraints
self.nodesI = [] # List of interor nodes
self.nodesB = [] # List of interface nodes
self.constr = [] # List of constraints
self.Aii = None # Interior-Interior matrix
self.Aib = None # Interior-Interface matrix
self.Abb = None # Interface-Interface matrix
self.C = None # Constraints matrix
self.Wc = None # Weights for constraints
self.Wb = None # Weights for interface nodes
self.Phi = None # Local coarse eigenvectors
self.Lambda = None # Local coarse lagrange multipliers
self.invAii = None
self.invFine = None
class coarseProc():
def __init__(self):
self.nC = 0 # Total number of constraints
self.S0 = None # Coarse system matrix
class bddc():
def __init__(self, n, A, mesh):
self.n = n
self.nP = mesh.nP
self.fine = []
for i in range(self.nP):
self.fine.append(fineProc())
self.initFine(n, A, mesh)
self.coarse = coarseProc()
self.initCoarse(n, A, mesh)
def initFine(self, n, A, mesh):
# Collect local indexes
for k in range(n):
if (mesh.parts[k] == -2 ): # Interface node - edge
for j in range(len(mesh.nodeMap[k])):
iP = mesh.parts[mesh.nodeMap[k][j]]
if ( iP >= 0 and k not in self.fine[iP].nodesB):
self.fine[iP].nB += 1
self.fine[iP].nodesB.append(k)
elif (mesh.parts[k] == -3 ): # Interface node - corner
for j in mesh.nodeMap[k]:
for i in mesh.nodeMap[j]:
iP = mesh.parts[i]
if ( iP >= 0 and k not in self.fine[iP].nodesB):
self.fine[iP].nB += 1
self.fine[iP].nodesB.append(k)
else: # Interior node
iP = mesh.parts[k]
self.fine[iP].nI += 1
self.fine[iP].nodesI.append(k)
# Collect local matrices
for i in range(self.nP):
self.fine[i].Aii = A[np.ix_(self.fine[i].nodesI,self.fine[i].nodesI)]
self.fine[i].Aib = A[np.ix_(self.fine[i].nodesI,self.fine[i].nodesB)]
self.fine[i].Abb = A[np.ix_(self.fine[i].nodesB,self.fine[i].nodesB)]
# Manage constraints: Select local objects and create C
for i in range(mesh.nO):
for j in range(len(mesh.objects[i].parts)):
iP = mesh.objects[i].parts[j]
self.fine[iP].nC += 1
self.fine[iP].constr.append(i)
for i in range(self.nP):
rows = np.zeros(self.fine[i].nC+1,dtype=int)
cols = np.zeros(self.fine[i].nB,dtype=int)
data = np.zeros(self.fine[i].nB)
for j in range(self.fine[i].nC):
iC = self.fine[i].constr[j]
nNodes = len(mesh.objects[iC].nodes)
rows[j+1] = rows[j]
for k in range(nNodes):
local_index = self.fine[i].nodesB.index(mesh.objects[iC].nodes[k])
cols[rows[j+1]] = local_index
data[rows[j+1]] = 1/nNodes # Mean along the object
rows[j+1] += 1
self.fine[i].C = csr((data,cols,rows), shape=(self.fine[i].nC,self.fine[i].nB))
# Invert local problems
for i in range(self.nP):
self.fine[i].invAii = scipy.sparse.linalg.factorized(self.fine[i].Aii)
Aaux = bmat([[self.fine[i].Aii , self.fine[i].Aib , None ] ,
[self.fine[i].Aib.transpose() , self.fine[i].Abb , self.fine[i].C.transpose() ] ,
[None , self.fine[i].C , None ] ])
self.fine[i].invFine = scipy.sparse.linalg.factorized(Aaux)
return
def initCoarse(self, n, A, mesh):
# Get weights for interface nodes
for i in range(self.nP):
self.fine[i].Wc = np.zeros((self.fine[i].nC,1))
self.fine[i].Wb = np.zeros((self.fine[i].nB,1))
for j in range(self.fine[i].nC): # Weighting with partition count
self.fine[i].Wc[j] = 1.0/len(mesh.objects[self.fine[i].constr[j]].parts)
for k in mesh.objects[self.fine[i].constr[j]].nodes:
self.fine[i].Wb[self.fine[i].nodesB.index(k)] = self.fine[i].Wc[j]
# Get local eigenvectors from Neumann problem
for i in range(self.nP):
self.fine[i].Phi = np.zeros((self.fine[i].nB, self.fine[i].nC))
self.fine[i].Lambda = np.zeros((self.fine[i].nC, self.fine[i].nC))
for j in range(self.fine[i].nC):
x = np.zeros(self.fine[i].nI + self.fine[i].nB + self.fine[i].nC)
x[self.fine[i].nI + self.fine[i].nB + j] = 1.0
y = self.fine[i].invFine(x)
self.fine[i].Phi[:,j] = y[self.fine[i].nI:self.fine[i].nI+self.fine[i].nB]
self.fine[i].Lambda[:,j] = y[self.fine[i].nI+self.fine[i].nB:]
# Assemble coarse problem
# TODO: This should be done directly in CSR format, using a connectivity graph
# to find the fill-in structure.
self.coarse.nC = mesh.nO
S0 = np.zeros((self.coarse.nC,self.coarse.nC))
for i in range(self.nP):
S0i = -self.fine[i].Phi.transpose()@self.fine[i].C.transpose()@self.fine[i].Lambda
for j in range(self.fine[i].nC): # Weighting with partition count
S0i[j,:] *= self.fine[i].Wc[j]
S0i[:,j] *= self.fine[i].Wc[j]
S0[np.ix_(self.fine[i].constr,self.fine[i].constr)] += S0i
self.coarse.S0 = csr(S0)
# Factorize coarse system
self.coarse.invS0 = scipy.sparse.linalg.factorized(self.coarse.S0)
return
def interiorCorrection(self, r):
z = np.zeros((self.n,1))
for i in range(self.nP):
z[self.fine[i].nodesI] = self.fine[i].invAii(r[self.fine[i].nodesI])
return z
def applyBDDC(self, r):
z = np.zeros((self.n,1))
# First interior correction
for i in range(self.nP):
z[self.fine[i].nodesI] = self.fine[i].invAii(r[self.fine[i].nodesI])
r[self.fine[i].nodesB] -= self.fine[i].Aib.transpose() @ z[self.fine[i].nodesI]
# Fine Correction
for i in range(self.nP):
x = np.zeros((self.fine[i].nI + self.fine[i].nB + self.fine[i].nC,1))
x[self.fine[i].nI:self.fine[i].nI+self.fine[i].nB] = r[self.fine[i].nodesB]
y = self.fine[i].invFine(x)
z[self.fine[i].nodesB] += self.fine[i].Wb * y[self.fine[i].nI:self.fine[i].nI+self.fine[i].nB]
# Coarse Correction
r0 = np.zeros((self.coarse.nC,1))
for i in range(self.nP):
r0i = self.fine[i].Phi.transpose()@r[self.fine[i].nodesB]
r0[self.fine[i].constr] += self.fine[i].Wc * r0i
z0 = self.coarse.invS0(r0)
for i in range(self.nP):
z0i = self.fine[i].Phi @ z0[self.fine[i].constr]
z[self.fine[i].nodesB] += self.fine[i].Wb * z0i
# Second interior correction
for i in range(self.nP):
aux = self.fine[i].Aib @ z[self.fine[i].nodesB]
z[self.fine[i].nodesI] -= self.fine[i].invAii(aux)
return z.reshape((z.size,1))
```
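The bddc class above is meant to act as a preconditioner, with applyBDDC(r) returning the preconditioned residual. Below is a minimal sketch of plugging it into a plain preconditioned conjugate gradient loop; the pcg helper and the A, b and my_mesh objects are illustrative assumptions, not part of the repository.
```python
import numpy as np

def pcg(A, b, prec, tol=1e-8, max_iter=500):
    """Plain preconditioned CG; prec.applyBDDC(r) is assumed to return M^{-1} r."""
    x = np.zeros((b.size, 1))
    r = b.reshape((b.size, 1)) - A @ x
    z = prec.applyBDDC(r.copy())          # copy: applyBDDC modifies r in place
    p = z.copy()
    rz = float(r.T @ z)
    for _ in range(max_iter):
        Ap = A @ p
        alpha = rz / float(p.T @ Ap)
        x += alpha * p
        r -= alpha * Ap
        if np.linalg.norm(r) < tol:
            break
        z = prec.applyBDDC(r.copy())
        rz_new = float(r.T @ z)
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x

# Hypothetical driver: A is assumed to be a scipy.sparse CSR matrix and
# my_mesh a mesh() object built from the partitioned grid.
# prec = bddc(A.shape[0], A, my_mesh)
# x = pcg(A, b, prec)
```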
#### File: src/libs_v0/mesh.py
```python
import numpy as np
import math
from collections import deque
class object():
def __init__(self):
self.nodes = [] # List of nodes
self.parts = [] # List of adjacent partitions
self.type = -1 # -1 :: undefined, 0 :: vertex, 1:: edge, 2:: face
class mesh():
def __init__(self, nodeAdjMap, partition):
self.n = partition.size
self.nodeMap = nodeAdjMap # Node adjacency map, i.e nodeMap[i] = {Neighbors of node i}
self.nP = np.amax(partition) + 1 # Number of partitions
self.parts = partition # Mesh partition
self.nI = np.zeros(self.nP) # Number of interior DOFs for each partition
self.nB = 0 # Number of DOFs in the interface
self.nO = 0 # Number of objects
self.objects = [] # List of objects
self.getObjects()
def getObjects(self):
# First loop: Catalog DOF data
numNbors = np.zeros(self.n,dtype=int)
bNodes = deque()
for i in range(self.n):
if (self.parts[i] == -1):
bNodes.append(i)
self.nB += 1
for j in range(len(self.nodeMap[i])):
if (self.parts[self.nodeMap[i][j]] == -1):
numNbors[i] += 1
else:
self.nI[self.parts[i]] += 1
# Second loop: Group interface nodes into objects
while (len(bNodes) != 0): # While not all have been visited
obj = object()
q = deque()
q.append(bNodes.pop()) # Starting point
while (len(q) != 0): # While nodes remain in the queue
k = q.pop()
if (self.parts[k] == -1): # If the node has not been visited
parts = [] # Neighboring partitions to this node
for j in range(len(self.nodeMap[k])):
if (self.parts[self.nodeMap[k][j]] >= 0):
parts.append(self.parts[self.nodeMap[k][j]])
if (obj.nodes == []): # This is the first node in the object
obj.nodes.append(k) # Add node to object
obj.parts = parts # Add parts to object
self.parts[k] = -2 # Mark the node as visited
for j in range(len(self.nodeMap[k])): # Add neighbors to queue as candidates
if (self.parts[self.nodeMap[k][j]] == -1):
q.append(self.nodeMap[k][j])
elif (len(obj.parts) == len(parts)): # This is NOT the first node in the object
obj.nodes.append(k) # Add node to object
self.parts[k] = -2 # Mark the node as visited
for j in range(len(self.nodeMap[k])): # Add neighbors to queue as candidates
if (self.parts[self.nodeMap[k][j]] == -1):
q.append(self.nodeMap[k][j])
# When no more nodes remain in the queue, add object if not empty
if (len(obj.nodes) != 0):
self.nO += 1
self.objects.append(obj)
# Third loop: Object classification (faces/edges/corners)
for i in range(self.nO):
if (len(self.objects[i].nodes) == 1): # corner
self.objects[i].type = 0
c = self.objects[i].nodes[0]
self.parts[c] = -3
for j in self.nodeMap[c]:
for k in self.nodeMap[j]:
if (self.parts[k] >= 0 and self.parts[k] not in self.objects[i].parts):
self.objects[i].parts.append(self.parts[k])
else : # edge
self.objects[i].type = 1
return
```
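A minimal sketch of the inputs the mesh class expects, using a made-up 1D chain of five nodes split into two partitions (interface nodes are marked with -1 in the partition array):
```python
import numpy as np

node_adj_map = [[1], [0, 2], [1, 3], [2, 4], [3]]  # neighbors of each node
partition = np.array([0, 0, -1, 1, 1])             # -1 marks the interface node

m = mesh(node_adj_map, partition)
print(m.nP, m.nO)                                  # 2 partitions, 1 object
print(m.objects[0].nodes, m.objects[0].type)       # [2], type 0 (corner)
```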
#### File: bddc/src/main_checkObjects.py
```python
import sys
sys.path.append("./libs_v1")
import numpy as np
from scipy.sparse import csr_matrix as csr
from scipy.sparse.linalg import spsolve as spsolve
import matplotlib.pyplot as plt
import alyaIO as IO
from fineProc import *
from coarseProc import *
from bddc import *
from pcg import *
def getGlobalOrdering(nP, fine):
# Initialise
for iP in range(nP):
fine[iP].globalTag = np.zeros(fine[iP].n,dtype=int)
counter = 0
# Interior edges
for iP in range(nP):
nI = fine[iP].nI
fine[iP].globalTag[:nI] = np.arange(counter,counter+nI)
counter += nI
# Boundary edges
d = {}
for iP in range(nP):
nI = fine[iP].nI
nB = fine[iP].nB
for iB in range(nI,nI+nB):
iB_com = np.where(fine[iP].com_loc == iB)[0][0]
iB_glob = fine[iP].com_glob[iB_com]
if (iB_glob in d.keys()) :
fine[iP].globalTag[iB] = d[iB_glob]
else :
fine[iP].globalTag[iB] = counter
d[iB_glob] = counter
counter += 1
# Get global tags for interior/boundary dofs only
for iP in range(nP):
fine[iP].nodesI = fine[iP].globalTag[:fine[iP].nI]
fine[iP].nodesB = fine[iP].globalTag[fine[iP].nI:]
###############################################################################
###############################################################################
inputDir = 'DOF166796'
inputPath = '../input/' + inputDir
caseName = ''
nP = 0
if (inputDir == 'DOF40' or inputDir == 'DOF3564' ):
caseName = 'wire-TRI03'
nP = 4
elif (inputDir == 'DOF33062'):
caseName = 'TS-TAPE-MIXED-ANG'
nP = 8
elif (inputDir == 'DOF166796' ):
caseName = 'TS-SLAB-HEX08-BULK'
nP = 15
elif (inputDir == 'DOF669780' ):
caseName = 'TS-SLAB-HEX08-BULK'
nP = 23
# Read data from Alya
fine = []
print('> READING INPUTS')
for i in range(nP):
fine.append(fineProc())
IO.readFineProc(fine[i], inputDir, caseName, i)
fine[i].getObjects()
fine[i].getConstraints()
coarse = coarseProc()
coarse.createGlobalOrdering(nP,fine)
getGlobalOrdering(nP, fine)
# List of object Global ID's for each fine proc.
Olist=np.empty(nP,dtype=object)
for iP in range(nP):
Olist[iP] = coarse.com_obj[coarse.com_size[iP]:coarse.com_size[iP+1]]
# print('>> Object classification: ')
# for iP in range(nP):
# print(' >> Proc ', iP, ': ', Olist[iP])
# print('>> Object ID per proc: ')
# for iP in range(nP):
# print(' >> Proc ', iP, ': ')
# print(' >> Edg = ', fine[iP].globalTag)
# print(' >> ID1 = ', fine[iP].obj_id1[:fine[iP].nO])
# print(' >> ID2 = ', fine[iP].obj_id2[:fine[iP].nO])
# for iO in range(fine[iP].nO):
# print(' >> ', fine[iP].obj_dofs[fine[iP].obj_size[iO]:fine[iP].obj_size[iO+1]])
print('>> Checking pathological cases: ')
if (fine[0].dim == 3):
for iP in range(nP):
# For each boundary dof, list of neighboring processors (inverse of the com_XXX arrays).
numProcs = np.zeros(fine[iP].nB,dtype=int)
procs = -np.ones((fine[iP].nB*fine[iP].nP),dtype=int)
for iP2 in range(fine[iP].nP):
for e in range(fine[iP].com_size[iP2],fine[iP].com_size[iP2+1]):
ie = fine[iP].com_loc[e]-fine[iP].nI
procs[ie*fine[iP].nP + numProcs[ie]] = fine[iP].procs[iP2]
numProcs[ie] += 1
# For each local object, list of neighboring processors.
objProcs = []
for iO in range(fine[iP].nO):
edges = fine[iP].obj_dofs[fine[iP].obj_size[iO]:fine[iP].obj_size[iO+1]]
ie = edges[0] - fine[iP].nI
objProcs.append(set(procs[ie*fine[iP].nP:ie*fine[iP].nP+numProcs[ie]]))
# For each local object
for iO in range(fine[iP].nO):
edges = fine[iP].obj_dofs[fine[iP].obj_size[iO]:fine[iP].obj_size[iO+1]]
signs = fine[iP].obj_sign[fine[iP].obj_size[iO]:fine[iP].obj_size[iO+1]]
nodes = fine[iP].edgeNodes[edges,:]
# For each edge in this local object
for e in edges:
ie = e - fine[iP].nI
pe = set(procs[ie*fine[iP].nP:ie*fine[iP].nP+numProcs[ie]])
# A) Make sure all object edges have the same neighboring processors.
if (objProcs[iO] != pe):
print('>>> ERROR :: Obj ', iO, ' and edge ', e, ' have diff neighboring procs.')
print(' > pe = ', pe)
print(' > pobj = ', objProcs[iO])
# B) Make sure all connected face edges share a subset of the neighboring processors.
                # C) Make sure no face edge is connected twice (weird things could happen...)
nbors = fine[iP].eemap[fine[iP].eemap_size[e]:fine[iP].eemap_size[e+1]]
for e2 in nbors:
ie2 = e2 - fine[iP].nI
if (ie2 >= 0 and numProcs[ie2] == 1): # If neighbor == face edge
connected = (fine[iP].obj_node[ie2 ,0] != -1) or (fine[iP].obj_node[ie2 ,1] != -1)
pe2 = set(procs[ie2*fine[iP].nP:ie2*fine[iP].nP+numProcs[ie2]])
if (connected and not pe2.issubset(pe)):
print('>>> ERROR :: Different neighboring procs for obj ', iO, ' and edges [', e, ',', e2,']')
print(' > pe = ', pe)
print(' > pe2 = ', pe2)
two_connected = (fine[iP].obj_node[ie2 ,0] != -1) and (fine[iP].obj_node[ie2 ,1] != -1)
if (two_connected):
print('>>> WARNING :: Face edge ', e2, ' is double connected.')
# D) If two objects share the same neighboring processors, make sure they are disconnected.
# WARNING:: This is incorrect, I think.... TODO: Revise this
for iO2 in range(fine[iP].nO):
if (iO2 != iO and objProcs[iO] == objProcs[iO2]):
edges2 = fine[iP].obj_dofs[fine[iP].obj_size[iO2]:fine[iP].obj_size[iO2+1]]
nodes2 = fine[iP].edgeNodes[edges2,:]
for dof in nodes:
if (dof in nodes2):
print('>>> ERROR :: Objects [', iO, ',', iO2,'] share nbors but are connected.')
print('>> Checking object global ordering: ')
for iO in range(coarse.nO):
procs = []
iprocs = []
for iP in range(nP):
if (iO in Olist[iP]):
procs.append(iP)
iprocs.append(np.where(Olist[iP] == iO)[0][0])
edges = fine[procs[0]].obj_dofs[fine[procs[0]].obj_size[iprocs[0]]:fine[procs[0]].obj_size[iprocs[0]+1]]
signs = fine[procs[0]].obj_sign[fine[procs[0]].obj_size[iprocs[0]]:fine[procs[0]].obj_size[iprocs[0]+1]]
nodes = fine[procs[0]].edgeNodes[edges,:]
perm = np.argsort(fine[procs[0]].globalTag[edges])
g_edges = np.sort(fine[procs[0]].globalTag[edges])
g_signs = signs[perm]
g_nodes = nodes[perm,:]
for iP in range(len(procs)):
edges2 = fine[procs[iP]].obj_dofs[fine[procs[iP]].obj_size[iprocs[iP]]:fine[procs[iP]].obj_size[iprocs[iP]+1]]
signs2 = fine[procs[iP]].obj_sign[fine[procs[iP]].obj_size[iprocs[iP]]:fine[procs[iP]].obj_size[iprocs[iP]+1]]
nodes2 = fine[procs[iP]].edgeNodes[edges2,:]
perm2 = np.argsort(fine[procs[iP]].globalTag[edges2])
g_edges2 = np.sort(fine[procs[iP]].globalTag[edges2])
g_signs2 = signs2[perm2]
g_nodes2 = nodes2[perm2,:]
if ((g_edges != g_edges2).any()):
print('>>> ERROR :: Different edges for object ', iO, ' and procs [', procs[0], ',', procs[iP],']')
print(' ', g_edges, g_edges2)
if ((g_signs != g_signs2).any()):
print('>>> ERROR :: Different signs for object ', iO, ' and procs [', procs[0], ',', procs[iP],']')
print(' ', g_edges, g_signs)
print(' ', g_edges2, g_signs2)
print(g_nodes)
print(g_nodes2)
``` |
{
"source": "jordimarinvalle/pyd3",
"score": 3
} |
#### File: pyd3/pyd3/id3gateway.py
```python
import os
import sys
import mimetypes
from mutagen.id3 import ID3, TRCK, TALB, TPE1, TPE2, TIT2, TCON, TDRC, COMM, TCMP, APIC
stdout_encoding = sys.stdout.encoding
class Id3Gw():
f = ''
id3 = None
def __init__(self, f):
self.f = f
self.id3 = self.open(f)
def open(self, f):
"""
Open an audio file to get and/or set data into it's ID3.
Arguments:
:param f: string e.g.: /path/to/file/file.mp3
:return: mutagen ID3 object
"""
try: return ID3(f)
except: return ID3()
def delete(self):
"""Delete ID3 data."""
self.id3.delete()
def save(self, f=''):
"""Save ID3 data."""
if not f: f = self.f
try:
self.id3.save(filename=f, v1=2)
except:
            raise Exception("Except error: ID3 from file %s was NOT saved." % (os.path.basename(self.f)))
def get_id3(self):
"""
Get basic ID3 data.
:return: dictionary with basic ID3 data such as trackn, album, artist, title, genre, year, comment, band, compilation.
"""
return {
'trackn': self.get_trackn(),
'album': self.get_album(),
'artist': self.get_artist(),
'title': self.get_title(),
'genre': self.get_genre(),
'year': self.get_year(),
'comment': self.get_comment(),
'band': self.get_band(),
'compilation': self.get_compilation(),
}
def set_id3_tag_tune(self, id3_tag, id3_tag_value):
case = {
'trackn': self.set_trackn,
'album' : self.set_album,
'artist' : self.set_artist,
'band' : self.set_band,
'title' : self.set_title,
'genre' : self.set_genre,
'year' : self.set_year,
'comment': self.set_comment,
'compilation': self.set_compilation,
}
case[id3_tag](id3_tag_value)
def set_trackn(self, value):
"""
TRCK Track number/Position.
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TRCK(encoding=3, text=value.decode(stdout_encoding)))
def set_album(self, value):
"""
TALB Album/Movie/Show title.
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TALB(encoding=3, text=value.decode(stdout_encoding)))
def set_artist(self, value):
"""
TPE1 Lead performer(s)/Soloist(s).
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TPE1(encoding=3, text=value.decode(stdout_encoding)))
def set_band(self, value):
"""
TPE2 Band/orchestra/accompaniment.
Arguments:
:param value: string -- value to set on mutagen object
"""
        if value == "VA":
self.set_compilation("1")
self.id3.add(TPE2(encoding=3, text=value.decode(stdout_encoding)))
def set_compilation(self, value):
"""
TPE2 Compilation.
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TCMP(encoding=3, text=value.decode(stdout_encoding)))
def set_title(self, value):
"""
TIT2 Title/songname/content description.
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TIT2(encoding=3, text=value.decode(stdout_encoding)))
def set_genre(self, value):
"""
TCON Content type.
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TCON(encoding=3, text=value.decode(stdout_encoding)))
def set_year(self, value):
"""
TDRC Year (replaced for TYER in v2.3).
Arguments:
:param value: string -- value to set on mutagen object
"""
self.id3.add(TDRC(encoding=3, text=value.decode(stdout_encoding)))
def set_comment(self, text):
"""
COMM User defined text information frame
Arguments:
:param text: string -- text to set on mutagen object
:return: mutagen COMM object
"""
        text = 'Add PyD3 into your life.'
        desc = 'PyD3'
        self.id3.add(COMM(encoding=3, desc=desc, text=text.decode(stdout_encoding)))
def set_picture(self, f, type, encoding=3):
"""
APIC image
Attributes for mutagen.id3.APIC:
encoding -- text encoding for the description
mime -- a MIME type (e.g. image/jpeg) or '-->' if the data is a URI
type -- the source of the image (3 is the album front cover)
desc -- a text description of the image
data -- raw image data, as a byte string
Arguments:
:param f : string -- image file path to set/add on mutagen object
:param type : integer -- value which determine the assignement of the image given to id3 data:
The metadata can also contain images of the following types:
cover (front) = 3, cover (back) = 4, Media (e.g. label side of CD) = 6, ....
:param encoding : integer -- encoding type
:return: mutagen APIC object
"""
return self.id3.add(
APIC(encoding = encoding,
mime = mimetypes.guess_type(f)[0],
type = type,
desc = str(type),
                 data = open(f, 'rb').read()
)
)
def get_trackn(self):
"""
Get TRCK Track number/Position from a ID3.
:return: unicode track number
"""
value = str(self.id3.get('TRCK', ''))
if '/' in value:
value = value.split('/')[0]
try:
value = str(int(value))
except ValueError:
pass
return unicode(value)
def get_album(self):
"""
Get TALB Album/Movie/Show title from a ID3.
:return: unicode album
"""
return unicode(self.id3.get('TALB', ''))
def get_artist(self):
"""
Get TPE1 Lead performer(s)/Soloist(s) from a ID3.
:return: unicode artist
"""
return unicode(self.id3.get('TPE1', ''))
def get_band(self):
"""
Get TPE2 Band/orchestra/accompaniment from a ID3.
:return: unicode band
"""
#if value is "VA":
# self.set_compilation("1")
return unicode(self.id3.get('TPE2', ''))
def get_compilation(self):
"""
Get TCMP Compilation from a ID3.
:return: unicode compilation
"""
return unicode(self.id3.get('TCMP', ''))
def get_title(self):
"""
Get TIT2 Title/songname/content description from a ID3.
:return: unicode title
"""
return unicode(self.id3.get('TIT2', ''))
def get_genre(self):
"""
Get TCON Content type from a ID3.
:return: unicode genre
"""
return unicode(self.id3.get('TCON', ''))
def get_year(self):
"""
Get TDRC Year (replaced for TYER in v2.3) from a ID3.
:return: unicode year
"""
return unicode(self.id3.get('TDRC', ''))
def get_comment(self):
"""
Get COMM User defined text information frame from a ID3.
:return: unicode comment
"""
return unicode('Add PyD3 into your life.')
def get_picture(self):
"""
Get APIC Attached picture from a ID3.
:return: binary image
"""
return self.id3.get('APIC', None)
#def __unicode__(self):
# self.get_id3()
```
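A hypothetical usage sketch of the Id3Gw wrapper above. It targets Python 2, needs mutagen, and assumes an existing MP3 file at the given path; the path and tag values below are made up.
```python
id3gw = Id3Gw('/music/01 - song.mp3')
id3gw.set_id3_tag_tune('artist', 'Some Artist')
id3gw.set_id3_tag_tune('title', 'Some Title')
id3gw.set_id3_tag_tune('trackn', '1')
id3gw.save()
print(id3gw.get_id3())
```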
#### File: pyd3/pyd3/slugy.py
```python
import string
import unicodedata
def get_valid_chars():
"""Get valid characters.
Valid characters are the ones which are on the following list:
-_()[] abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
Return: string
"""
return "-_()[] %s%s" % (string.ascii_letters, string.digits)
def get_unicode_nde_str(string_of_chars):
"""
Get a unicode normalized string (NFKD).
Also the string is decoded (UTF8) and encoded (ASCII).
Arguments:
    :param string_of_chars: string
Return: unicode string
"""
return unicodedata.normalize('NFKD', string_of_chars.decode('UTF-8', 'ignore')).encode('ASCII', 'ignore')
def slugy(string_of_chars, separator = '_', lower=True):
"""
Transform a string to a slugy string.
    A slugy string can only contain ASCII characters and a few more valid characters
    -- such as middle dash, underscore, round brackets and space.
    Characters which don't comply with these conditions are removed.
    Spaces are replaced by the separator param.
    If the lower param is set (default), the string is returned in lowercase.
    Arguments:
    :param string_of_chars: string
:param separator: string -- (a single char fits best). '_' char as default value.
:param lower: boolean -- True as default value.
"""
slugy_string = ""
try:
u_chars = ""
valid_chars = get_valid_chars()
string_of_chars = str(string_of_chars) #We can't assume that it will be a string ;)
u_chars = get_unicode_nde_str(string_of_chars)
except UnicodeEncodeError:
for char in string_of_chars:
try: u_chars += get_unicode_nde_str(char)
except: pass
string_of_chars = string.replace(str(u_chars), " ", separator)
slugy_string = ''.join(char for char in string_of_chars if char in valid_chars).strip()
return slugy_string.lower() if lower else slugy_string
``` |
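A minimal sketch of slugy() in use, written against the Python 2 code above; the outputs in the comments are indicative only.
```python
print(slugy("Hello,  World!"))                            # roughly "hello__world"
print(slugy("Hello World", separator='-', lower=False))   # roughly "Hello-World"
```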
{
"source": "jordimarinvalle/requeues",
"score": 3
} |
#### File: requeues/pimpamqueues/simplequeue.py
```python
import redis
from pimpamqueues import QUEUE_COLLECTION_OF_ELEMENTS
from pimpamqueues import Tools
from pimpamqueues.exceptions import PimPamQueuesError
from pimpamqueues.exceptions import PimPamQueuesElementWithoutValueError
class SimpleQueue(object):
'''
A lightweight queue. Simple Queue.
'''
QUEUE_TYPE_NAME = 'simple'
def __init__(self, id_args, collection_of=QUEUE_COLLECTION_OF_ELEMENTS,
keep_previous=True, redis_conn=None):
'''
Create a SimpleQueue object.
Arguments:
:id_args -- list, list's values will be used to name the queue
:collection_of -- string (default: QUEUE_COLLECTION_OF_ELEMENTS),
a type descriptor of queued elements
:keep_previous -- boolean (default: true),
a flag to create a fresh queue or not
:redis_conn -- redis.client.Redis (default: None), a redis
connection will be created using the default
redis.client.Redis connection params.
'''
self.id_args = id_args
self.collection_of = collection_of
if redis_conn is None:
redis_conn = redis.Redis()
self.redis = redis_conn
self.key_queue = self.get_key_queue()
if keep_previous is False:
self.delete()
def __str__(self):
'''
Return a string representation of the class.
Returns: string
'''
return '<SimpleQueue: %s (%s)>' % (self.key_queue, self.num())
def get_key_queue(self):
'''
Get a key id that will be used to store/retrieve data from
the redis server.
Returns: string
'''
return 'queue:%s:type:%s:of:%s' % ('.'.join(self.id_args),
SimpleQueue.QUEUE_TYPE_NAME,
self.collection_of)
def push(self, element, to_first=False):
'''
        Push an element into the queue. The element can be pushed to the first or
last position (by default is pushed to the last position).
Arguments:
:element -- string
:to_first -- boolean (default: False)
Raise:
:PimPamQueuesElementWithoutValueError, if element has not a value
Returns: long, the number of queued elements
'''
if element in ('', None):
raise PimPamQueuesElementWithoutValueError()
return self.push_some([element, ], to_first)
def push_some(self, elements, to_first=False, num_block_size=None):
'''
Push a bunch of elements into the queue. Elements can be pushed to the
first or last position (by default are pushed to the last position).
Arguments:
:elements -- a collection of strings
:to_first -- boolean (default: false)
:num_block_size -- integer (default: none)
Returns: long, the number of queued elements
'''
try:
elements = list(elements)
if to_first:
elements.reverse()
block_slices = Tools.get_block_slices(
num_elements=len(elements),
num_block_size=num_block_size
)
pipe = self.redis.pipeline()
for s in block_slices:
some_elements = elements[s[0]:s[1]]
if to_first:
pipe.lpush(self.key_queue, *some_elements)
else:
pipe.rpush(self.key_queue, *some_elements)
return pipe.execute().pop()
except Exception as e:
raise PimPamQueuesError(e.message)
def pop(self, last=False):
'''
        Pop an element from the queue. The element can be popped from the
        beginning or the end of the queue (by default it pops from the beginning).
        If no element is popped, it returns None.
        Arguments:
        :last -- boolean (default: false)
        Returns: string, the popped element, or None if no element is popped
'''
if last:
return self.redis.rpop(self.key_queue)
return self.redis.lpop(self.key_queue)
def num(self):
'''
Get the number of elements that are queued.
Returns: integer, the number of elements that are queued
'''
return self.redis.llen(self.key_queue)
def is_empty(self):
'''
Check if the queue is empty.
Returns: boolean, true if queue is empty, otherwise false
'''
        return self.num() == 0
def is_not_empty(self):
'''
Check if the queue is not empty.
Returns: boolean, true if queue is not empty, otherwise false
'''
return not self.is_empty()
def elements(self, queue_from=0, queue_to=-1):
'''
Get some (or even all) queued elements, by the order that they are
queued. By default it returns all queued elements.
Note
====
Elements are not popped.
Arguments:
:queue_from -- integer (default: 0)
:queue_to -- integer (default: -1)
Returns: list
'''
return self.redis.lrange(self.key_queue, queue_from, queue_to)
def first_elements(self, num_elements=10):
'''
Get the N first queued elements, by the order that they are
queued. By default it returns the first ten elements.
Note
====
Elements are not popped.
Arguments:
:num_elements -- integer (default: 10)
Returns: list
'''
queue_to = num_elements - 1
return self.elements(queue_to=queue_to)
def remove(self, element):
'''
        Remove an element from the queue.
Arguments:
:element -- string
Returns: boolean, return true if element was removed, otherwise false
'''
return True if self.redis.lrem(self.key_queue, element) else False
def delete(self):
'''
Delete the queue with all its elements.
Returns: boolean, true if queue has been deleted, otherwise false
'''
return True if self.redis.delete(self.key_queue) else False
``` |
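A hypothetical usage sketch of SimpleQueue. It assumes a Redis server reachable with the default redis.Redis() parameters; the queue name parts are made up.
```python
queue = SimpleQueue(id_args=['demo', 'tasks'], keep_previous=False)
queue.push('element-1')
queue.push_some(['element-2', 'element-3'])
print(queue.num())              # 3
print(queue.first_elements(2))  # first two queued elements, not popped
element = queue.pop()           # pops from the beginning by default
queue.delete()
```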
{
"source": "jordimarinvalle/tictactoexxl",
"score": 3
} |
#### File: tictactoexxl/tests/test_game.py
```python
import pytest
from tictactoexxl.game import Game
from tictactoexxl.board import Board
from tictactoexxl.board import BoardPosition
from tictactoexxl.player import Player
class TestTicTacToeXXLGame(object):
board = None
player1 = None
player2 = None
PLAYER1_NAME = "ttt"
PLAYER1_MOVE_REPRESENTATION = "M"
PLAYER2_NAME = "tttxxl"
PLAYER2_MOVE_REPRESENTATION = "W"
def setup_method(self, _):
self.board = Board()
self.player1 = Player(self.PLAYER1_NAME,
self.PLAYER1_MOVE_REPRESENTATION)
self.player2 = Player(self.PLAYER2_NAME,
self.PLAYER2_MOVE_REPRESENTATION)
self.game = Game(board=self.board,
players=[self.player1, self.player2])
def test_game_winning_n_in_a_row_ok_1(self):
assert Game.is_winning_n_in_a_row_ok(num_players=2,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ok_2(self):
assert Game.is_winning_n_in_a_row_ok(num_players=4,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ok_3(self):
assert Game.is_winning_n_in_a_row_ok(num_players=3,
board_dim_x=2,
board_dim_y=4,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ko_1(self):
assert Game.is_winning_n_in_a_row_ok(num_players=2,
board_dim_x=5,
board_dim_y=5,
n_in_a_row=6) is False
def test_game_winning_n_in_a_row_ko_2(self):
assert Game.is_winning_n_in_a_row_ok(num_players=5,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is False
def test_game_winning_n_in_a_row_ko_3(self):
assert Game.is_winning_n_in_a_row_ok(num_players=5,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=4) is False
def test_game_winning_n_in_a_row_ko_4(self):
assert Game.is_winning_n_in_a_row_ok(num_players=3,
board_dim_x=2,
board_dim_y=5,
n_in_a_row=5) is False
def test_game_players(self):
assert len(self.game.players) is 2
def test_game_get_players_move_representations(self):
set_1 = set(self.game.get_players_move_representations())
set_2 = set([self.PLAYER1_MOVE_REPRESENTATION,
self.PLAYER2_MOVE_REPRESENTATION])
assert set_2.difference(set_1) == set()
def test_game_player_make_a_move(self):
board_position = BoardPosition("a", "1")
self.game.player_make_a_move(self.player1, board_position)
slot_value = self.game.board.get_slot_value(board_position)
assert slot_value is self.player1.move_repr
def test_game_has_player_won(self):
board_position_1 = BoardPosition("a", "1")
self.game.player_make_a_move(self.player1, board_position_1)
board_position_2 = BoardPosition("a", "2")
self.game.player_make_a_move(self.player1, board_position_2)
board_position_3 = BoardPosition("a", "3")
self.game.player_make_a_move(self.player1, board_position_3)
assert self.game.has_player_won(self.player1, board_position_3) is True
if __name__ == '__main__':
pytest.main()
```
#### File: tictactoexxl/tests/test_grid.py
```python
import pytest
from tictactoexxl.grid import Grid
from tictactoexxl.grid import GridPosition
class TestTicTacToeXXLGrid(object):
grid = None
DIM_X = 3
DIM_Y = 3
def setup_method(self, _):
self.grid = Grid(dim_x=self.DIM_X, dim_y=self.DIM_Y)
def test_grid_dimensions(self):
assert self.grid.dim_x == self.DIM_X
assert self.grid.dim_y == self.DIM_Y
def test_grid_create_grid(self):
grid = Grid.create_grid(self.DIM_X, self.DIM_Y)
slots = []
for x, y_list_and_values in grid.items():
for y, value in y_list_and_values.items():
slots.append(y)
assert len(slots) is (self.DIM_X * self.DIM_Y)
class TestTicTacToeXXLGridPosition(object):
POSITION_X_0 = 0
POSITION_Y_0 = 0
POSITION_X_KO = -1
POSITION_Y_KO = -1
def setup_method(self, _):
pass
def test_grid_transform_board_coordinate_x(self):
assert GridPosition.transform_board_coordinate_x("a") is 0
def test_grid_transform_board_coordinate_y(self):
assert GridPosition.transform_board_coordinate_y("1") is 0
def test_grid_coordinates(self):
grid_position = GridPosition(self.POSITION_X_0, self.POSITION_Y_0)
assert grid_position.get_coordinates() == (self.POSITION_X_0,
self.POSITION_Y_0)
def test_all_coordinates_x(self):
expected_all_coordinates = [0, 1, 2, ]
all_coordinates = GridPosition.get_x_all_coordinates(3)
for coordinate_x in expected_all_coordinates:
all_coordinates.remove(coordinate_x)
assert len(all_coordinates) is 0
def test_all_coordinates_y(self):
expected_all_coordinates = [0, 1, 2, ]
all_coordinates = GridPosition.get_y_all_coordinates(3)
for coordinate_y in expected_all_coordinates:
all_coordinates.remove(coordinate_y)
assert len(all_coordinates) is 0
def test_grid_position_valid(self):
grid_position = GridPosition(self.POSITION_X_0, self.POSITION_Y_0)
assert GridPosition.exists_position(grid_position) is True
def test_grid_position_invalid(self):
grid_position = GridPosition(self.POSITION_X_KO, self.POSITION_Y_KO)
assert GridPosition.exists_position(grid_position) is False
if __name__ == '__main__':
pytest.main()
```
#### File: tictactoexxl/tictactoexxl/exception.py
```python
class TicTacToeXXLError(Exception):
'''
Board custom base exception.
Keyword arguments:
message -- explanation of the error
'''
MESSAGE = None
def __init__(self, message=None):
error_message = ""
if self.MESSAGE:
error_message = self.MESSAGE
if message:
error_message = message
self.parameter = error_message
def __str__(self):
return self.parameter
``` |
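A minimal sketch of how a concrete error could reuse the base class above; the subclass name and message are illustrative, not part of the project.
```python
class BoardPositionError(TicTacToeXXLError):
    MESSAGE = "Invalid board position."

try:
    raise BoardPositionError()
except TicTacToeXXLError as e:
    print(e)  # -> Invalid board position.
```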
{
"source": "jordimart/findmenu-ng-django",
"score": 3
} |
#### File: backend/profiles/models.py
```python
from django.db import models
from ..core.models import TimestampedModel
from ..authentication.models import User
class Profile(TimestampedModel):
user = models.OneToOneField(User, on_delete=models.CASCADE)
bio = models.TextField(blank=True,)
image = models.URLField(blank=True)
city = models.CharField(blank=True, max_length=40)
name = models.CharField(blank=True, max_length=40)
first_name = models.CharField(blank=True, max_length=40)
last_name = models.CharField(blank=True, max_length=40)
date_birth = models.CharField(blank=True, max_length=40)
friends = models.IntegerField(default=0)
restaurants = models.IntegerField(default=0)
def __str__(self):
return self.user.username
```
#### File: backend/profiles/serializers.py
```python
from rest_framework import serializers
from .models import Profile
class ProfileSerializer(serializers.ModelSerializer):
username = serializers.CharField(source='user.username')
email = serializers.CharField(source='user.email')
created_at = serializers.CharField(source='user.created_at')
bio = serializers.CharField(allow_blank=True, required=False)
image = serializers.SerializerMethodField()
city = serializers.CharField(allow_blank=True, required=False)
name = serializers.CharField(allow_blank=True, required=False)
first_name = serializers.CharField(allow_blank=True, required=False)
last_name = serializers.CharField(allow_blank=True, required=False)
date_birth = serializers.CharField(allow_blank=True, required=False)
friends = serializers.IntegerField(default=0, required=False)
restaurants = serializers.IntegerField(default=0, required=False)
class Meta:
model = Profile
fields = ('username', 'bio', 'image', 'city', 'name', 'first_name', 'last_name', 'date_birth', 'friends',
'restaurants', 'email', 'created_at')
read_only_fields = ('username', 'email')
def get_image(self, obj):
if obj.image:
return obj.image
return 'static/images/userdefault.jpg'
``` |
{
"source": "jordimas/bert-extractive-summarizer",
"score": 2
} |
#### File: bert-extractive-summarizer/evaluation/metrics.py
```python
import os
from rouge import Rouge
import json
def main():
## print("Calculates metrics")
with open(f"split/summary.ca", "r") as fh_ref, open(f"hypo/hypos.ca", "r") as fh_hyp:
refs = fh_ref.readlines()
hyps = fh_hyp.readlines()
rouge = Rouge()
scores = rouge.get_scores(hyps, refs, avg=True)
formatted_scores = json.dumps(scores, indent=4)
# print(type(scores))
print(f"Avg: {formatted_scores}")
print(f"ref lines {len(refs)} - hypo lines {len(hyps)}")
if __name__ == "__main__":
main()
``` |
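The same rouge.get_scores() call can be exercised on in-memory strings instead of the split/ and hypo/ files; a minimal sketch with made-up sentences (requires the rouge package):
```python
from rouge import Rouge

rouge = Rouge()
hyps = ["the model generated summary"]
refs = ["the reference summary written by a person"]
print(rouge.get_scores(hyps, refs, avg=True))
```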
{
"source": "jordimas/fullstop-deep-punctuation-prediction",
"score": 3
} |
#### File: jordimas/fullstop-deep-punctuation-prediction/augmentation.py
```python
import os
import re
import codecs
import pandas as pd
class Token(object):
def __init__(self, literal, output, first_task, second_task):
        # original token as it appears in the text
self.literal = literal
#lowercased
self.output = output
        # data for sentence segmentation
self.first_task = first_task
        # data for subtask 2
self.second_task = second_task
class Augmentation(object):
def __init__(self, rootdir='C:/Users/Anne/Desktop/korpora/bundestagsdebatten/sessions/csv', leipzigdir='C:/Users/Anne/Desktop/test'):
self.rootdir = rootdir
self.leipzigdir = leipzigdir
self.lines = []
self.tokens = []
def read_df(self):
outfile = codecs.open('bundestag_aug.txt', 'wb', 'utf8')
for subdir, dirs, files in os.walk(self.rootdir):
for file in files:
df = pd.read_csv(os.path.join(self.rootdir, file), index_col=0)
data = df[~(pd.isnull(df['speaker_key']))]
for item in data['text'].astype('str').values:
#utterances = re.split(r'(\n)+', item)
#vielleicht nicht durch \n splitten, um den größeren dokumentkontext zu behalten
#utterances = list(filter(lambda x: x != '\n', utterances))
#self.lines.extend(utterances)
text = re.sub(r'\n', ' ', item)
self.lines.append(text)
for line in self.lines:
for i, token in enumerate(line.split()):
literal = token
output = ''.join([i for i in literal.lstrip('"„').lower() if i.isalnum()])
first_task = 0
second_task = ''.join([i for i in literal[-1] if not i.isalnum()])
if (not second_task or len(second_task) == 0) and i < len(line.split())-1:
second_task = ''.join([i for i in line.split()[i+1][0] if not i.isalnum()])
print("{}\t{}\t{}".format(output, first_task, second_task), file=outfile)
#self.tokens.append(Token(literal, output, first_task, second_task))
def read_leipzig(self):
leipzig1 = codecs.open(os.path.join(self.leipzigdir, 'deu_news_2015_1M-sentences.txt'), 'rb', 'utf8')
leipzig2 = codecs.open(os.path.join(self.leipzigdir, 'deu_mixed-typical_2011_1M-sentences.txt'), 'rb', 'utf8')
lines = leipzig1.readlines()
lines.extend(leipzig2.readlines())
leipzig1.close()
leipzig2.close()
for line in lines:
items = re.split(r'\t', line)
try:
                # we remove a few characters that we are not sure about
text = re.sub(r'[-–&]', '', items[1])
for i, token in enumerate(text.split()):
literal = token
output = ''.join([i for i in literal.lstrip('"„').lower() if i.isalnum()])
first_task = 0 if i < len(text.split())-1 else 1
second_task = ''.join([i for i in literal[-1] if not i.isalnum()])
#catch "" of next word
if (not second_task or len(second_task) == 0) and first_task == 0:
second_task = ''.join([i for i in text.split()[i+1][0] if not i.isalnum()])
self.tokens.append(Token(literal, output, first_task, second_task))
except Exception:
print(items)
```
#### File: jordimas/fullstop-deep-punctuation-prediction/dataset.py
```python
import re
import io
import os
from zipfile import ZipFile
from tqdm import tqdm
import pickle
import random
def map_task_two_label(label):
if label not in [':', '?', '-', ',', '.']: #label != "0" and label != "." and label != "," and label != "?" and label != "!" and label != ";" and :
return "0"
return label
def load_from_zip(data_zip: str, data_set: str, lang: str, subtask: int = 1):
"""
Loads every file from the dataset into an array.
Subtask is either 1 or 2.
"""
if data_set == "aug":
relevant_dir = "" # all files are relevant..
else:
relevant_dir = os.path.join('sepp_nlg_2021_data', lang, data_set)
relevant_dir = re.sub(r'\\', '/', relevant_dir)
count_words = 0
all_gt_labels, all_predicted_labels = list(), list() # aggregate all labels over all files
with ZipFile(data_zip, 'r') as zf: # load ground truth
fnames = zf.namelist()
gt_tsv_files = [
fname for fname in fnames
if fname.startswith(relevant_dir) and fname.endswith('.tsv')
]
data = []
for i, gt_tsv_file in enumerate(tqdm(gt_tsv_files), 1):
#print(i, gt_tsv_file)
basename = os.path.basename(gt_tsv_file)
# get ground truth labels
with io.TextIOWrapper(zf.open(gt_tsv_file), encoding="utf-8") as f:
lines = f.read().strip().split('\n')
rows = [line.split('\t') for line in lines]
words = [row[0] for row in rows]
if subtask == 1:
labels = [row[subtask] for row in rows]
else:
labels = [map_task_two_label(row[subtask]) for row in rows]
if len(words) != len(labels):
                    raise Exception("word / label mismatch in file " + gt_tsv_file)
data.append([words,labels])
count_words += len(words)
print(f"Corpus words: {count_words}")
return data
def load(data_zip: str, data_set: str, lang: str, subtask: int = 1):
"""
Subtask is either 1 or 2.
"""
path = f"{data_zip}_{data_set}_{lang}_{subtask}.pickle"
if os.path.isfile(path):
print("loading data from pickle "+ path)
with open(path, 'rb') as f:
return pickle.load(f)
else:
print("loading from zip")
data = load_from_zip(data_zip, data_set, lang, subtask)
print("write cache file to:" + path)
with open(path, 'wb') as f:
pickle.dump(data, f)
return data
def transform_to_language_model_file(data_zip: str,data_set:str, lang: str,result_path):
data = load(data_zip,data_set,lang,subtask=1)
text_file = open(result_path, "w", encoding="utf-8")
for document in tqdm(data):
word_count = 0 # count words per line
for word,sentence_end in zip(document[0],document[1]):
if sentence_end == '1':
word_count = 0
text_file.write(word + "\n")
else:
word_count += 1
text_file.write(word + " ")
text_file.close()
def data_augmentation(data,data_proportion = 1.0 ):
"""
Perform data augmentation for task 1.
Recombines random sentences to new documents.
"""
print("running data augmentaion")
sentences = []
for document in tqdm(data):
words, labels = [], []
for word,sentence_end in zip(document[0],document[1]):
words += [word]
labels += [sentence_end]
if sentence_end == '1':
sentences+=[[words,labels]]
words, labels = [], []
random.shuffle(sentences)
if data_proportion <= 1.0:
sentences = sentences[:int(len(sentences)*data_proportion)]
else:
sentences = sentences * int(data_proportion)
result = []
pbar = tqdm(total=len(sentences))
while len(sentences) > 0:
new_sentences = [[],[]]
random_len = random.randrange(3,30)
for i in range(0,random_len,1):
sentence = sentences.pop(0)
new_sentences[0] += sentence[0]
new_sentences[1] += sentence[1]
if len(sentences) == 0:
break
result += [new_sentences]
pbar.update(random_len)
pbar.close()
print("done.")
return result
if __name__ =="__main__":
#data = load("data/sepp_nlg_2021_train_dev_data_v5.zip","train","de",subtask=2)
data = load("data/leipzig_aug_de.zip","aug","de",subtask=1)
#data_aug = data_augmentaion(data[:50])
#print(data_aug)
classes = {}
for item in data:
for label in item[1]:
if label in classes:
classes[label] +=1
else:
classes[label] = 1
import pprint
pprint.pprint(classes)
#transform_to_language_model_file("data/sepp_nlg_2021_train_dev_data.zip","train","de", "data/sepp_nlg_train_de2.txt")
#transform_to_language_model_file("data/sepp_nlg_2021_train_dev_data.zip","dev","de", "data/sepp_nlg_dev_de2.txt")
```
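A minimal sketch of data_augmentation() on a toy two-sentence "document"; the tokens and labels are made up (for subtask 1 the label '1' marks a sentence-ending word):
```python
toy_data = [
    [["Hello", "world", "today", "Bye", "now"],
     ["0",     "0",     "1",     "0",   "1"]],
]
augmented = data_augmentation(toy_data, data_proportion=1.0)
print(len(augmented))   # number of recombined documents
print(augmented[0])     # shuffled sentences with their labels
```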
#### File: jordimas/fullstop-deep-punctuation-prediction/model_trainer.py
```python
from datasets import load_dataset, load_metric, concatenate_datasets
from dataset import load
from datasets import Dataset, Features, ClassLabel, Value
from transformers import AutoTokenizer
from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report, confusion_matrix
import numpy as np
import numpy.ma as ma
from transformers import DataCollatorForTokenClassification
from tqdm import tqdm
import datetime
import random
from tools import print_cm
class ModelTrainer():
def __init__(self, task:int, model:str,run_name:str, data_percentage:float,use_token_type_ids:bool, opimizer_config, tokenizer_config,languages,do_hyperparameter_search = False, **args):
self.task = task
self.model_checkpoint = model
self.run_name = run_name
self.batch_size = 8
self.label_all_tokens = True
self.data_factor = data_percentage # train and test on x percent of the data
self.opimizer_config = opimizer_config
self.tokenizer_config = tokenizer_config
self.languages = languages
self.use_token_type_ids = use_token_type_ids
self.do_hyperparameter_search = do_hyperparameter_search
if self.task == 1:
self.label_2_id = {"0":0, "1":1}
else:
self.label_2_id = {"0":0, ".":1, ",":2, "?":3, "-":4, ":":5}
self.id_2_label = list(self.label_2_id.keys())
print(f"ModelTrainer.__init__: {opimizer_config}")
def tokenize_and_align_data(self,data,stride=0):
if self.model_checkpoint == "camembert/camembert-large":
# this model has a wrong maxlength value, so we need to set it manually
self.tokenizer.model_max_length = 512
tokenizer_settings = {'is_split_into_words':True,'return_offsets_mapping':True,
'padding':False, 'truncation':True, 'stride':stride,
'max_length':self.tokenizer.model_max_length, 'return_overflowing_tokens':True}
tokenized_inputs = self.tokenizer(data[0], **tokenizer_settings)
labels = []
for i,document in enumerate(tokenized_inputs.encodings):
doc_encoded_labels = []
last_word_id = None
for word_id in document.word_ids:
if word_id == None: #or last_word_id == word_id:
doc_encoded_labels.append(-100)
else:
#document_id = tokenized_inputs.overflow_to_sample_mapping[i]
#label = examples[task][document_id][word_id]
label = data[1][word_id]
doc_encoded_labels.append(self.label_2_id[label])
last_word_id = word_id
labels.append(doc_encoded_labels)
tokenized_inputs["labels"] = labels
return tokenized_inputs
def to_dataset(self,data,stride=0):
labels, token_type_ids, input_ids, attention_masks = [],[],[],[]
for item in tqdm(data):
result = self.tokenize_and_align_data(item,stride=stride)
labels += result['labels']
if self.use_token_type_ids:
token_type_ids += result['token_type_ids']
input_ids += result['input_ids']
attention_masks += result['attention_mask']
if self.use_token_type_ids:
return Dataset.from_dict({'labels': labels, 'token_type_ids':token_type_ids, 'input_ids':input_ids, 'attention_mask':attention_masks})
else:
return Dataset.from_dict({'labels': labels, 'input_ids':input_ids, 'attention_mask':attention_masks})
def compute_metrics_generator(self):
def metrics(pred):
mask = np.less(pred.label_ids,0) # mask out -100 values
labels = ma.masked_array(pred.label_ids,mask).compressed()
preds = ma.masked_array(pred.predictions.argmax(-1),mask).compressed()
if self.task == 1:
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="binary")
else:
precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="macro")
print("\n----- report -----\n")
report = classification_report(labels, preds,target_names=self.label_2_id.keys())
print(report)
print("\n----- confusion matrix -----\n")
cm = confusion_matrix(labels,preds,normalize="true")
print_cm(cm,self.id_2_label)
acc = accuracy_score(labels, preds)
return {
'f1': f1,
'precision': precision,
'recall': recall,
'accuracy':acc,
}
return metrics
def run_training(self):
val_data = []
train_data = []
for language in self.languages:
val_data += load("data/sepp_nlg_2021_train_dev_data_v5.zip","dev",language,subtask=self.task)
train_data += load("data/sepp_nlg_2021_train_dev_data_v5.zip","train",language,subtask=self.task)
        #todo: implement augmentation
aug_data =[]# load("data/bundestag_aug.zip","aug","de",subtask=task)
#aug_data += load("data/leipzig_aug_de.zip","aug","de",subtask=task)
## tokenize data
self.tokenizer = AutoTokenizer.from_pretrained(self.model_checkpoint,**self.tokenizer_config)
#train_data = train_data[:int(len(train_data)*data_factor)] # limit data to x%
#aug_data = aug_data[:int(len(aug_data)*data_factor)] # limit data to x%
print("tokenize training data")
tokenized_dataset_train = self.to_dataset(train_data,stride=100)
del train_data
#tokenized_dataset_aug = to_dataset(aug_data,stride=100)
#del aug_data
if self.data_factor < 1.0:
train_split = tokenized_dataset_train.train_test_split(train_size=self.data_factor)
tokenized_dataset_train = train_split["train"]
#aug_split = tokenized_dataset_aug.train_test_split(train_size=data_factor)
#tokenized_dataset_aug = aug_split["train"]
#tokenized_dataset_train = concatenate_datasets([tokenized_dataset_aug,tokenized_dataset_train])
tokenized_dataset_train.shuffle(seed=42)
print("tokenize validation data")
val_data = val_data[:int(len(val_data)*self.data_factor)] # limit data to x%
tokenized_dataset_val = self.to_dataset(val_data)
del val_data
## train model
args = TrainingArguments(
output_dir=f"models/{self.run_name}/checkpoints",
run_name=self.run_name,
evaluation_strategy = "epoch",
learning_rate=4e-5,
per_device_train_batch_size=self.batch_size,
per_device_eval_batch_size=self.batch_size,
gradient_accumulation_steps=1,
# num_train_epochs=self.opimizer_config["num_train_epochs"],
num_train_epochs=4,
adafactor=self.opimizer_config["adafactor"],
#weight_decay=0.005,
#weight_decay=2.4793153505992856e-11,
#adam_epsilon=5.005649261324263e-10,
warmup_steps=50,
#lr_scheduler_type="cosine",
report_to=["tensorboard"],
logging_dir='runs/'+self.run_name, # directory for storing logs
logging_first_step=True,
logging_steps=100,
save_steps=10000,
save_total_limit=10,
seed=16,
fp16=True
)
print(f"ModelTrainer.run_training: args: {args}")
data_collator = DataCollatorForTokenClassification(self.tokenizer)
def model_init():
return AutoModelForTokenClassification.from_pretrained(self.model_checkpoint, num_labels=len(self.label_2_id))
trainer = Trainer(
model_init=model_init,
args = args,
train_dataset=tokenized_dataset_train,
eval_dataset=tokenized_dataset_val,
data_collator=data_collator,
tokenizer=self.tokenizer,
compute_metrics=self.compute_metrics_generator()
)
if self.do_hyperparameter_search:
print("----------hyper param search------------")
return self.run_hyperparameter_search(trainer)
else:
trainer.train()
trainer.save_model(f"models/{self.run_name}/final")
return trainer.state.log_history
def run_hyperparameter_search(self, trainer):
import gc
import torch
def my_hp_space(trial):
gc.collect()
torch.cuda.empty_cache()
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1,5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [8]),
"weight_decay": trial.suggest_float("weight_decay", 1e-12, 1e-1, log=True),
"adam_epsilon": trial.suggest_float("adam_epsilon", 1e-10, 1e-6, log=True),
"gradient_accumulation_steps": trial.suggest_categorical("gradient_accumulation_steps", [1,2,4,8]),
}
def my_objective(metrics):
return metrics['eval_f1']
result = trainer.hyperparameter_search(direction="maximize",n_trials=200,hp_space=my_hp_space, compute_objective=my_objective)
print("---hyper---")
print(result)
print("---hyper---")
return result
if __name__ =="__main__":
trainer = ModelTrainer(task=2,model="dbmdz/bert-base-italian-xxl-uncased",run_name="optim",data_percentage=0.1,use_token_type_ids=True, opimizer_config={"adafactor": False,"num_train_epochs": 3},tokenizer_config={"strip_accent": True, "add_prefix_space":False},languages=["it"], do_hyperparameter_search=True)
result = trainer.run_training()
print(result)
```
#### File: jordimas/fullstop-deep-punctuation-prediction/predict_transformer.py
```python
from transformers import pipeline
from dataset import load
import io
import os
from typing import List
from pathlib import Path
from zipfile import ZipFile
from tqdm import tqdm
def predict_sent_end(model: str, data_zip: str, lang: str, data_set: str, outdir: str,task:str, overwrite: bool = True) -> None:
outdir = os.path.join(outdir, lang, data_set)
Path(outdir).mkdir(parents=True, exist_ok=True)
print(f'using model {model}')
pipe = pipeline("ner", model = model, grouped_entities=False, device=0)
with ZipFile(data_zip, 'r') as zf:
fnames = zf.namelist()
relevant_dir = os.path.join('sepp_nlg_2021_data', lang, data_set)
tsv_files = [
fname for fname in fnames
if fname.startswith(relevant_dir) and fname.endswith('.tsv')
]
for i, tsv_file in enumerate(tqdm(tsv_files), 1):
if not overwrite and Path(os.path.join(outdir, os.path.basename(tsv_file))).exists():
continue
with io.TextIOWrapper(zf.open(tsv_file), encoding="utf-8") as f:
tsv_str = f.read()
lines = tsv_str.strip().split('\n')
rows = [line.split('\t') for line in lines]
words = [row[0] for row in rows]
lines = predict(pipe,words,task)
with open(os.path.join(outdir, os.path.basename(tsv_file)), 'w',
encoding='utf8') as f:
f.writelines(lines)
def overlap_chunks(lst, n, stride=0):
"""Yield successive n-sized chunks from lst with stride length of overlap."""
for i in range(0, len(lst), n-stride):
yield lst[i:i + n]
label_2_id = {"0":0, ".":1, ",":2, "?":3, "-":4, ":":5}
id_2_label = list(label_2_id.keys())
def map_label_task_2(label):
label_id = int(label[-1])
return id_2_label[label_id]
def map_label_task_1(label):
label_id = int(label[-1])
# this way we can use task 2 models for task 1.
# we set just set anything other than . to class 0
if label_id != 1:
label_id = 0
return label_id
def predict(pipe,words, task):
overlap = 5
chunk_size = 230
if len(words) <= chunk_size:
overlap = 0
batches = list(overlap_chunks(words,chunk_size,overlap))
# if the last batch is smaller than the overlap,
# we can just remove it
if len(batches[-1]) <= overlap:
batches.pop()
tagged_words = []
for batch in tqdm(batches):
# use last batch completly
if batch == batches[-1]:
overlap = 0
text = " ".join(batch)
#text = text.replace(" \xad","").replace("\xad","")
result = pipe(text)
assert len(text) == result[-1]["end"], "chunk size too large, text got clipped"
char_index = 0
result_index = 0
for word in batch[:len(batch)-overlap]:
char_index += len(word) + 1
# if any subtoken of an word is labled as sentence end
# we label the whole word as sentence end
label = 0
while result_index < len(result) and char_index > result[result_index]["end"] :
#label += 0 if result[result_index]['entity'] == 'LABEL_0' else 1
if task == "1":
label = map_label_task_1(result[result_index]['entity'])
if task == "2":
label = map_label_task_2(result[result_index]['entity'])
result_index += 1
#if label > 1: # todo: we should not need this line. please check
# print("i should be not needed")
# label = 1
if task == "1":
tagged_words += [f"{word}\t{label}\n"]
if task == "2":
tagged_words += [f"{word}\t-\t{label}\n"]
if len(tagged_words) == len(words):
# tracing script to find predicton errors
for i,x in enumerate(zip(tagged_words,words)):
if x[0].startswith(x[1]) == False:
print(i,x)
assert len(tagged_words) == len(words)
return tagged_words
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='spaCy baseline for subtask 1 of SEPP-NLG 2021')
parser.add_argument("data_zip", help="path to data zip file, e.g. 'data/sepp_nlg_2021_train_dev_data.zip'")
parser.add_argument("language", help="target language ('en', 'de', 'fr', 'it'; i.e. one of the subfolders in the zip file's main folder)")
parser.add_argument("data_set", help="dataset to be evaluated (usually 'dev', 'test'), subfolder of 'lang'")
parser.add_argument("outdir", help="folder to store predictions in, e.g. 'data/predictions' (language and dataset subfolders will be created automatically)")
parser.add_argument("model",help="path to transformers model")
parser.add_argument("task",help="task one or two")
args = parser.parse_args()
predict_sent_end(args.model,args.data_zip, args.language, args.data_set, args.outdir,args.task, True)
``` |
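The overlap_chunks() helper above is what lets long documents be fed to the pipeline in fixed-size windows; a minimal sketch of its behaviour on a made-up word list:
```python
words = ["w{}".format(i) for i in range(12)]
for chunk in overlap_chunks(words, 5, stride=2):
    print(chunk)
# ['w0', ..., 'w4'], ['w3', ..., 'w7'], ['w6', ..., 'w10'], ['w9', 'w10', 'w11']
```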
{
"source": "jordimas/nmt-softcatala",
"score": 2
} |
#### File: nmt-softcatala/use-models-tools/model-to-po.py
```python
from __future__ import print_function
import datetime
import polib
from shutil import copyfile
import os
from optparse import OptionParser
import re
import logging
from ctranslate import CTranslate
import pyonmttok
def init_logging(del_logs):
logfile = 'model-to-po.log'
if del_logs and os.path.isfile(logfile):
os.remove(logfile)
logger = logging.getLogger()
hdlr = logging.FileHandler(logfile)
logger.addHandler(hdlr)
logger.setLevel(logging.WARNING)
def _clean_string(result):
CHARS = (
'_', '&', '~', # Accelerators.
)
for c in CHARS:
result = result.replace(c, '')
return result.strip()
def read_parameters():
parser = OptionParser()
parser.add_option(
'-m',
'--model_name',
type='string',
action='store',
default='eng-cat',
dest='model_name',
help="Translation model name. For example 'eng-cat' or 'cat-eng'"
)
parser.add_option(
'-f',
'--po-file',
type='string',
action='store',
dest='po_file',
help='PO File to translate'
)
parser.add_option(
'-t',
'--translated-file',
type='string',
action='store',
dest='translated_file',
default='',
help='Name of the translated file'
)
parser.add_option(
'-x',
'--models',
type='string',
action='store',
dest='models_path',
default='',
help='Path the model directory'
)
(options, args) = parser.parse_args()
if options.po_file is None: # if filename is not given
parser.error('PO file not given')
return options.model_name, options.po_file, options.translated_file,\
options.models_path
def main():
    print("Applies an OpenNMT model to translate a PO file")
start_time = datetime.datetime.now()
init_logging(True)
model_name, input_filename, target_filename, models_path = read_parameters()
if len(target_filename) == 0:
target_filename = input_filename + "-ca.po"
copyfile(input_filename, target_filename)
print(f"{models_path} - {model_name}")
openNMT = CTranslate(models_path, model_name)
po_file = polib.pofile(target_filename)
translated = 0
errors = 0
for entry in po_file:
if entry.translated():
continue
if 'fuzzy' in entry.flags or entry.obsolete:
continue
try:
if len(entry.msgid_plural) > 0:
src = _clean_string(entry.msgid)
src_plural = _clean_string(entry.msgid_plural)
tgt = openNMT.translate_parallel(src)
tgt_plural = openNMT.translate_parallel(src_plural)
entry.msgstr_plural[0] = tgt
entry.msgstr_plural[1] = tgt_plural
else:
src = _clean_string(entry.msgid)
tgt = openNMT.translate_parallel(src)
entry.msgstr = tgt
translated = translated + 1
entry.flags.append('fuzzy')
if translated % 500 == 0:
print(translated)
po_file.save(target_filename)
except Exception as e:
logging.error(str(e))
logging.error("Processing: {0}".format(src))
errors = errors + 1
po_file.save(target_filename)
print("Sentences translated: {0}".format(translated))
print("Sentences unable to translate: {0} (NMT errors)".format(errors))
print("Time used: {0}".format(str(datetime.datetime.now() - start_time)))
if __name__ == "__main__":
main()
``` |
{
"source": "jordimassaguerpla/osc-requests-tags",
"score": 3
} |
#### File: jordimassaguerpla/osc-requests-tags/osc-requests-tags.py
```python
import osc.core
import osc.conf
import re
patterns = ["\w+#\w+"]
def do_requests_tags(self, subcmd, opts, project):
"""${cmd_name}: Lists requests with hashtags
This command will list requests for a given project together
with the list of hashtags in the request diff, so that you
can use this information to group them.
${cmd_usage}
${cmd_option_list}
"""
api = self.get_api_url()
requests = get_request_list(api, project = project, req_state =('new', 'review'))
for request in requests:
description = request.to_xml().findall("description")[0].text
req_id = request.to_xml().get("id")
        req_diff = request_diff(api, req_id)
to_print = "id " + req_id
for pattern in patterns:
match = re.findall(pattern, req_diff)
if len(match) > 0:
to_print += str(match)
print to_print
``` |
{
"source": "jordimassaguerpla/salt-toaster",
"score": 2
} |
#### File: tests/ssh/conftest.py
```python
import os
import pytest
from faker import Faker
from saltcontainers.factories import ContainerFactory
@pytest.fixture(scope='module')
def module_config(request, container):
return {
"masters": [
{
"config": {
"container__config__salt_config__extra_configs": {
"thin_extra_mods": {
"thin_extra_mods": "msgpack"
},
"custom_tops": {
"extension_modules": "/salt-toaster/tests/sls/ssh/xmod",
"master_tops": {
"toptest": True
},
},
},
"container__config__salt_config__roster": [container]
}
}
]
}
@pytest.fixture(scope="module")
def container(request, salt_root):
fake = Faker()
obj = ContainerFactory(
config__name='container_{0}_{1}_{2}'.format(fake.word(), fake.word(), os.environ.get('ST_JOB_ID', '')), # pylint: disable=no-member
config__image=request.config.getini('MINION_IMAGE') or request.config.getini('IMAGE'),
config__salt_config=None,
ssh_config={'user': 'root', 'password': '<PASSWORD>'})
# obj.run('zypper --non-interactive rm salt') # Remove salt from the image!!
request.addfinalizer(obj.remove)
return obj
```
#### File: salt-toaster/tests/test_bsc1074227.py
```python
import pytest
import os
from faker import Faker
@pytest.fixture(scope='module')
def module_config(request):
fake = Faker()
return {
'masters': [
{
"config": {
"container__config__salt_config__extra_configs": {
"yaml_utf8": {
'file_roots': {
'base': ["/etc/salt/masterless"]
},
# "yaml_utf8": True
},
},
"container__config__salt_config__sls": [
"tests/sls/unicode/top.sls",
"tests/sls/unicode/unicode.sls",
"tests/sls/unicode/unicode1.sls",
"tests/sls/unicode/unicode2.sls",
"tests/sls/unicode/cocös.txt",
]
},
'minions': [
{
'config': {
"container__config__name": 'minion_{0}_{1}_{2}'.format(fake.word(), fake.word(), os.environ.get('ST_JOB_ID', '')), # pylint: disable=no-member
}
}
]
}
]
}
def test_state_apply_unicode_sls(setup, master, minion):
master.salt_call('--local state.apply', 'unicode')
# master.salt_call('--local state.apply', 'unicode1')
# master.salt_call('--local state.apply', 'unicode2')
# master.salt(minion['id'], 'state.apply', 'unicode')
# master.salt(minion['id'], 'state.apply', 'unicode1')
# master.salt(minion['id'], 'state.apply', 'unicode2')
``` |
{
"source": "jordimassaguerpla/sumaform",
"score": 2
} |
#### File: server/pts/run-pts.py
```python
import sys
import time
import xmlrpclib
import getopt
import requests
import subprocess
import datetime
import salt.client
import salt.key
locust = "{{ grains.get("pts_locust") }}.{{ grains.get("domain") }}"
system_count = {{ grains.get("pts_system_count") }}
system_prefix = "{{ grains.get("pts_system_prefix") }}"
last_clone_prefix = "{{ (grains.get("cloned_channels")|last)['prefix'] }}"
original_system_name = system_prefix + ".{{ grains.get("domain") }}"
enabled_phases = ["onboarding", "patching", "locust"]
manager_url = "http://localhost/rpc/api"
client = xmlrpclib.Server(manager_url, verbose=0)
key = client.auth.login('admin', 'admin')
salt_client = salt.client.LocalClient()
salt_key_manager = salt.key.Key(salt_client.opts)
def parse_arguments():
try:
options, remainder = getopt.getopt(sys.argv[1:], '', ['onboarding-only', 'fio-only', 'patching-only','locust-only'])
except getopt.GetoptError:
sys.exit(1)
global enabled_phases
for opt, arg in options:
if opt in ('--onboarding-only'):
enabled_phases = ["onboarding"]
if opt in ('--fio-only'):
enabled_phases = ["fio"]
elif opt in ('--patching-only'):
enabled_phases = ["onboarding", "patching"]
elif opt in ('--locust-only'):
enabled_phases = ["locust"]
def set_up():
# evil-minion system prefix
evil_minion_system_prefix = system_prefix + "-"
# get server ids
systems = client.system.listSystems(key)
original_system_id = next(s["id"] for s in systems if s["name"].startswith(original_system_name))
evil_minion_system_ids = [s["id"] for s in systems if s["name"].startswith(evil_minion_system_prefix)]
# patch the original minion to the q3 clone
print("Patching original minion with erratas from q3 clone channel")
patch_minions([original_system_id], original_system_name)
# now all evil-minions can copy the original and be patched as well
print("Patching evil-minions with erratas from q3 clone channel")
patch_minions(evil_minion_system_ids, evil_minion_system_prefix)
# now subscribe the original minion to q4 channels
print("Subscribing original minions to q4 clone channel")
base_channel_label = get_base_channel_label(last_clone_prefix)
children_channel_labels = get_children_channel_labels(base_channel_label)
subscribe_minions_to_channels([original_system_id], base_channel_label, children_channel_labels, original_system_name)
# now all evil-minions can copy the original and be subscribed to the q4 channel as well
print("Subscribing evil-minions to q4 clone channel")
subscribe_minions_to_channels(evil_minion_system_ids, base_channel_label, children_channel_labels, evil_minion_system_prefix)
# patch the original minion to the q4 clone
print("Patching original minion with erratas from q4 clone channel")
patch_minions([original_system_id], original_system_name)
def retry_for_minutes(fun, minutes):
"""Runs fun for up to minutes minutes, every 10 seconds, until it returns True"""
for iteration in range(minutes * 60 / 10 - 1):
if fun():
return
time.sleep(10)
if not fun():
print("Timeout of {} minutes elapsed, aborting".format(minutes))
sys.exit(1)
def check_system_count(retrieve_systems_fun, expected_count, system_prefix, log_msg):
all_systems = retrieve_systems_fun()
systems = [s for s in all_systems if s["name"].startswith(system_prefix)]
actual_count = len(systems)
print(log_msg.format(actual_count))
return actual_count == expected_count
def check_onboarded_system_count(expected_count, system_prefix):
return check_system_count(lambda: client.system.listSystems(key), expected_count, system_prefix, "{} systems are onboarded")
def check_patched_system_count(expected_count, system_prefix):
return check_system_count(lambda: client.system.listOutOfDateSystems(key), expected_count, system_prefix, "{} systems are patchable")
def check_subscribed_to_channels_system_count(channel_label, expected_count, system_prefix):
return check_system_count(lambda: client.channel.software.listSubscribedSystems(key, channel_label), expected_count, system_prefix, "{} systems are subscribed")
def get_base_channel_label(channel_prefix):
allChannels = client.channel.listSoftwareChannels(key)
baseChannel = next(c for c in allChannels if (c["label"].startswith(channel_prefix) and c["parent_label"] == ''))
return baseChannel["label"]
def get_children_channel_labels(parent_channel_label):
children = client.channel.software.listChildren(key, parent_channel_label)
return [c["label"] for c in children]
def subscribe_minions_to_channels(system_ids, base_channel_label, children_channel_labels, system_prefix):
print("Sending command to subscribe to channels: {} systems".format(len(system_ids)))
now = datetime.datetime.now()
for system_id in system_ids:
client.system.scheduleChangeChannels(key, system_id, base_channel_label, children_channel_labels, now)
print("Waiting for {} systems to be subscribed to channels (timeout: 20 minutes)...".format(len(system_ids)))
retry_for_minutes(lambda: check_subscribed_to_channels_system_count(base_channel_label, len(system_ids), system_prefix), 20)
def patch_minions(system_ids, system_prefix):
print("Sending command to patch %d systems" % len(system_ids))
# all minions should have the same errata, so we use the first server on the list to get it
erratas = client.system.getUnscheduledErrata(key, system_ids[0])
errata_ids = [errata["id"] for errata in erratas]
client.system.scheduleApplyErrata(key, system_ids, errata_ids)
print("Waiting for {} systems to be not patchable anymore (timeout: 20 minutes)...".format(len(system_ids)))
retry_for_minutes(lambda: check_patched_system_count(0, system_prefix), 20)
def patch_all_systems():
systems = client.system.listSystems(key)
system_ids = [system["id"] for system in systems]
patch_minions(system_ids, system_prefix)
def onboard_minions_by_key(minion_key, expected_system_count, system_prefix):
salt_key_manager.accept(minion_key)
print("Waiting for {} systems to be onboarded in SUSE Manager (timeout: 15 minutes)...".format(expected_system_count))
retry_for_minutes(lambda: check_onboarded_system_count(expected_system_count, system_prefix), 15)
print("Waiting for {} systems to be patchable in SUSE Manager (timeout: 20 minutes)...".format(expected_system_count))
retry_for_minutes(lambda: check_patched_system_count(expected_system_count, system_prefix), 20)
def run_locust_http_load(clients_count):
LocustPayload = {
'locust_count': clients_count,
'hatch_rate': 1000
}
res = requests.post('http://'+locust+'/swarm', data=LocustPayload)
print(res.json()["message"])
time.sleep(60)
res = requests.get('http://'+locust+'/stop')
print(res.json()["message"])
parse_arguments()
if "fio" in enabled_phases:
print("Test I/O performance: random reads")
subprocess.call(["fio", "--name", "randread", "--fsync=1", "--direct=1", "--rw=randread", "--blocksize=4k", "--numjobs=8", "--size=512M", "--time_based", "--runtime=60", "--group_reporting"])
print("Test I/O performance: random writes")
subprocess.call(["fio", "--name", "randwrite", "--fsync=1", "--direct=1", "--rw=randwrite", "--blocksize=4k", "--numjobs=8", "--size=512M", "--time_based", "--runtime=60", "--group_reporting"])
if "onboarding" in enabled_phases:
# onboard original minion
print("Onboarding original minion...")
onboard_minions_by_key(original_system_name, 1, original_system_name)
# onboard evil-minions
print("Onboarding evil-minions...")
onboard_minions_by_key("*", system_count, system_prefix)
if "locust" in enabled_phases:
for users in range(50, 450, 25):
run_locust_http_load(users)
if "patching" in enabled_phases:
set_up()
patch_all_systems()
``` |
{
"source": "jordimas/softcatala-web-dataset",
"score": 3
} |
#### File: jordimas/softcatala-web-dataset/wp-to-json.py
```python
import json
import xml.etree.ElementTree as ET
import logging
import html2text
def convert(source_filename, target_filename):
tree = ET.parse(source_filename)
root = tree.getroot()
items = []
words = 0
for entry in root.iter('item'):
#print(f"tag: {entry.tag}")
json_item = {}
publish = False
for item in entry:
#print(item.tag)
if item.tag == "{http://purl.org/rss/1.0/modules/content/}encoded" and item.text is not None:
# See: https://github.com/Alir3z4/html2text/issues/359
h = html2text.HTML2Text()
h.ignore_emphasis = True
h.body_width = 0
text = h.handle(item.text)
json_item['content'] = text
if item.tag == "{http://wordpress.org/export/1.2/}post_id":
json_item['id'] = item.text
if item.tag == "{http://wordpress.org/export/1.2/}status":
if item.text == "publish":
publish = True
if item.tag == "{http://wordpress.org/export/1.2/}post_date":
json_item['date'] = item.text
if item.tag == "title":
json_item['title'] = item.text
#print(item.tag)
if publish and 'content' in json_item and 'title' in json_item:
words += len(json_item['content'].split(" "))
words += len(json_item['title'].split(" "))
items.append(json_item)
with open(target_filename, 'w') as f:
json.dump(items, f, indent=4, ensure_ascii=False, sort_keys=True)
print(f"Exported {len(items)} items with {words} words to {target_filename}")
def main():
print("Converts a WordPress export to a JSON usable dataset")
logging.basicConfig(filename="wp-to-json.log", level=logging.DEBUG, filemode="w")
convert("raw/articles.xml", "dataset/articles.json")
convert("raw/programes.xml", "dataset/programes.json")
if __name__ == "__main__":
main()
``` |
{
"source": "jordim/langify-mobile",
"score": 3
} |
#### File: langify-mobile/engines/ios.py
```python
import re
import os
from engines.base import EngineBase
class EngineIOS(EngineBase):
def __init__(self):
super().__init__()
def persist_file(self,file,data):
for k,v in data.items():
file.write(self.generate_line(k,v))
file.write("\n")
def parse(self):
target = '{}/{}'.format(self.input_folder,self.input_file)
with open(target,'r') as file:
for line in file.readlines():
result = re.match("\"(.*)\"(\s+=\s+)\"(.*)\"", line.strip())
if result:
key = result.group(1)
value = result.group(3)
self.cached[key] = value
return self.cached
def format(self):
return "\"#key#\" = \"#value#\";"
def generate_line(self,k,v):
return self.format().replace("#key#",k).replace("#value#",v)
def output(self):
return 'ios.localizable'
def input(self):
return 'ios.strings'
```
#### File: jordim/langify-mobile/langify.py
```python
from managers.core import CoreManager
import fire
import logging
logger = logging.getLogger('langify')
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
logger.addHandler(ch)
def translate(name="World",
i=None, #input engine e.g. 'android'
t=None, #output engine e.g. 'android,ios'
langs=None,
c=False, #cache
p=True #print table results
):
if i is None:
raise Exception("Input is mandatory")
if type(t) == tuple:
t = list(t)
elif type(t) == str:
t = [t]
else:
t = [i]
if type(langs) == tuple:
langs = list(langs)
elif type(langs) == str:
langs = [langs]
params = {
'input': i,
'targets': t,
'langs': langs,
'save_cache': c,
'print_table': p
}
core_manager = CoreManager(params)
core_manager.translate()
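# Illustrative direct invocation (a sketch, not part of the original project; the
# engine names come from the comments above, the language codes are assumptions):
#   translate(i='android', t=('android', 'ios'), langs=('es', 'fr'), c=True)
# When the module is run as a script, fire exposes the same arguments as command-line flags.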
if __name__ == '__main__':
fire.Fire(translate)
``` |
{
"source": "JordiOrriols/animatronic",
"score": 3
} |
#### File: JordiOrriols/animatronic/calibrate_servos.py
```python
from adafruit_servokit import ServoKit
from tkinter import Tk, Frame, Scale, Label
from common.servo import AniServo, initialize_servos
from scheleton.config import servos_data
# Initialization
kit = ServoKit(channels=16)
initialize_servos(kit, servos_data)
window = Tk()
window.title('Calibrate Phonemes')
window.geometry('850x300')
# Run code
def show_servo_scale(servo: AniServo):
frameControl = Frame(window)
frameControl.pack(side='left', padx=10)
label = '#' + str(servo.getPin())
n = Label(frameControl, fg='black', width=3, text=label)
n.pack(side='top', expand=True)
def print_selection(v):
l.config(text=v)
servo.move_to_angle(int(v))
s = Scale(frameControl, from_=servo.getPhysicalLimitMin(), to=servo.getPhysicalLimitMax(), length=200,
showvalue=0, tickinterval=2, resolution=5, command=print_selection)
s.set(servo.getRestPosition())
s.pack(side='top', expand=True)
l = Label(frameControl, bg='white', fg='black', width=3, text='...')
l.pack(side='top', expand=True)
for servo in servos_data:
show_servo_scale(servo)
window.mainloop()
``` |
{
"source": "jordipiqueselles/tops",
"score": 3
} |
#### File: tops/tops/tops.py
```python
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array
from sklearn.utils.multiclass import unique_labels
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler, OneHotEncoder, LabelEncoder
from sklearn.decomposition import PCA
from abc import ABC, abstractmethod
from functools import partial
import copy
from scipy.optimize import minimize, Bounds
import pandas as pd
from utils.timing import TimeCounterCollection
import warnings
###############
#### UTILS ####
###############
def softmax(x):
"""Compute softmax values for each set of scores in x."""
return np.exp(x) / np.sum(np.exp(x), axis=0)
def is_numeric(elem):
"""
Checks whether the given element is numeric by trying to add 1 to it
"""
try:
x = elem + 1
except TypeError:
return False
return True
############################
#### THE TOPS ALGORITHM ####
############################
class ToPs(BaseEstimator, ClassifierMixin):
"""
ToPs classifier
This classifier builds a tree of predictors, i.e. a tree where each node has a predictor associated with it.
The overall prediction for an instance is given by the aggregation of all the predictors found along the
path from the root to the leaf reached by that instance.
Parameters
----------
predictors : list
A list of base predictors. The algorithm will choose the best one for a given node according to a
goodness metric.
split_type : Derived class of tops.BaseSplit
Policy to use when creating a tentative split over a given variable.
goodness_metric : sklearn.metrics or compatible
A function that given an array of y_true and an array of y_pred gives a number that measures the goodness
of this prediction.
min_inst : int
Minimum number of instances in a node
min_prob_hoeffding : float, default: 0.8
A probability value, between 0 and 1. A split will be accepted if the probability that the sons outperform
the parent node in terms of the goodness_metric is greater than min_prob_hoeffding
cv1 : int or float, default: 0.15
It specifies the type of validation used to select the best split-predictor.
If the value is between 0 and 1, cv1 represents the proportion of instances used as validation set 1
If the value is an integer greater than 1, it represents the number of folds in the CV1
cv2 : float, default: 0.10
It represents the proportion of instances for the validation set 2, used to aggregate the predictors
min_inst_val : int, default: 20
Minimum number of instances devoted to validation (V1 or V2) in a node
normalize_data : bool, default: False
Whether to normalize or not the numerical features in the dataset before the training process
file_log : str, default: None
File where to write the logs
var_pca : float, default: None
Not implemented
"""
binary = "binary"
categorical = "categorical"
numerical = "numerical"
########################
#### Public methods ####
########################
def __init__(self, predictors, split_type, goodness_metric, min_inst, min_prob_hoeffding=0.8, cv1=0.15, cv2=0.1,
min_inst_val=20, normalize_data=False, file_log=None, var_pca=None):
# Attributes of the class that take values from the function parameters (explained in the class description)
self.predictors = predictors
self.split_type = split_type
self.goodness_metric = goodness_metric
self.min_inst = min_inst
self.min_prob_hoeffding = min_prob_hoeffding
self.cv1 = cv1
self.cv2 = cv2
self.min_inst_val = min_inst_val
self.normalize_data = normalize_data
self.file_log = file_log
self.var_pca = var_pca
# We need to wrap the predictors to deal with corner cases, e.g. when only one class is present in the data
self.predictors_wrapped = [WrapperPredictor(predictor) for predictor in self.predictors]
# Root node of the tree of predictors
self.root_node = None
# Number of different classes in y
self.n_classes = None
# List of unique values of y
self.classes_ = []
self.enc_classes = []
# A list where the ith item contains the type of the variable in the ith column of X,
# before the preprocessing step. The types can be binary, categorical or numerical
self.orig_col_types = []
# A list where the ith item contains a dictionary with metainformation about the ith column of X,
# after the preprocessing step. Keys: 'type', (optional) 'idx_modalities'
self.metadata = []
# For the PCA decomposition
self.pca = None
# Object used to normalize the numerical variables of a dataset
self.scaler = StandardScaler()
# Object used to encode the classes represented in y as a sequence of numbers from 0 to n_classes
self.label_encoder = LabelEncoder()
# Object to encode the categorical variables of a dataset using the one-hot encoding strategy
self.one_hot_encoder = OneHotEncoder(sparse=False)
self.timers = TimeCounterCollection(['fit_node', 'fit_base', 'predict_base', 'train_val', 'split_node',
'x_y_son', 'idx_val_son', 'aggregate_node', 'predict_node',
'pre_fit', 'get_split', 'init_fit_node', 'eval_goodness1'])
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. It can contain both numerical
and categorical features
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
"""
self.timers.reset_all()
self.timers.start('pre_fit')
X, y = shuffle(X, y)
df_X = pd.DataFrame(X)
# Analysis of the data, to populate the metadata fields
self._analyze_data(df_X, y)
# Preprocess the data to have a correct type and shape
X = self._preprocess_X(df_X)
y = self._preprocess_y(y)
self._learn_transformation(X)
X = self._transform(X)
# Check the consistency of the data
self._check_data_fit(X, y)
list_val_idx1, list_val_idx2 = self._create_list_val_idx(self.cv1, self.cv2, X.shape[0])
self.timers.stop('pre_fit')
self.root_node = Node(X, y, self, list_val_idx1, list_val_idx2, [])
self.root_node.fit()
self.root_node.split()
self.root_node.aggregate([])
# Return the classifier
self.root_node.clear_data()
self.write_log()
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
# Check is fit had been called
if self.root_node is None:
raise AttributeError("The predictor is not fitted")
# Preprocess the data
df_X = pd.DataFrame(X)
X = self._preprocess_X(df_X)
X = self._transform(X)
# Input validation
self._check_data_predict(X)
prob = self.root_node.predict_proba(X, [])
return prob
def predict(self, X):
prob = self.predict_proba(X)
indices = prob.argmax(axis=1)
return self.classes_[indices]
#########################
#### Private methods ####
#########################
def _analyze_data(self, X, y):
"""Analyzes the input data X, y and populates the label_encoder, scaler, classes_ and metadata attributes
:param X: array-like, shape (n_samples, n_features)
:param y: array-like, shape (n_samples,)
"""
self.label_encoder.fit(y)
self.classes_ = np.unique(y)
# Analyze the type of values in the columns of X
self.orig_col_types = []
for col in X.columns:
if is_numeric(X[col][0]):
un_values = np.unique(X.loc[:, col])
if len(un_values) == 2 and un_values[0] == 0 and un_values[1] == 1:
self.orig_col_types.append(self.binary)
else:
self.orig_col_types.append(self.numerical)
else:
self.orig_col_types.append(self.categorical)
# Fit the transformer for the numerical variables
idx_numerical = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.numerical]
if len(idx_numerical) > 0:
self.scaler.fit(X.iloc[:, idx_numerical])
# No need for any transformer for the binary ones
idx_binary = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.binary]
# Fit the transformer for the categorical variables
idx_categorical = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.categorical]
if len(idx_categorical) > 0:
self.one_hot_encoder.fit(X.iloc[:, idx_categorical])
# Populate metadata
self.metadata = []
for _ in range(len(idx_numerical)):
self.metadata.append({'type': ToPs.numerical})
for _ in range(len(idx_binary)):
self.metadata.append({'type': ToPs.binary})
for i in range(len(idx_categorical)):
idx_first_mod = len(self.metadata)
idx_modalities = [idx_first_mod + j for j in range(len(self.one_hot_encoder.categories_[i]))]
for _ in idx_modalities:
self.metadata.append({'type': ToPs.categorical, 'idx_modalities': idx_modalities})
def _preprocess_X(self, X):
"""Preprocess the X matrix based on the information stored in scaler
"""
# The numerical variables
idx_numerical = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.numerical]
if len(idx_numerical) > 0:
if self.normalize_data:
X_num = self.scaler.transform(X.iloc[:, idx_numerical])
else:
X_num = X.iloc[:, idx_numerical]
else:
X_num = X.iloc[:, []]
# The binary variables
idx_binary = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.binary]
X_bin = X.iloc[:, idx_binary]
# The categorical variables
idx_categorical = [i for i, col_type in enumerate(self.orig_col_types) if col_type == ToPs.categorical]
if len(idx_categorical) > 0:
X_cat = self.one_hot_encoder.transform(X.iloc[:, idx_categorical])
else:
X_cat = X.iloc[:, []]
X_preprocessed = np.concatenate((X_num, X_bin, X_cat), axis=1)
try:
return X_preprocessed.astype(np.float32)
except:
warnings.warn('Could not convert to float32')
return X_preprocessed
def _preprocess_y(self, y):
"""Encodes the target variable y using the label_encoder
"""
y_enc = self.label_encoder.transform(y)
return y_enc
def _learn_transformation(self, X):
'''
Learns a transformation to apply over the dataset. Currently, the only transformation available is PCA
:param X: array-like, shape (n_samples, n_features)
'''
if self.var_pca is not None:
self.pca = PCA(self.var_pca)
# TODO Take only a reasonable amount of instances for fitting the PCA
self.pca.fit(X)
self.metadata = [{'type': ToPs.numerical} for _ in range(len(self.pca.components_))]
def _transform(self, X):
'''
Applies the previously learned transformation to the dataset. Currently, the only transformation available is PCA
:param X: array-like, shape (n_samples, n_features)
'''
if self.pca is not None:
X = self.pca.transform(X)
return X
def _check_data_fit(self, X, y):
"""Check after the preprocessing step
"""
# Check that X and y have correct shape
check_X_y(X, y)
# Check both X and y are pure np.array (not pd.DataFrame)
check_array(X)
self.enc_classes = unique_labels(y)
self.n_classes = len(self.enc_classes)
# The classes of y go from 0 to n_classes
if not all(a == b for a, b in zip(self.enc_classes, range(self.n_classes))):
raise ValueError("The classes of y must go from 0 to self.n_classes")
def _check_data_predict(self, X):
"""Check after the preprocessing step
"""
# Check both X and y are pure np.array (not pd.DataFrame)
check_array(X)
# X has the expected number of columns
if X.shape[1] != len(self.metadata):
raise ValueError("X doesn't have the expected number of columns")
@staticmethod
def _create_list_val_idx(cv1, cv2, n_inst):
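# Builds boolean train/validation bitmaps over the (already shuffled) instances.
# With the default fractional settings, the first cv2 fraction is reserved as the V2 set
# (later used to fit the aggregation weights) and V1, used to select the best
# split/predictor, is a single slice of the remaining instances; integer values >= 2
# are interpreted as a number of cross-validation folds instead.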
def create_train_val_bitmap(n_inst, offset, idx_start_val, idx_end_val):
train_bitmap = np.ones(n_inst, dtype=np.bool)
train_bitmap[:offset] = False
train_bitmap[offset + idx_start_val:offset + idx_end_val] = False
val_bitmap = np.zeros(n_inst, dtype=np.bool)
val_bitmap[offset + idx_start_val:offset + idx_end_val] = True
return train_bitmap, val_bitmap
if cv2 < 0.5:
offset_cv1 = int(n_inst * cv2)
tuple_val = create_train_val_bitmap(n_inst, 0, 0, offset_cv1)
list_val_idx2 = [tuple_val]
elif cv2 >= 2 and int(cv2) == cv2:
offset_cv1 = 0
size_fold = int(np.ceil(n_inst / cv2))
list_val_idx2 = []
for i in range(cv2):
tuple_val = create_train_val_bitmap(n_inst, 0, size_fold * i, size_fold * (i + 1))
list_val_idx2.append(tuple_val)
else:
raise ValueError('Invalid value for the cv2 ({})'.format(cv2))
if cv1 < 0.5:
n_val_inst = int(n_inst * cv1)
tuple_val = create_train_val_bitmap(n_inst, offset_cv1, 0, n_val_inst)
list_val_idx1 = [tuple_val]
elif cv1 >= 2 and int(cv1) == cv1:
size_fold = int(np.ceil((n_inst - offset_cv1) / cv1))
list_val_idx1 = []
for i in range(cv1):
tuple_val = create_train_val_bitmap(n_inst, offset_cv1, size_fold * i, size_fold * (i + 1))
list_val_idx1.append(tuple_val)
else:
raise ValueError('Invalid value for the cv1 ({})'.format(cv1))
return list_val_idx1, list_val_idx2
def write_log(self):
if self.file_log is not None:
with open(self.file_log, 'a') as f:
f.write(self.__str__())
f.write('..............................................................\n')
def __str__(self):
return self.root_node.__str__()
############################
####### SPLIT TYPES ########
############################
class BaseSplit(ABC):
"""
Base class for doing a split
"""
def __repr__(self):
name = str(self.__class__).split('.')[-1][:-2]
list_params = []
for key, value in self.__dict__.items():
list_params.append(str(key) + '=' + str(value))
return name + '(' + ', '.join(list_params) + ')'
def get_split(self, X, y, idx_var, metadata):
"""From the input data X, y it generates a list of splits using the variable idx_var. Each split is represented
by a function that returns in which bin a new instance must go.
:param X: array-like, shape (n_samples, n_features)
:param y: array-like, shape (n_samples,)
:param idx_var: Index of the variable used to generate the split
:param metadata: Metadata of the dataset
:return: A list -> [(decision_fun, n_bins), ...]
"""
if metadata[idx_var]['type'] == ToPs.binary:
return self._get_split_binary(X, idx_var)
elif metadata[idx_var]['type'] == ToPs.categorical:
return self._get_split_categorical(metadata, idx_var)
elif metadata[idx_var]['type'] == ToPs.numerical:
return self._get_split_numerical(X, y, idx_var, metadata)
else:
raise NotImplementedError("There's no split for the type of variable " + metadata[idx_var])
def _get_split_binary(self, X, idx_var):
n_bins = len(np.unique(X[:, idx_var]))
decision_fun = self.DecisionFunction(self.decision_binary, idx_var)
return [(decision_fun, n_bins)]
def _get_split_categorical(self, metadata, idx_var):
raise NotImplementedError('_get_split_categorical is not implemented')
idx_modalities = metadata[idx_var]['idx_modalities']
return [(partial(self.decision_categorical, idx_modalities=idx_modalities), len(idx_modalities))]
@abstractmethod
def _get_split_numerical(self, X, y, idx_var, metadata):
raise NotImplementedError()
@staticmethod
def decision_binary(X, idx_var):
return X[:, idx_var]
@staticmethod
def decision_categorical(X, idx_modalities):
# An instance of the first modality will be associated with a 0. For the second one, a 1...
return np.dot(X[:, idx_modalities], np.arange(0, len(idx_modalities), 1))
@staticmethod
def decision_numerical(X, idx_var, thresholds):
return np.digitize(X[:, idx_var], thresholds, right=False)
class DecisionFunction:
def __init__(self, fun, idx_var):
self.fun = fun
self.idx_var = idx_var
def __call__(self, X):
return self.fun(X=X, idx_var=self.idx_var)
class SplitImpurity(BaseSplit):
"""
Class used to generate splits based on the reduction of the Gini impurity or the entropy
"""
def __init__(self, imp_fun, min_inst):
self.imp_fun = imp_fun
if min_inst < 1:
raise ValueError("min_inst must be equal or greater than 1")
self.min_inst = min_inst
def _get_split_numerical(self, X, y, idx_var, metadata):
if X.shape[0] < self.min_inst * 2 + 1:
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=[np.inf]), idx_var)
return [(decision_fun, 1)]
# [(X_col, y), (X_col, y), ...] sorted by X_col
data = sorted(zip(X[:, idx_var].tolist(), y.tolist()))
n_labels = y.max() + 1
# Initialize the class distribution for the two subsets in which the data will be split
clss_dstr_1 = [0] * n_labels
for col_value, clss in data[:self.min_inst]:
clss_dstr_1[clss] += 1
clss_dstr_2 = [0] * n_labels
for col_value, clss in data[self.min_inst:]:
clss_dstr_2[clss] += 1
last_col_value = data[self.min_inst - 1][0]
best_gini = np.inf
best_col_value = np.inf
for col_value, clss in data[self.min_inst:len(data)-self.min_inst+1]:
if last_col_value != col_value:
new_gini = self.gini(clss_dstr_1) + self.gini(clss_dstr_2)
(best_gini, best_col_value) = min((best_gini, best_col_value), (new_gini, col_value))
clss_dstr_1[clss] += 1
clss_dstr_2[clss] -= 1
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=[best_col_value]), idx_var)
if best_col_value == np.inf:
return [(decision_fun, 1)]
else:
return [(decision_fun, 2)]
@staticmethod
def gini(clss_distr):
N = sum(clss_distr)
return sum(n*(1-n/N) for n in clss_distr)
@staticmethod
def entropy():
raise NotImplementedError()
class SplitPercentile(BaseSplit):
"""
Class used to generate a split with n equally sized bins
"""
def __init__(self, n_bins):
self.n_bins = n_bins
if self.n_bins < 2:
raise ValueError("The variable n_bins must be greater or equal than 1")
def _get_split_numerical(self, X, y, idx_var, metadata):
col_unique = np.unique(X[:, idx_var])
# Check if we have enough elements to fill all the bins
if len(col_unique) > self.n_bins:
n_bins = self.n_bins
else:
n_bins = len(col_unique)
# Only one distinct value in the selected column. All the data in the same bin
if n_bins == 1:
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=[np.inf]), idx_var)
return [(decision_fun, 1)]
# there's a more efficient method that doesn't need to sort the vector
sort_col = np.sort(col_unique)
idx_thresholds = np.array([i * len(sort_col) // n_bins for i in range(1, n_bins)])
thresholds = sort_col[idx_thresholds]
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=thresholds), idx_var)
return [(decision_fun, n_bins)]
class SplitOriginal(BaseSplit):
"""
This class is used to create the split used in the original description of ToPs.
It creates 9 binary splits by generating 9 thresholds that divide the dataset into 10 subsets of almost equal size
"""
def __init__(self, list_percentile):
self.list_percentile = list_percentile
if any(percentile <= 0 or 1 <= percentile for percentile in list_percentile):
raise ValueError("The variable percentile must be between 0 and 1")
def _get_split_numerical(self, X, y, idx_var, metadata):
col_unique = np.unique(X[:, idx_var])
# Only one distinct value in the selected column. All the data in the same bin
if len(col_unique) == 1:
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=[np.inf]), idx_var)
return [(decision_fun, 1)]
# there's a more efficient method that doesn't need to sort the vector
sort_col = np.sort(X[:, idx_var])
# we take only unique values, to avoid unnecessary duplicated splits
list_thresholds = sorted(set(sort_col[int(percentile * len(sort_col))] for percentile in self.list_percentile))
# We remove the first item if it has the value of the first element in the current column in X,
# because it will generate a split with an empty subset
if list_thresholds[0] <= sort_col[0]:
list_thresholds.pop(0)
list_splits = []
for threshold in list_thresholds:
decision_fun = self.DecisionFunction(partial(self.decision_numerical, thresholds=[threshold]), idx_var)
list_splits.append((decision_fun, 2))
return list_splits
class SplitKmeans(BaseSplit):
"""
Class used to create a split using k-means
"""
def __init__(self):
raise NotImplementedError()
def _get_split_numerical(self, X, y, idx_var, metadata):
raise NotImplementedError()
#####################
####### NODE ########
#####################
class Node:
"""
It represents an internal node of ToPs
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features. It can contain both numerical
and categorical features
y : array-like, shape (n_samples,)
Target vector relative to X.
list_val_idx1 : [(array_bool_train, array_bool_val), ...]
Arrays of booleans that are used to select which instances are used for train and validation 1
list_val_idx2 : [(array_bool_train, array_bool_val), ...]
Arrays of booleans that are used to select which instances are used for train and validation 2
prev_pred : [(node, idx, y_hat), ...]
Predictions of the predictors of the ancestor nodes
level : int
Depth of the node
"""
def __init__(self, X, y, tops, list_val_idx1, list_val_idx2, prev_pred, level=0):
self.X = X
self.y = y
self.tops = tops
self.list_val_idx1 = list_val_idx1
self.list_val_idx2 = list_val_idx2
if len(self.list_val_idx2) != 1:
raise ValueError('The list_val_idx2 must have only one training and validation set')
self.prev_pred = prev_pred
self.level = level
# Node from which it takes the best predictor
self.node_predictor = None
# Best predictor trained with the data in this node
self.idx_best_predictor = None
self.list_predictors = []
self.goodness1 = None
self.goodness1_display = None
self.goodness2 = None
self.sons = []
self.decision_fun = None
# self.aggr_fun = None
self.w = None
self.n_inst = X.shape[0]
# self._check_val_idx()
# A bitmap (array of bool) expressing all the instances used for validation
self.all_val_bitmap1 = np.sum([val_bitmap for _, val_bitmap in self.list_val_idx1], axis=0) > 0
self.all_val_bitmap2 = np.sum([val_bitmap for _, val_bitmap in self.list_val_idx2], axis=0) > 0
self.n_inst_val1 = self.all_val_bitmap1.sum()
self.n_inst_val2 = self.all_val_bitmap2.sum()
def fit(self):
"""
Fits the best predictor for this node. Uses the self.list_val_idx to evaluate the goodness of each predictor.
"""
self.tops.timers.start('fit_node')
self.tops.timers.start('init_fit_node')
# All the predictions made at this node using all the available predictors (initialized to nan)
node_all_pred_y = [np.zeros(len(self.y)) + np.nan for _ in range(len(self.tops.predictors_wrapped))]
self.tops.timers.stop('init_fit_node')
self.list_predictors = [copy.deepcopy(predictor) for predictor in self.tops.predictors_wrapped]
for train_bitmap, val_bitmap in self.list_val_idx1:
self.tops.timers.start('train_val')
X_train = self.X[train_bitmap]
X_val = self.X[val_bitmap]
y_train = self.y[train_bitmap]
self.tops.timers.stop('train_val')
for i, my_predictor in enumerate(self.list_predictors):
self.tops.timers.start('fit_base')
my_predictor.fit(X_train, y_train)
self.tops.timers.stop('fit_base')
self.tops.timers.start('predict_base')
y_pred = my_predictor.predict(X_val)
self.tops.timers.stop('predict_base')
node_all_pred_y[i][val_bitmap] = y_pred
# evaluate best goodness
self.tops.timers.start('eval_goodness1')
list_node_goodness = [self.tops.goodness_metric(self.y[self.all_val_bitmap1], y_pred[self.all_val_bitmap1])
for y_pred in node_all_pred_y]
best_node_goodness = max(list_node_goodness)
list_prev_goodness = [(node, idx, self.tops.goodness_metric(self.y[self.all_val_bitmap1], y_pred[self.all_val_bitmap1]))
for node, idx, y_pred in self.prev_pred]
best_parent_node, idx_best_pred, best_prev_goodness = max(list_prev_goodness, key=lambda x: x[2], default=(None, 0, -np.inf))
self.tops.timers.stop('eval_goodness1')
# use the classifier in that node
if best_node_goodness > best_prev_goodness:
self.idx_best_predictor = list_node_goodness.index(best_node_goodness)
# the node that contains the best predictor for this node is itself
self.node_predictor = self
self.goodness1 = best_node_goodness * self.all_val_bitmap1.sum()
self.goodness1_display = best_node_goodness
# Update prev_pred
self.prev_pred.extend((self, idx, node_all_pred_y[idx]) for idx in range(len(self.list_predictors)))
# use the classifier from a parent node
else:
self.node_predictor = best_parent_node
self.idx_best_predictor = idx_best_pred
self.goodness1 = best_prev_goodness * self.all_val_bitmap1.sum()
self.goodness1_display = best_prev_goodness
self.tops.timers.stop('fit_node')
def split(self):
"""
Finds the split that gives the best goodness for the generated sons. If the goodness of the sons improves
that of the parent node, the split continues recursively. Otherwise, the recursion stops.
"""
# retrain with the training and V1 data
for predictor in self.list_predictors:
idx = self.list_val_idx2[0][0]
predictor.fit(self.X[idx], self.y[idx])
# stop criteria
if self.X.shape[0] < self.tops.min_inst or len(unique_labels(self.y)) == 1:
return
self.tops.timers.start('split_node')
list_best_sons = [self.DummyNode()]
best_decision_fun = None
for col in range(self.X.shape[1]):
# Conditions to stop splitting with this feature
if self.tops.metadata[col]['type'] == ToPs.categorical:
# We only do the split with the first column representing a modality of a categorical feature
if col != self.tops.metadata[col]['idx_modalities'][0]:
continue
self.tops.timers.start('get_split')
list_splits = self.tops.split_type.get_split(self.X, self.y, col, self.tops.metadata)
self.tops.timers.stop('get_split')
for decision_fun, n_bins in list_splits:
# We cannot continue spliting since all the instances will go to the same bin
if n_bins == 1:
continue
X_idx = decision_fun(self.X)
list_sons = []
for i in range(n_bins):
# here I am creating deep copies
belongs_to_son = X_idx == i
# if the number of variables is high this step can be costly
self.tops.timers.start('x_y_son')
X_son = self.X[belongs_to_son]
y_son = self.y[belongs_to_son]
self.tops.timers.stop('x_y_son')
# we don't create a new tentative son if it doesn't contain any data
if len(y_son) == 0:
continue
self.tops.timers.start('idx_val_son')
list_val_idx1_son = [(train_bitmap[belongs_to_son], val_bitmap[belongs_to_son])
for train_bitmap, val_bitmap in self.list_val_idx1]
list_val_idx2_son = [(train_bitmap[belongs_to_son], val_bitmap[belongs_to_son])
for train_bitmap, val_bitmap in self.list_val_idx2]
# TODO Parametrize the minimum allowed data for the validations
not_enough_train1 = any(train_bitmap.sum() == 0 for train_bitmap, _ in list_val_idx1_son)
not_enough_train2 = any(train_bitmap.sum() == 0 for train_bitmap, _ in list_val_idx2_son)
not_enough_v1 = sum(val_bitmap.sum() for _, val_bitmap in list_val_idx1_son) < self.tops.min_inst_val
not_enough_v2 = sum(val_bitmap.sum() for _, val_bitmap in list_val_idx2_son) < self.tops.min_inst_val
self.tops.timers.stop('idx_val_son')
if not_enough_train1 or not_enough_train2 or not_enough_v1 or not_enough_v2:
dummy_son = self.DummyNode()
list_sons = [dummy_son]
break
# calculate in which son the previous predictions will go
prev_pred_son = [(node, idx, pred[belongs_to_son]) for node, idx, pred in self.prev_pred]
son = Node(X_son, y_son, self.tops, list_val_idx1_son, list_val_idx2_son,
prev_pred_son, self.level + 1)
son.fit()
list_sons.append(son)
# Update the list of best sons found until now
goodness_sons = sum((son.goodness1 for son in list_sons))
goodness_best_sons = sum((son.goodness1 for son in list_best_sons))
if goodness_sons > goodness_best_sons:
list_best_sons = list_sons
best_decision_fun = decision_fun
goodness_best_sons = sum((son.goodness1 for son in list_best_sons))
if self._hoeffding_criteria(goodness_best_sons):
self.sons = list_best_sons
self.decision_fun = best_decision_fun
for son in self.sons:
son.split()
self.tops.timers.stop('split_node')
def _hoeffding_criteria(self, goodness_sons):
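# Probabilistic split-acceptance test: epsilon is the gap, on the V1 instances, between the
# normalized goodness of the best sons and that of the parent (if epsilon <= 0 the split is
# rejected outright). When the parent error (1 - goodness_parent) is larger than 1/6 a
# Hoeffding bound exp(-2 * epsilon^2 * n) is used for both deltas; otherwise a Chernoff-style
# bound is computed separately for the parent and for the sons. The split is accepted only if
# (1 - delta1/2) * (1 - delta2/2), the estimated probability that the sons truly outperform
# the parent, exceeds min_prob_hoeffding.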
goodness_sons = goodness_sons / self.n_inst_val1 + 0.000001
goodness_parent = self.goodness1 / self.n_inst_val1 + 0.000001
epsilon = goodness_sons - goodness_parent
if epsilon <= 0:
return False
if 1 - goodness_parent > 1/6:
# hoeffding
delta1 = np.exp(-2 * epsilon**2 * self.n_inst_val1)
delta2 = delta1
else:
# chernoff
delta1 = np.exp(- epsilon**2 * self.n_inst_val1 / (3 * (1 - goodness_parent)))
delta2 = np.exp(- epsilon**2 * self.n_inst_val1 / (3 * (1 - goodness_sons)))
prob_son_better_than_parent = (1 - delta1/2) * (1 - delta2/2)
return prob_son_better_than_parent > self.tops.min_prob_hoeffding
def aggregate(self, list_prev_y_prob):
"""Creates an ensemble for each path from the root to a leaf. The ensemble is created by aggregating
the predictors found along the path using a weighted average
:param list_prev_y_prob: List of probabilities calculated in the previous nodes
"""
self.tops.timers.start('aggregate_node')
# A bitmap (array of bool) expressing all the instances used for validation
all_val_bitmap = np.sum([val_bitmap for _, val_bitmap in self.list_val_idx2], axis=0) > 0
# Probabilities calculated in this node
y_pred_prob_local = np.zeros((len(self.y), self.tops.n_classes))
for train_bitmap, val_bitmap in self.list_val_idx2:
# X_train = self.X[train_bitmap]
X_val = self.X[val_bitmap]
# y_train = self.y[train_bitmap]
best_pred = self.node_predictor.list_predictors[self.idx_best_predictor]
# best_pred.fit(X_train, y_train)
prob = best_pred.predict_proba(X_val)
for i, clss in enumerate(best_pred.classes_):
# TODO predict for regression and predict_proba for classification
y_pred_prob_local[val_bitmap, clss] = prob[:, i]
list_prev_y_prob.append((self, y_pred_prob_local))
# Calculate the goodness over the V2 set. Only for informative purposes
self.goodness2 = self.tops.goodness_metric(self.y[all_val_bitmap],
y_pred_prob_local[all_val_bitmap].argmax(axis=1))
for predictor in self.list_predictors:
# Train the predictors with all the available data
predictor.fit(self.X, self.y)
if len(self.sons) > 0:
X_idx = self.decision_fun(self.X)
for i in range(len(self.sons)):
idx_son = X_idx == i
list_prev_y_prob_son = [(node, node_y_pred[idx_son]) for node, node_y_pred in list_prev_y_prob]
self.sons[i].aggregate(list_prev_y_prob_son)
else:
# Calculate the aggregation function
list_y_prob = [y_prob[all_val_bitmap] for _, y_prob in list_prev_y_prob]
y_true = self.y[all_val_bitmap]
self._calculate_aggr_fun(list_y_prob, y_true)
# At the end of the aggregation process, check the node is consistent
self._check_node()
self.tops.timers.stop('aggregate_node')
def _calculate_aggr_fun(self, list_y_prob, y_true):
n_inst = list_y_prob[0].shape[0]
n_clss = list_y_prob[0].shape[1]
mask = np.array([list(range(n_clss))] * n_inst) == np.reshape(y_true, (n_inst, 1))
len_w = len(list_y_prob)
# This restriction is the same as sum(w) = 1
# constr = LinearConstraint(np.ones((1, len_w)), 1, 1)
constr = [{'type': 'eq', 'fun': lambda w: w.sum() - 1}]
# Initial guess for w
w0 = np.ones(len_w) / len_w
# The individual w has to be between 0 and 1
bounds = Bounds(0, 1)
# TODO Add gradient function
# Objective function (minimization) // only for accuracy
c = np.array([y_prob[mask] for y_prob in list_y_prob]).transpose() + 0.0001
res = minimize(partial(self._min_fun, c), w0, constraints=constr, bounds=bounds)
self.w = res['x']
@staticmethod
def _min_fun(c, w):
aggr_prob = c.dot(w)
log_prob = np.log(aggr_prob)
# Could also use the cube root, centering 0.5 at 0
return -log_prob.sum()
def predict_proba(self, X, list_y_prob):
"""
:param X: (n_inst x n_vars) Dataset to predict
:param list_y_prob: [np.array_prev_pred, ...] Predictions from ancestor nodes
:return: (n_inst x n_classes) array with the aggregated class probabilities
"""
y_pred_prob_local = np.zeros((X.shape[0], self.tops.n_classes))
y_pred_prob_aux = self.node_predictor.list_predictors[self.idx_best_predictor].predict_proba(X)
for i, clss in enumerate(self.node_predictor.list_predictors[self.idx_best_predictor].classes_):
y_pred_prob_local[:, clss] = y_pred_prob_aux[:, i]
list_y_prob.append(y_pred_prob_local)
# This is a terminal node
if self.w is not None:
aggr_prob = np.dot(np.array(list_y_prob).transpose(), self.w).transpose()
# return aggr_prob.argmax(axis=1)
return aggr_prob
# This is not a terminal node
# y_pred could be passed by parameter and modified inside the function (a little bit more efficient)
y_pred = np.zeros((X.shape[0], self.tops.n_classes))
X_idx = self.decision_fun(X)
for i in range(len(self.sons)):
idx_son = X_idx == i
X_son = X[idx_son]
if X_son.shape[0] > 0:
list_y_pred_son = [y_prev_pred[idx_son] for y_prev_pred in list_y_prob]
y_pred_son = self.sons[i].predict_proba(X_son, list_y_pred_son)
y_pred[idx_son] = y_pred_son
return y_pred
def _is_leaf(self):
return len(self.sons) == 0
def _check_node(self):
if self._is_leaf():
assert len(self.sons) == 0 and len(self.w) == (self.level + 1) and self.decision_fun is None
else:
assert len(self.sons) > 1 and self.w is None and self.decision_fun is not None
assert len(self.list_predictors) > 0
assert self.node_predictor is not None
assert 0 <= self.idx_best_predictor < len(self.node_predictor.list_predictors)
def clear_data(self):
self.X = None
self.y = None
self.all_val_bitmap1 = None
self.all_val_bitmap2 = None
for son in self.sons:
son.clear_data()
def __str__(self):
# if the node is a leaf, print also the weights of the path
if self.w is not None:
weights = self.level * '\t' + 'Weights: ' + str(self.w) + '\n'
else:
weights = ''
predictor = self.node_predictor.list_predictors[self.node_predictor.idx_best_predictor]
if self.decision_fun is not None:
split_var_str = self.level * '\t' + 'Split var: ' + str(self.decision_fun.idx_var) + '\n'
else:
split_var_str = ''
str_tree = 'nInst: ' + str(self.n_inst) + '\n' + \
self.level * '\t' + 'Predictor: ' + str(predictor)[:30] + '\n' + \
split_var_str + \
self.level * '\t' + 'From node: ' + str(self.node_predictor.level) + '\n' + \
self.level * '\t' + 'Goodness1: ' + str(self.goodness1_display) + '\n' + \
self.level * '\t' + 'Goodness2: ' + str(self.goodness2) + '\n' + \
weights
for i in range(len(self.sons)):
str_tree += (self.level + 1) * '\t' + str(i) + ' -> ' + self.sons[i].__str__()
return str_tree
class DummyNode:
def __init__(self):
self.goodness1 = -np.inf
class WrapperPredictor(BaseEstimator):
"""
Class to wrap a predictor
"""
def __init__(self, predictor):
self.predictor = predictor
self.classes_ = None
def fit(self, X, y):
self.classes_ = unique_labels(y)
if len(self.classes_) > 1:
self.predictor.fit(X, y)
# assert all(self.classes == self.predictor.classes_)
return self
def predict(self, X):
if len(self.classes_) == 1:
return np.ones(X.shape[0]) * self.classes_[0]
else:
return self.predictor.predict(X)
def predict_proba(self, X):
if len(self.classes_) == 1:
aux_prob = np.ones((X.shape[0], 1))
return aux_prob
else:
return self.predictor.predict_proba(X)
def __str__(self):
return self.predictor.__str__()
```
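The ToPs class above follows the scikit-learn estimator interface (fit / predict / predict_proba), so it can be driven like any other sklearn classifier. The snippet below is a minimal usage sketch, not part of the original repository: the import path, the choice of base predictors and the hyperparameter values are assumptions.
```python
# Minimal usage sketch for the ToPs classifier defined above (illustrative only).
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

from tops.tops import ToPs, SplitPercentile  # assumed import path

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

clf = ToPs(
    predictors=[LogisticRegression(max_iter=1000), DecisionTreeClassifier(max_depth=3)],
    split_type=SplitPercentile(n_bins=4),  # policy used to propose candidate splits
    goodness_metric=accuracy_score,        # higher-is-better metric, as the tree assumes
    min_inst=30,                           # minimum number of instances needed to keep splitting
)
clf.fit(X_train, y_train)
print("test accuracy:", accuracy_score(y_test, clf.predict(X_test)))
```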
#### File: tops/utils/timing.py
```python
from time import time
class TimeCounter:
def __init__(self):
self.temp_time = 0
self.acum_time = 0
self.n_starts = 0
def reset(self):
self.temp_time = 0
self.acum_time = 0
self.n_starts = 0
def start(self):
if self.n_starts == 0:
self.temp_time = time()
self.n_starts += 1
def stop(self):
if self.n_starts == 1:
self.acum_time += time() - self.temp_time
if self.n_starts > 0:
self.n_starts -= 1
def get_time(self):
return self.acum_time
class TimeCounterCollection:
def __init__(self, list_names):
self.tc_collection = dict((name, TimeCounter()) for name in list_names)
def reset(self, name):
self.tc_collection[name].reset()
def reset_all(self):
for tc in self.tc_collection.values():
tc.reset()
def start(self, name):
self.tc_collection[name].start()
def stop(self, name):
self.tc_collection[name].stop()
def print_times(self):
for name, time_counter in self.tc_collection.items():
print(name, '->', time_counter.get_time())
``` |
{
"source": "jordiplanascuchi/policy-data-analyzer",
"score": 2
} |
#### File: Scrapy/scrapy_official_newspapers/pipelines.py
```python
import hashlib
import os
from scrapy.utils.python import to_bytes
from scrapy.exporters import CsvItemExporter
from sqlalchemy.orm import sessionmaker
from scrapy_official_newspapers.models import Policy, Processing, db_connect, create_table
class ScrapyOfficialNewspapersMySQLPipeline:
def __init__(self):
engine = db_connect()
create_table(engine)
self.session = sessionmaker(bind=engine)
def process_item(self, item, spider):
session = self.session()
processing = Processing(s3_raw=hashlib.sha1(to_bytes(item['doc_url'])).hexdigest())
session.add(processing)
policy = Policy(
country=item['country'],
geo_code=item['geo_code'],
level=item['level'],
data_source=item['data_source'],
title=item['title'],
reference=item['reference'],
authorship=item['authorship'],
resume=item['resume'],
publication_date=item['publication_date'],
enforcement_date=item['enforcement_date'],
url=item['url'],
doc_url=item['doc_url'],
doc_name=item['doc_name'],
doc_type=item['doc_type'],
doc_class=item['doc_class'],
processing = processing
)
session.merge(policy)
print(policy)
session.commit()
from itemadapter import ItemAdapter
from scrapy.exporters import CsvItemExporter
class ScrapyOfficialNewspapersPipeline:
def __init__(self):
dir = "./"
file_name = "Scraped_Documents_local.csv"
file = dir + file_name
self.file = open(file, 'ab')
self.exporter_1 = CsvItemExporter(self.file, include_headers_line = False, encoding = 'Latin1')
self.exporter_2 = CsvItemExporter(self.file, include_headers_line = False, encoding = 'utf-8')
self.exporter_1.start_exporting()
self.exporter_2.start_exporting()
def close_spider(self, spider):
self.exporter_1.finish_exporting()
self.exporter_2.finish_exporting()
self.file.close()
def process_item(self, item, spider):
try:
self.exporter_1.export_item(item)
except:
self.exporter_2.export_item(item)
return item
``` |
{
"source": "jordipons/elmarc",
"score": 3
} |
#### File: elmarc/src/datasets.py
```python
import pandas as pd
def path2gt(file_path, dataset):
if dataset == 'GTZAN':
return gtzan_path2gt(file_path)
elif dataset == 'Ballroom':
return ballroom_path2gt(file_path)
elif dataset == 'ExtendedBallroom':
return extended_ballroom_path2gt(file_path)
elif dataset == 'UrbanSound8K':
return urban_sound_path2gt(file_path)
else:
import ipdb; ipdb.set_trace()
# GTZAN
def gtzan_path2gt(file_path):
tag = file_path[file_path.rfind('/')+1:file_path.rfind('.', 0, -4)]
print(tag)
if tag == 'blues':
return 0
elif tag == 'classical':
return 1
elif tag == 'country':
return 2
elif tag == 'disco':
return 3
elif tag == 'hiphop':
return 4
elif tag == 'jazz':
return 5
elif tag == 'metal':
return 6
elif tag == 'pop':
return 7
elif tag == 'reggae':
return 8
elif tag == 'rock':
return 9
else:
print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
import ipdb; ipdb.set_trace()
# BALLROOM
def ballroom_path2gt(file_path):
cut_end = file_path[:file_path.rfind('/')]
tag = cut_end[cut_end.rfind('/')+1:]
print(tag)
if tag == 'ChaChaCha':
return 0
elif tag == 'Jive':
return 1
elif tag == 'Quickstep':
return 2
elif tag == 'Rumba':
return 3
elif tag == 'Samba':
return 4
elif tag == 'Tango':
return 5
elif tag == 'VienneseWaltz':
return 6
elif tag == 'Waltz':
return 7
else:
print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
import ipdb; ipdb.set_trace()
# EXTENDED BALLROOM
def extended_ballroom_path2gt(file_path):
cut_end = file_path[:file_path.rfind('/')]
tag = cut_end[cut_end.rfind('/')+1:]
print(tag)
if tag == 'Chacha':
return 0
elif tag == 'Foxtrot':
return 1
elif tag == 'Jive':
return 2
elif tag == 'Pasodoble':
return 3
elif tag == 'Quickstep':
return 4
elif tag == 'Rumba':
return 5
elif tag == 'Salsa':
return 6
elif tag == 'Samba':
return 7
elif tag == 'Slowwaltz':
return 8
elif tag == 'Tango':
return 9
elif tag == 'Viennesewaltz':
return 10
elif tag == 'Waltz':
return 11
elif tag == 'Wcswing':
return 12
else:
print('Warning: did not find the corresponding ground truth (' + str(tag) + ').')
import ipdb; ipdb.set_trace()
# URBAN SOUND 8K
def urban_sound_path2gt(file_path):
tag = file_path[file_path.rfind('/')+1:]
print(tag)
df = pd.read_csv('/datasets/MTG/users/jpons/urban_sounds/UrbanSound8K/metadata/UrbanSound8K.csv')
return int(df[df.slice_file_name==tag].classID)
```
#### File: elmarc/src/main.py
```python
import numpy as np
import tensorflow as tf
import librosa
import pickle
import time
import os
import datasets
import dl_models
import pandas as pd
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.cross_validation import PredefinedSplit
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
from elm import GenELMClassifier
from random_layer import RandomLayer
from sklearn import linear_model
from config_file import config_main
config = config_main
svm_params = [
{'kernel': ['rbf'],
'gamma': [1 / (2 ** 3), 1 / (2 ** 5), 1 / (2 ** 7), 1 / (2 ** 9), 1 / (2 ** 11), 1 / (2 ** 13), 'auto'],
'C': [0.1, 2.0, 8.0, 32.0]},
{'kernel': ['linear'],
'C': [0.1, 2.0, 8.0, 32.0]}
]
if config['model_type'] == 'linearSVM':
hyperparameters = [0.1, 1.0, 2.0]
elif config['model_type'] == 'ELM':
hyperparameters = [100, 250, 500, 1200, 1800, 2500] # remove 100 and 250
elif config['model_type'] == 'KNN':
hyperparameters = [1,3,5,10,20,30,50,100]
#------#
# CNNs #
#------#
def count_params(trainable_variables):
"Return number of trainable variables, specifically: tf.trainable_variables()"
return np.sum([np.prod(v.get_shape().as_list()) for v in trainable_variables])
def iterate_minibatches(prefix, audio_paths_list, batchsize):
total_size = len(audio_paths_list)
n_leftover = int(total_size % batchsize)
leftover = n_leftover != 0
for start_i in range(0, len(audio_paths_list) - batchsize + 1, batchsize):
first = True
ground_truth = []
data_names = []
for i in range(start_i,start_i + batchsize,1):
file_path = prefix + audio_paths_list[i]
file_path = file_path[:-1] # remove \n
print(str(i) + ': ' + file_path)
if first:
data = compute_input(file_path,config['sampling_rate'])
first = False
else:
data = np.append(data,compute_input(file_path,config['sampling_rate']), axis=0)
ground_truth.append(datasets.path2gt(file_path, config['dataset']))
data_names.append(file_path)
yield data, ground_truth, data_names
if leftover:
first = True
ground_truth = []
data_names = []
for i in range(start_i + batchsize, start_i + batchsize + n_leftover,1):
file_path = prefix + audio_paths_list[i]
file_path = file_path[:-1] # remove \n
print(str(i) + ': ' + file_path)
if first:
data = compute_input(file_path,config['sampling_rate'])
first = False
else:
data = np.append(data,compute_input(file_path,config['sampling_rate']), axis=0)
ground_truth.append(datasets.path2gt(file_path, config['dataset']))
data_names.append(file_path)
yield data, ground_truth, data_names
def format_cnn_data(prefix, list_audios):
l_audios = open(list_audios, 'r')
audio_paths_list = []
for s in l_audios:
audio_paths_list.append(s)
X = []
Y = []
ID = []
for batch in iterate_minibatches(prefix, audio_paths_list, config['CNN']['batch_size']):
# feature_maps[i][j, k, l, m]
# i: layer where extracted the feature
# j: batch-sample dimension
# k: one feature-map axis
# l: other feature-map axis
# m: feature-map
feature_maps = sess.run(features_definition, feed_dict={x_in: batch[0]})
for j in range(batch[0].shape[0]): # for every song in a batch
tmp_features = np.zeros((len(feature_maps),feature_maps[0].shape[-1]))
for i in range(len(feature_maps)): # for every bunch of extracted features
for m in range(feature_maps[i].shape[-1]): # for every feature-map
if len(feature_maps[i].shape) == 3: # compute mean 1D-feature map
tmp_features[i, m] = np.mean(np.squeeze(feature_maps[i][j, :, m]))
elif len(feature_maps[i].shape) == 4: # compute mean 2D-feature map
tmp_features[i, m] = np.mean(np.squeeze(feature_maps[i][j, :, :, m]))
X.append(tmp_features)
Y.append(batch[1][j])
ID.append(batch[2][j])
print('Shape Annotations: ' + str(np.array(Y).shape))
print('Shape X: ' + str(np.array(X).shape))
print('# IDs: ' + str(np.array(ID).shape[0]))
return X, Y, ID
def compute_input(audio_path, sampling_rate):
# compute spectrogram
audio, sr = librosa.load(audio_path, sr=sampling_rate)
if config['CNN']['signal'] == 'spectrogram':
audio_rep = librosa.feature.melspectrogram(y=audio,
sr=sampling_rate,
hop_length=256,
n_fft=512,
n_mels=config['CNN']['n_mels'],
power=2,
fmin=0.0,
fmax=6000.0).T
# normalize audio representation
print(audio_rep.shape)
src = librosa.core.logamplitude(audio_rep)
# zero-padding, repeat-padding and cropping are handled differently here because CNNs need fixed-length patches
if len(src) < config['CNN']['n_frames']:
if config['fix_length_by'] == 'zero-pad':
print('Zero padding!')
src_zeros = np.zeros((config['CNN']['n_frames'],config['CNN']['n_mels']))
src_zeros[:len(src)] = src
src = src_zeros
elif config['fix_length_by'] == 'repeat-pad':
print('Repeat and crop to the fixed_length!')
src_repeat = src
while (src_repeat.shape[0] < config['CNN']['n_frames']):
src_repeat = np.concatenate((src_repeat, src), axis=0)
src = src_repeat
src = src[:config['CNN']['n_frames'], :]
else:
print('Cropping audio!')
src = src[:config['CNN']['n_frames'], :]
elif config['CNN']['signal'] == 'waveform':
# zero-padding, repeat-padding and cropping are handled differently here because CNNs need fixed-length patches
if len(audio) < config['CNN']['n_samples']:
if config['fix_length_by'] == 'zero-pad':
print('Zero padding!')
src_zeros = np.zeros(config['CNN']['n_samples'])
src_zeros[:len(audio)] = audio
src = src_zeros
elif config['fix_length_by'] == 'repeat-pad':
print('Repeat and crop to the fixed_length!')
src_repeat = audio
while (len(src_repeat) < config['CNN']['n_samples']):
src_repeat = np.concatenate((src_repeat, audio), axis=0)
src = src_repeat
src = src[:config['CNN']['n_samples']]
else:
print('Cropping audio!')
src = audio[:config['CNN']['n_samples']]
src = np.expand_dims(src, axis=1) # add the channel dimension
audio_rep = np.expand_dims(src, axis=0) # add the batch dimension
return audio_rep
def select_cnn_feature_layers(feature_maps, selected_features_list):
selected_features = []
for i in range(len(feature_maps)):
tmp = np.array([])
for j in selected_features_list:
tmp = np.concatenate((tmp, np.squeeze(feature_maps[i][j])))
selected_features.append(tmp)
return selected_features
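# Minimal usage sketch (hypothetical shapes, for illustration only):
#   example_x = [np.random.rand(7, 128) for _ in range(3)]        # 3 tracks, 7 layers, 128 maps each
#   example_feats = select_cnn_feature_layers(example_x, [4, 5])  # concatenate layers 4 and 5
#   assert example_feats[0].shape == (256,)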
#-------#
# MFCCs #
#-------#
def extract_mfcc_features(audio, sampling_rate=12000):
src, sr = librosa.load(audio, sr=sampling_rate)
# zero-padding, repeat-padding and cropping to config['MFCC']['fixed_length'], mirroring the CNN pre-processing above
if config['fix_length_by'] == 'zero-pad' and len(src) < config['MFCC']['fixed_length']:
print('Zero padding!')
src_zeros = np.zeros(config['MFCC']['fixed_length']) # min length to have 3-frame mfcc's
src_zeros[:len(src)] = src
src = src_zeros
elif config['fix_length_by'] == 'repeat-pad' and len(src) < config['MFCC']['fixed_length']:
print('Repeat padding!')
src_repeat = src
while (len(src_repeat) < config['MFCC']['fixed_length']):
src_repeat = np.concatenate((src_repeat, src), axis=0)
src = src_repeat
elif config['fix_length_by'] == 'crop':
print('Cropping audio!')
src = src[:config['MFCC']['fixed_length']]
print(len(src))
# dmfcc as in https://github.com/keunwoochoi/transfer_learning_music/
mfcc = librosa.feature.mfcc(src, sampling_rate, n_mfcc=config['MFCC']['number'])
dmfcc = mfcc[:, 1:] - mfcc[:, :-1]
ddmfcc = dmfcc[:, 1:] - dmfcc[:, :-1]
return np.concatenate((np.mean(mfcc, axis=1), np.std(mfcc, axis=1),
np.mean(dmfcc, axis=1), np.std(dmfcc, axis=1),
np.mean(ddmfcc, axis=1), np.std(ddmfcc, axis=1)),
axis=0)
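# note: dmfcc/ddmfcc above are first/second-order frame differences (deltas), so the returned
# vector has 6 * config['MFCC']['number'] dimensions: mean and std of mfcc, dmfcc and ddmfcc.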
def format_mfcc_data(prefix, list_audios):
songs_list = open(list_audios, 'r')
X = []
Y = []
ID = []
n_song = 0
for song in songs_list:
print(str(n_song) + ': ' + song[:-1])
X.append(extract_mfcc_features(prefix + song[:-1], config['sampling_rate']))
Y.append(datasets.path2gt(song[:-1], config['dataset']))
ID.append(song[:-1])
n_song += 1
print(Y)
print(np.array(X).shape)
return X, Y, ID
#-----------------------#
# CLASSIFICATION MODELS #
#-----------------------#
def define_classification_model(h):
if config['model_type'] == 'linearSVM':
return LinearSVC(C=h)
elif config['model_type'] == 'ELM':
rl = RandomLayer(n_hidden=h,activation_func='reclinear',alpha=1)
return GenELMClassifier(hidden_layer = rl)
elif config['model_type'] == 'MLP':
return MLPClassifier(hidden_layer_sizes=(20,), max_iter=600, verbose=10, early_stopping=False)
elif config['model_type'] == 'linear':
return linear_model.SGDClassifier()
elif config['model_type'] == 'KNN':
return KNeighborsClassifier(n_neighbors=h)
if __name__ == '__main__':
#--------------------#
# FEATURE EXTRACTION #
#--------------------#
print(config)
print('Set file name (unique identifier) for the experiment..')
if config['features_type'] == 'MFCC':
experiment_name = str(config['experiment_name']) + '_MFCC_' + str(int(time.time()))
elif config['features_type'] == 'CNN':
experiment_name = str(config['experiment_name']) + '_CNN_' \
+ '_' + str(config['model_type']) \
+ '_' + str(config['CNN']['signal']) \
+ '_' + str(config['CNN']['architecture']) \
+ '_' + str(config['CNN']['selected_features_list']) + '_'+ str(int(time.time()))
print(experiment_name)
if not config['load_extracted_features']: # extract features: MFCC or CNN
print('Extracting features..')
if config['features_type'] == 'CNN':
if config['CNN']['signal'] == 'spectrogram':
x_in = tf.placeholder(tf.float32, [None, None, config['CNN']['n_mels']])
elif config['CNN']['signal'] == 'waveform':
x_in = tf.placeholder(tf.float32, [None, config['CNN']['n_samples'], 1])
features_definition = dl_models.build(config, x_in)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
print('Number of parameters of the model: ' + str(
count_params(tf.trainable_variables()))+'\n')
if config['audios_list'] == False:
print('Extract features for train-set..')
x_train, y_train, id_train = format_cnn_data(prefix=config['audio_path'],
list_audios=config['train_set_list'])
print('Extract features for val-set..')
x_val, y_val, id_val = format_cnn_data(prefix=config['audio_path'],
list_audios=config['val_set_list'])
print('Extract features for test-set..')
x_test, y_test, id_test = format_cnn_data(prefix=config['audio_path'],
list_audios=config['test_set_list'])
else:
x, y, ids = format_cnn_data(prefix=config['audio_path'],
list_audios=config['audios_list'])
elif config['features_type'] == 'MFCC':
if config['audios_list'] == False:
print('Extract features for train-set..')
x_train, y_train, id_train = format_mfcc_data(prefix=config['audio_path'],
list_audios=config['train_set_list'])
print('Extract features for val-set..')
x_val, y_val, id_val = format_mfcc_data(prefix=config['audio_path'],
list_audios=config['val_set_list'])
print('Extract features for test-set..')
x_test, y_test, id_test = format_mfcc_data(prefix=config['audio_path'],
list_audios=config['test_set_list'])
else:
x, y, ids = format_mfcc_data(prefix=config['audio_path'],
list_audios=config['audios_list'])
print('Storing extracted features..')
if not os.path.exists(config['save_extracted_features_folder']):
os.makedirs(config['save_extracted_features_folder'])
if config['audios_list'] == False:
with open(config['save_extracted_features_folder'] + experiment_name + '.pkl', 'wb') as f:
pickle.dump([x_train, y_train, id_train, x_val, y_val, id_val, x_test, y_test, id_test, config], f)
else:
with open(config['save_extracted_features_folder'] + experiment_name + '.pkl', 'wb') as f:
pickle.dump([x, y, ids, config], f)
else: # load extracted features
print('Loading features: ' + config['load_extracted_features'])
if config['audios_list'] == False:
with open(config['load_extracted_features'], 'rb') as f:
x_train, y_train, id_train, x_val, y_val, id_val, x_test, y_test, id_test, config = pickle.load(f)
else:
with open(config['load_extracted_features'], 'rb') as f:
x, y, ids, config = pickle.load(f)
if config['features_type'] == 'CNN':
print('Select CNN features..')
print('Data size (data points, feature vector)..')
if config['audios_list'] == False:
x_train = select_cnn_feature_layers(x_train, config['CNN']['selected_features_list'])
x_val = select_cnn_feature_layers(x_val, config['CNN']['selected_features_list'])
x_test = select_cnn_feature_layers(x_test, config['CNN']['selected_features_list'])
print(np.array(x_train).shape)
print(np.array(x_val).shape)
print(np.array(x_test).shape)
else:
x = select_cnn_feature_layers(x, config['CNN']['selected_features_list'])
print(np.array(x).shape)
#------------#
# CLASSIFIER #
#------------#
if not os.path.exists(config['results_folder']):
os.makedirs(config['results_folder'])
f = open(config['results_folder'] + experiment_name + '.txt','w')
if config['audios_list'] == False:
print('train/val/test partitions are pre-defined!')
if config['model_type'] == 'SVM':
# hyperparameter search in val set
x_dev = np.concatenate((x_train, x_val), axis=0)
y_dev = np.concatenate((y_train, y_val), axis=0)
val_mask = np.concatenate((-np.ones(len(y_train)), np.zeros(len(y_val))), axis=0)
ps = PredefinedSplit(test_fold=val_mask)
svc = SVC()
hps = GridSearchCV(svc, svm_params, cv=ps, n_jobs=3, pre_dispatch=3*8, verbose=config['SVM_verbose']).fit(x_dev, y_dev)
print('Best hyperparameter: ' + str(hps.best_params_))
# define final model
model = SVC()
model.set_params(**hps.best_params_)
else:
score_max = 0
h_max = -1
for h in hyperparameters: # select best params in validation set
print('Now in: ' + str(h))
model = define_classification_model(h)
model.fit(x_train, y_train)
score = accuracy_score(y_val, model.predict(x_val))
print('- Score: ' + str(score))
if score > score_max:
score_max = score
h_max = h
print('Accuracy val set: ' + str(score_max))
print('Best hyperparameter: ' + str(h_max))
f.write('Accuracy val set: ' + str(score_max) + '\n')
f.write('Best hyperparameter: ' + str(h_max))
model = define_classification_model(h_max)
# train model with best hyperparameters and evaluate in test set
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print('Detailed classification report: ')
print(classification_report(y_test, y_pred))
print('Accuracy test set: ')
print(accuracy_score(y_test, y_pred))
print(config)
print('Storing results..')
f.write(str(classification_report(y_test, y_pred)))
f.write('Accuracy test set: ' + str(accuracy_score(y_test, y_pred)) + '\n')
f.write(str(config))
else:
print('10 fold cross-validation!')
if config['dataset'] == 'UrbanSound8K':
print('UrbanSound8K dataset with pre-defined splits!')
df = pd.read_csv('/datasets/MTG/users/jpons/urban_sounds/UrbanSound8K/metadata/UrbanSound8K.csv')
folds_mask = []
for i in ids:
tag = i[i.rfind('/')+1:]
folds_mask.append(int(df[df.slice_file_name==tag].fold))
ps = PredefinedSplit(test_fold=folds_mask)
else:
ps = 10
if config['model_type'] == 'SVM':
svc = SVC()
model = GridSearchCV(svc, svm_params, cv=ps, n_jobs=3, pre_dispatch=3*8, verbose=config['SVM_verbose']).fit(x, y)
print('[SVM] Best score of ' + str(model.best_score_) + ': ' + str(model.best_params_))
f.write('[SVM] Best score of ' + str(model.best_score_) + ': ' + str(model.best_params_))
else:
score_max = 0
h_max = -1
for h in hyperparameters:
print('Now in: ' + str(h))
model = define_classification_model(h)
scores = cross_val_score(model, x, y, cv=ps, scoring='accuracy')
print('- Score: ' + str(scores.mean()))
if scores.mean() > score_max:
h_max = h
score_max = scores.mean()
print(config['model_type'] + ' - score of best model: ' + str(score_max) + ' with ' + str(h_max))
f.write(config['model_type'] + ' - score of best model: ' + str(score_max) + ' with ' + str(h_max))
print(config)
f.write(str(config))
f.close()
# NOTES ON SPECTROGRAM. Mel power spectrogram. Sampling rate: 12k. fmin=0 and fmax=6000. Using shorter clips.
# IDEAS: - Check statistics of input data (zero-mean/one-var)?
# - Only store mean values for features?
``` |
{
"source": "jordipons/EUSIPCO2017",
"score": 3
} |
#### File: EUSIPCO2017/src/spectrograms.py
```python
import os
import librosa
from joblib import Parallel, delayed
import pickle
import glob
import json
import sys
import common
"""
spectrograms.py: computes spectrograms.
Requires pre-computing an 'index_file', a .tsv where an index with id,path is defined for a dataset.
The results and parameters of this script are stored in common.DATA_FOLDER/spectrograms/
'index.tsv' stores the 'id,path_spectrogram,path_audio'.
'path_spectrogram' and 'path_audio' are paths relative to common.DATA_FOLDER.
Step 1/5 of the pipeline.
"""
config = {
'spectrograms_code_version': 'eusipco2017',
'audio_folder' : 'audio/MagnaTagATune/', # end it with / !!!
'spectrograms_name' : 'MagnaTT_dieleman_spectrograms',
'original_sr' : 16000,
'resample_sr' : 16000, # if one does not wish to resample, set resample_sr=original_sr
'hop' : 256,
'spectrogram_type' : 'mel',
'n_fft' : 512,
'n_mels' : 128,
'convert_id' : False, # converts the (path) name of a file to its ID name - correspondence in index_file.
'index_file' : 'index/index_MagnaTT.tsv', # index to be converted. THIS IS THE LIST THAT ONE WILL COMPUTE
'audio_ext' : ['mp3'] # in list form
}
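# The 'index_file' above is expected to contain one tab-separated "id\taudio_path" line per track,
# with audio_path relative to config['audio_folder']. Hypothetical example lines (not real ids/paths):
#   0001    some_album/track_0001.mp3
#   0002    some_album/track_0002.mp3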
# Choi et al.: 'original_sr' : 16000, 'resample_sr' : 12000, 'hop' : 256, 'spectrogram_type' : 'mel', 'n_fft' : 512, 'n_mels' : 96.
# Dieleman et al.: 'original_sr' : 16000, 'resample_sr' : 16000, 'hop' : 256, 'spectrogram_type' : 'mel', 'n_fft' : 512, 'n_mels' : 128.
num_process = 8
compute_spectro = True
path2id = dict()
def compute_spec(audio_file,spectro_file):
# Get actual audio
audio, sr = librosa.load(audio_file, sr=config['original_sr'])
# resample?
if config['original_sr']!=config['resample_sr']:
audio = librosa.resample(audio, sr, config['resample_sr'])
sr=config['resample_sr']
# Compute spectrogram
if config['spectrogram_type']=='cqt':
spec = librosa.cqt(audio, sr=sr, hop_length=config['hop'], n_bins=config['cqt_bins'], real=False)
elif config['spectrogram_type']=='mel':
spec = librosa.feature.melspectrogram(y=audio, sr=sr, hop_length=config['hop'],n_fft=config['n_fft'],n_mels=config['n_mels'])
elif config['spectrogram_type']=='stft':
spec = librosa.stft(y=audio,n_fft=config['n_fft'])
# Write results:
with open(spectro_file, "wb") as f:
pickle.dump(spec, f, protocol=-1) # spec shape: MxN.
def do_process(id, audio_file, spectro_file):
try:
if compute_spectro:
if not os.path.exists(spectro_file[:spectro_file.rfind('/')+1]):
os.makedirs(spectro_file[:spectro_file.rfind('/')+1])
compute_spec(audio_file,spectro_file)
fw = open(common.DATA_FOLDER+config['spectro_folder']+"index.tsv","a")
fw.write("%s\t%s\t%s\n" % (id,spectro_file[len(common.DATA_FOLDER):],audio_file[len(common.DATA_FOLDER):]))
fw.close()
print 'Computed spec: %s' % audio_file
else:
if os.path.isfile(spectro_file):
fw = open(config['spectro_folder']+"index.tsv","a")
fw.write("%s\t%s\t%s\n" % (id,spectro_file[len(common.DATA_FOLDER+config['spectro_folder']):],audio_file[len(common.DATA_FOLDER+config['audio_folder']):]))
fw.close()
except Exception as e:
ferrors = open(common.DATA_FOLDER+config['spectro_folder']+"errors.txt","a")
ferrors.write(audio_file+"\n")
ferrors.write(str(e))
ferrors.close()
print 'Error computing spec', audio_file
print str(e)
def process_files(files):
Parallel(n_jobs=num_process)(delayed(do_process)(id, audio_file, spectro_file)
for id, audio_file, spectro_file in files)
## Debug ##
#print 'WARNING: Parallelization is not used!'
#for id, audio_file, spectro_file in files:
# do_process(id, audio_file, spectro_file)
if __name__ == '__main__':
# set spectrograms folder
config['spectro_folder'] = "spectrograms/spectro_%s_%s_%s/" % (config['spectrograms_name'],config['spectrogram_type'],config['spectrograms_code_version'])
if not os.path.exists(common.DATA_FOLDER+config['spectro_folder']):
os.makedirs(common.DATA_FOLDER+config['spectro_folder'])
else:
sys.exit("EXIT: already exists a folder with this name!\nIf you need to compute those again, remove folder.")
# create empty spectrograms index
fw = open(common.DATA_FOLDER+config['spectro_folder']+"index.tsv","w")
fw.close()
# list audios to process: according to 'index_file'
files_to_convert = []
f=open(common.DATA_FOLDER+config["index_file"])
for line in f.readlines():
id, audio = line.strip().split("\t")
if config['convert_id']:
spect = id+".pk"
else:
spect = audio[:audio.rfind(".")]+".pk"
files_to_convert.append((id,common.DATA_FOLDER+config['audio_folder']+audio,common.DATA_FOLDER+config['spectro_folder']+spect))
print str(len(files_to_convert))+' audio files to process!'
# compute spectrogram
process_files(files_to_convert)
# save parameters
json.dump(config, open(common.DATA_FOLDER+config['spectro_folder']+"params.json","w"))
print "Spectrograms folder: "+common.DATA_FOLDER+config['spectro_folder']
# COMMENTS:
## pickle protocol=-1?
## convert_id == FALSE: creates sub-directories - put to false for magna.
## convert_id == TRUE: does not create sub-directories - in some cases one does not care.
``` |
{
"source": "jordiprats/alexa-skills",
"score": 3
} |
#### File: alexa-skills/alexa-frases-nyonyes/frases_nyonyes.py
```python
import random
import alexandra
app = alexandra.Application()
name_map = {}
frases = {
"Si no tardas mucho, te espero toda la vida.",
"Quiero vivir eternamente en tu sonrisa.",
"Cualquiera en su sano juicio se habría vuelto loco por ti.",
"Cualquier lugar es mi casa si eres tú quien abre la puerta.",
"Sería capaz de reconocer tu cara entre un millón de sueños.",
"Te miraba reír y ahí estaban mis futuros.",
"Te quiero. Creo que solo te lo he dicho un millón de veces.",
"Hoy he descubierto que eres más dulce que la miel.",
"Me hipnotizas y me hechizas a todas horas.",
"Me perdería a todas horas en tus ojos.",
"Tus ojos son el conjuro contra mi mal día.",
"Y una cosa puedo jurar: yo que me enamoraré de tus alas, jamás te las voy a querer cortar.",
"Viajar a Marte o al cuarto de la plancha. Pero contigo.",
"Mirar es una cosa, que me mires tú es otro verbo diferente.",
"Eres la excepción a todo eso que dije que nunca haría",
"Te quiero libre, y me quiero libre contigo",
"¿Y tú que sabes del amor? Yo te sé a ti de memoria",
"Soy feliz cuando me miras.",
"Comenzaste robándome una sonrisa y ahora te has quedado mi corazón",
"Aún en la oscuridad puedo sentir la luz de tu sonrisa",
"Te veo y se me va el sueño",
"Me gustas, me gustas, me gustas, me gustas... ¿sabes que me gustas?",
"Me haces sentir mis instintos más salvajes: te devoraría a besos.",
"Quiero hacer contigo lo que la primavera hace con los cerezos.",
"Quédate con quien te bese el alma, la piel te la besa cualquiera.",
}
def getRandom():
global frases
return random.choice(list(frases))
@app.launch
def launch_handler(item):
return alexandra.respond(ssml="<speak>"+getRandom()+"</speak>")
@app.intent('amor')
def octoalert_intent():
return alexandra.respond(ssml="<speak>"+getRandom()+"</speak>")
if __name__ == '__main__':
app.run('0.0.0.0', 9997, debug=True)
```
#### File: alexa-skills/alexa-frases-rajoy/frases.py
```python
import random
import alexandra
app = alexandra.Application()
name_map = {}
frases = {
"ESTO ES COMO EL AGUA QUE CAE DEL CIELO SIN QUE SEPAMOS POR QUÉ",
"DESPUÉS DEL AÑO 14 VIENE EL 15",
"NO HE DORMIDO NADA, NO ME PREGUNTEN DEMASIADO",
"DIJE QUE BAJARÍA LOS IMPUESTOS Y LOS ESTOY SUBIENDO",
"LOS CATALANES HACEN COSAS",
"A VECES ESTAMOS PENSANDO SIEMPRE EN LO MATERIAL, Y A VECES LOS SERES HUMANOS SOMOS SOBRE TODO PERSONAS",
"LO MÁS IMPORTANTE QUE SE PUEDE HACER POR VOSOTROS ES LO QUE VOSOTROS PODRAÍS HACER POR VOSOTROS",
"TODO ES FALSO SALVO ALGUNA COSA",
"A VECES LA MEJOR DECISIÓN ES NO TOMAR NINGUNA DECISIÓN",
"LA SEGUNDA YA TAL",
"NI A HITLER NI A STALIN LES HAN NOMBRADO PERSONAS NON GRATAS EN PONTEVEDRA",
"UNA COSA ES SER SOLIDARIO Y OTRA ES SER SOLIDARIO A CAMBIO DE NADA",
"¿USTEDES PIENSAN ANTES DE HABLAR O HABLAN TRAS PENSAR?",
"TENEMOS QUE FABRICAR MÁQUINAS QUE PERMITAN SEGUIR FABRICANDO MÁQUINAS, PORQUE LO QUE NUNCA VA A HACER LA MÁQUINA ES FABRICAR MÁQUINAS A SU VEZ",
"SOMOS SENTIMIENTOS Y TENEMOS SERES HUMANOS",
"IT’S VERY DIFICULT TODO ESTO",
"¿Y LA EUROPEA?",
}
def getRandom():
global frases
return random.choice(list(frases))
@app.launch
def launch_handler(item):
return alexandra.respond(ssml="<speak>"+getRandom()+"</speak>")
@app.intent('rajoy')
def octoalert_intent():
return alexandra.respond(ssml="<speak>"+getRandom()+"</speak>")
if __name__ == '__main__':
app.run('0.0.0.0', 9998, debug=True)
``` |
{
"source": "jordiprats/django-ampa",
"score": 2
} |
#### File: cole/migrations/0022_auto_20210306_1528.py
```python
from django.db import migrations, models
def forward(apps, schema_editor):
User = apps.get_model("cole", "User")
for user in User.objects.all():
user.email=user.email.lower()
user.save()
class Migration(migrations.Migration):
dependencies = [
('cole', '0021_user_representant'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'ordering': ['email']},
),
migrations.AddIndex(
model_name='user',
index=models.Index(fields=['email'], name='cole_user_email_c6e51b_idx'),
),
migrations.RunPython(forward)
]
```
#### File: cole/migrations/0024_entitat_instance.py
```python
from django.db import migrations, models
def forward(apps, schema_editor):
Entitat = apps.get_model("cole", "Entitat")
try:
entitat_instance = Entitat.objects.all()[0]
if entitat_instance:
return
except:
pass
entitat_instance = Entitat()
entitat_instance.save()
class Migration(migrations.Migration):
dependencies = [
('cole', '0023_entitat_codi_registre'),
]
operations = [
migrations.RunPython(forward)
]
```
#### File: cole/migrations/0033_cleanup_hone.py
```python
from django.db import migrations, models
def forward(apps, schema_editor):
Alumne = apps.get_model("cole", "Alumne")
for alumne in Alumne.objects.all():
if alumne.telf_tutor1:
alumne.telf_tutor1 = alumne.telf_tutor1.replace(" ", "")
if alumne.telf_tutor2:
alumne.telf_tutor2 = alumne.telf_tutor2.replace(" ", "")
alumne.save()
class Migration(migrations.Migration):
dependencies = [
('cole', '0032_cleanup'),
]
operations = [
migrations.RunPython(forward)
]
```
#### File: ampa/cole/models.py
```python
from django.contrib.postgres.fields import ArrayField
from django.contrib.auth.models import AbstractUser
from django.utils.text import slugify
from django.conf import settings
from django.urls import reverse
from django.db import models
import unidecode
import uuid
import os
import re
class User(AbstractUser):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
slug = models.SlugField(
default='',
editable=False,
max_length=100,
unique=True,
)
email = models.EmailField(max_length=256, unique=True)
invite = models.CharField(max_length=256)
name = models.CharField(max_length=256, blank=True, null=True, default='')
representant = models.ForeignKey('peticions.Representant', on_delete=models.SET_NULL, related_name='users', default=None, blank=True, null=True)
def save(self, *args, **kwargs):
self.slug = slugify(self.username, allow_unicode=False)
self.email = self.email.lower().strip()
# re-save any classe that references this address so delegat/subdelegat links stay in sync
classes_delegat = Classe.objects.filter(email_delegat__iexact=self.email)
for classe in classes_delegat:
classe.save()
classes_subdelegat = Classe.objects.filter(email_subdelegat__iexact=self.email)
for classe in classes_subdelegat:
classe.save()
super().save(*args, **kwargs)
def __str__(self):
if self.name:
return self.name
else:
return self.email
class Meta:
ordering = ['email']
indexes = [
models.Index(fields=['email']),
]
class Modalitat(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256)
ordre = models.IntegerField(default=1)
def __str__(self):
return self.name
class Meta:
ordering = ['ordre']
indexes = [
models.Index(fields=['ordre']),
]
class Curs(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
curs = models.CharField(max_length=256, default='')
modalitat = models.ForeignKey(Modalitat, on_delete=models.CASCADE, related_name='cursos', blank=True, null=True, default=None)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
if self.modalitat:
return self.curs+'/'+str(self.modalitat)
else:
return self.curs
class Meta:
unique_together = ('curs', 'modalitat')
ordering = ['-curs']
indexes = [
models.Index(fields=['curs']),
]
class Etapa(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
nom = models.CharField(max_length=256, default='')
ordre = models.IntegerField(default=1)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.nom
class Meta:
ordering = ['ordre', 'nom']
indexes = [
models.Index(fields=['nom']),
]
class Classe(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
nom = models.CharField(max_length=256, default='')
alias = models.CharField(max_length=256, default='', blank=True, null=True,)
curs = models.ForeignKey(Curs, on_delete=models.CASCADE, related_name='classes', blank=True, null=True, default=None)
etapa = models.ForeignKey(Etapa, on_delete=models.CASCADE, related_name='classes', blank=True, null=True, default=None)
delegat = models.ForeignKey(User, on_delete=models.CASCADE, related_name='delegatsclasses')
subdelegat = models.ForeignKey(User, on_delete=models.CASCADE, related_name='subdelegatsclasses', blank=True, null=True)
ultim_email = models.DateTimeField(blank=True, null=True, default=None)
ready_to_send = models.BooleanField(default=False)
latest_export = models.CharField(max_length=256, blank=True, null=True, default=None)
waiting_export = models.BooleanField(default=False)
tutor = models.CharField(max_length=256, blank=True, null=True, default='')
nom_delegat = models.CharField(max_length=256, blank=True, null=True, default='')
telefon_delegat = models.CharField(max_length=256, blank=True, null=True, default='')
email_delegat = models.CharField(max_length=256, blank=True, null=True, default='')
nom_subdelegat = models.CharField(max_length=256, blank=True, null=True, default='')
telefon_subdelegat = models.CharField(max_length=256, blank=True, null=True, default='')
email_subdelegat = models.CharField(max_length=256, blank=True, null=True, default='')
def _get_validada(self):
classe_validada = True
for alumne in Alumne.objects.filter(classe=self):
if not alumne.validat:
classe_validada = False
return classe_validada
validada = property(_get_validada)
def _is_procesant(self):
return self.alumnes.count() == 0 and self.uploads.count() != 0
is_procesant = property(_is_procesant)
def _is_upload_error(self):
return self.uploads.filter(error=True).count() > 0 and self.uploads.filter(error=False, processed=True).count() == 0
is_upload_error = property(_is_upload_error)
def _get_full_nom(self):
try:
if self.alias:
str_nom = self.nom+' ('+self.alias+')'
else:
str_nom = self.nom
return str_nom
except:
return str(self.id)
full_nom = property(_get_full_nom)
def __str__(self):
return self._get_full_nom()
def save(self, *args, **kwargs):
if self.email_delegat:
self.email_delegat = self.email_delegat.lower().strip()
delegat = User.objects.filter(email__iexact=self.email_delegat).first()
if delegat:
self.delegat = delegat
if self.email_subdelegat:
self.email_subdelegat = self.email_subdelegat.lower().strip()
subdelegat = User.objects.filter(email__iexact=self.email_subdelegat).first()
if subdelegat:
self.subdelegat = subdelegat
super().save(*args, **kwargs)
class Meta:
ordering = ['-curs', 'etapa','nom']
unique_together = ('nom', 'curs', 'etapa', 'delegat')
class Alumne(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
#id_nen nom cognom1 cognom2 naixement pare telf1 mare telf2 email cessio signatura
# TODO: drop num_llista
num_llista = models.IntegerField(default=0)
nom = models.CharField(max_length=256)
cognom1 = models.CharField(max_length=256)
cognom2 = models.CharField(max_length=256, default=None, blank=True, null=True)
nom_unaccented = models.CharField(max_length=256, default="", blank=True, null=True)
cognom1_unaccented = models.CharField(max_length=256, default="", blank=True, null=True)
cognom2_unaccented = models.CharField(max_length=256, default="", blank=True, null=True)
naixement = models.DateTimeField(default=None, blank=True, null=True)
tutor1 = models.CharField(max_length=256, default='', blank=True, null=True)
telf_tutor1 = models.CharField(max_length=256, default='', blank=True, null=True)
email_tutor1 = models.TextField(max_length=600, default=None, blank=True, null=True)
tutor1_cessio = models.BooleanField(default=False, help_text="Accepto que les meves dades es facilitin al delegat i al grup classe per finalitats de comunicacions: enviament de mails, creació grup whatsapp, etc. acceptant fer un ús responsable i no facilitar a tercers les dades del grup classe que proporcionarà el delegat")
tutor2 = models.CharField(max_length=256, default='', blank=True, null=True)
telf_tutor2 = models.CharField(max_length=256, default='', blank=True, null=True)
email_tutor2 = models.TextField(max_length=600, default=None, blank=True, null=True)
tutor2_cessio = models.BooleanField(default=False, help_text="Accepto que les meves dades es facilitin al delegat i al grup classe per finalitats de comunicacions: enviament de mails, creació grup whatsapp, etc. acceptant fer un ús responsable i no facilitar a tercers les dades del grup classe que proporcionarà el delegat")
alta = models.DateTimeField(blank=True, null=True)
baixa = models.DateTimeField(blank=True, null=True)
validat = models.BooleanField(default=False, help_text='He comprovat totes les dades i són correctes')
updated_at = models.DateTimeField(auto_now=True)
classes = models.ManyToManyField(Classe, related_name='alumnes')
def _get_mailing_emails(self):
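# emails are only included in mailings when the corresponding tutor accepted the data-sharing
# consent (tutorN_cessio) and the record has been validated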
emails = []
if self.tutor1_cessio and self.validat:
emails += re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", self.email_tutor1.lower())
if self.tutor2_cessio and self.validat:
emails += re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.[a-z]+", self.email_tutor2.lower())
return emails
emails = property(_get_mailing_emails)
mailing_emails = property(_get_mailing_emails)
def _get_print_name(self):
composite_name = self.nom
if self.cognom1:
composite_name+=' '+self.cognom1
if self.cognom2:
composite_name+=' '+self.cognom2
return composite_name
print_name = property(_get_print_name)
def _get_classe_actual(self):
try:
return self.classes.order_by('-curs').first()
except:
return None
classe = property(_get_classe_actual)
def _get_extrainfo_hash(self):
attachments_dict = {}
if self.extrainfo:
for extrainfo in self.extrainfo.all():
if extrainfo.attachment:
if extrainfo.descripcio:
attachments_dict[extrainfo.descripcio] = extrainfo.id
else:
attachments_dict[extrainfo.attachment.filename] = extrainfo.id
else:
if extrainfo.descripcio:
attachments_dict[extrainfo.descripcio] = extrainfo.id
else:
attachments_dict[str(extrainfo.id)] = extrainfo.id
return attachments_dict
extrainfo_hash = property(_get_extrainfo_hash)
def save(self, *args, **kwargs):
if self.nom:
self.nom_unaccented = unidecode.unidecode(self.nom.replace('·', '.')).lower()
if self.cognom1:
self.cognom1_unaccented = unidecode.unidecode(self.cognom1.replace('·', '.')).lower()
if self.cognom2:
self.cognom2_unaccented = unidecode.unidecode(self.cognom2.replace('·', '.')).lower()
if self.telf_tutor1:
self.telf_tutor1 = self.telf_tutor1.replace(" ", "")
if self.telf_tutor2:
self.telf_tutor2 = self.telf_tutor2.replace(" ", "")
super().save(*args, **kwargs)
def __str__(self):
return self._get_print_name()
class Meta:
ordering = ['num_llista', 'cognom1', 'cognom2' ]
class FileAttachment(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
filename = models.CharField(max_length=256)
upload_path = models.CharField(max_length=256)
filepath = models.CharField(max_length=256)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def _is_image(self):
return re.search(r'\.jpg$', self.filename) or re.search(r'\.jpeg$', self.filename) or re.search(r'\.png$', self.filename)
is_image = property(_is_image)
def _get_static_url(self):
return settings.STATIC_DOMAIN+'uploads/'+self.upload_path+'/'+self.filename
static_url = property(_get_static_url)
def __str__(self):
return self.filename
def delete(self, *args, **kwargs):
try:
os.remove(self.filepath)
except:
pass
super(FileAttachment, self).delete(*args, **kwargs)
class Meta:
ordering = ['-created_at']
indexes = [
models.Index(fields=['-created_at',]),
]
class ExtraInfoAlumne(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
alumne = models.ForeignKey(Alumne, on_delete=models.CASCADE, related_name='extrainfo')
descripcio = models.CharField(max_length=256, default='', blank=True, null=True)
dades = models.TextField(max_length=600, default='', blank=True, null=True)
attachment = models.ForeignKey(FileAttachment, on_delete=models.CASCADE, related_name='files', blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['-updated_at']
indexes = [
models.Index(fields=['updated_at',]),
models.Index(fields=['-updated_at',]),
]
MAILING_STATUS_DRAFT = '0'
MAILING_STATUS_PROGRAMAT = '1'
MAILING_STATUS_ENVIANT = '2'
MAILING_STATUS_ENVIAT = '3'
MAILING_STATUS_ERROR_GENERAL = 'E'
MAILING_STATUS = [
(MAILING_STATUS_DRAFT, 'borrador'),
(MAILING_STATUS_PROGRAMAT, 'enviament programat'),
(MAILING_STATUS_ENVIANT, 'enviant...'),
(MAILING_STATUS_ENVIAT, 'enviament completat'),
(MAILING_STATUS_ERROR_GENERAL, 'error general d\'enviament')
]
class Mailing(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
#subject, html_message, email_from, email_reply_to, recipient_list
subject = models.CharField(max_length=256)
html_message = models.TextField(max_length=10000, default=None, blank=True, null=True)
email_from = models.CharField(max_length=256, default='', blank=True, null=True)
email_reply_to = models.CharField(max_length=256, default=None, blank=True, null=True)
classes = models.ManyToManyField(Classe, related_name='mailings')
etapa = models.ForeignKey(Etapa, on_delete=models.CASCADE, related_name='mailings', blank=True, null=True, default=None)
curs = models.ForeignKey(Curs, on_delete=models.CASCADE, related_name='mailings', blank=True, null=True, default=None)
attachments = models.ManyToManyField(FileAttachment, related_name='mailings')
nomes_delegats = models.BooleanField(default=False)
status = models.CharField(
max_length=1,
choices=MAILING_STATUS,
default=MAILING_STATUS_DRAFT,
)
progress = models.IntegerField(default=0)
#emails_sent = ArrayField(models.CharField(max_length=200))
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def _get_recipient_emails(self):
mailing_emails = set()
# pick the widest scope that is set: a whole curs, a whole etapa, otherwise the selected classes
if self.curs:
classes = self.curs.classes.all()
elif self.etapa:
classes = self.etapa.classes.all()
else:
classes = self.classes.all()
for classe in classes:
for alumne in classe.alumnes.all():
for email in alumne.mailing_emails:
mailing_emails.add(email)
return mailing_emails
recipient_list = property(_get_recipient_emails)
def _get_local_attachment_hash(self):
attachments_dict = {}
for attachment in self.attachments.all():
attachments_dict[attachment.filename] = attachment.filepath
return attachments_dict
localfile_attachment_hash = property(_get_local_attachment_hash)
def _get_attachment_hash(self):
attachments_dict = {}
for attachment in self.attachments.all():
attachments_dict[attachment.filename] = attachment.static_url
return attachments_dict
attachment_hash = property(_get_attachment_hash)
def _get_images_hash(self):
attachments_dict = {}
for attachment in self.attachments.all():
if attachment.is_image:
attachments_dict[attachment.filename] = attachment.static_url
return attachments_dict
images_hash = property(_get_images_hash)
def get_manual_unsubscribe_links(self, email):
links = set()
for classe in self.classes.all():
for alumne in classe.alumnes.all():
if email in alumne.mailing_emails:
url = reverse('form.pares.edit.alumne', kwargs={ 'alumne_id': alumne.id })
links.add(url)
return links
def __str__(self):
return self.subject
class Meta:
ordering = ['-updated_at', 'status']
indexes = [
models.Index(fields=['-updated_at', 'status']),
models.Index(fields=['status']),
]
class FileUpload(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
filepath = models.CharField(max_length=256)
owner = models.ForeignKey(User, on_delete=models.CASCADE, related_name='uploads', blank=True, null=True, default=None)
classe = models.ForeignKey(Classe, on_delete=models.CASCADE, related_name='uploads', blank=True, null=True, default=None)
processed = models.BooleanField(default=False)
error = models.BooleanField(default=False)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.filepath
class Meta:
ordering = ['-updated_at']
indexes = [
models.Index(fields=['updated_at',]),
models.Index(fields=['-updated_at',]),
]
class EmailSent(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
mailing = models.ForeignKey(Mailing, on_delete=models.CASCADE, related_name='sent', blank=True, null=True, default=None)
email = models.EmailField(max_length=256)
sent = models.BooleanField(default=False)
error = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# ???
class DocumentTemplate(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256)
html_message = models.TextField(max_length=50000, default='', blank=True, null=True)
class WordTemplate(FileAttachment):
pass
class Entitat(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256, blank=True, null=True, default='')
logo = models.ForeignKey(FileAttachment, on_delete=models.CASCADE, related_name='entitats', blank=True, null=True)
codi_registre = models.CharField(max_length=256, blank=True, null=True, default='')
likable_issues = models.BooleanField(default=False)
```
#### File: cole/templatetags/ampa_version.py
```python
from django import template
from django.template.defaultfilters import stringfilter
import markdown as md
import os
register = template.Library()
@register.filter(is_safe=True, needs_autoescape=False)
@stringfilter
def ampa_version(value):
return os.getenv('AMPA_APP_VERSION', '3.14159265359')
```
#### File: cole/views/alumne_views.py
```python
from django.contrib.auth.decorators import user_passes_test, login_required
from django.contrib.auth import update_session_auth_hash
from django.core.files.storage import FileSystemStorage
from django.db.models.functions import Concat
from django.shortcuts import render, redirect
from django.db.models import Value as V
from django.contrib import messages
from django.conf import settings
from django.db.models import Q
from cole.forms import *
import time
import sys
import os
def alumne_signup(request):
print("alumne_signup")
try:
alumne_instance = None
form = None
query = request.GET.get('q', '').lower().strip()
if query:
results = Alumne.objects.annotate(
full_name=Concat('nom_unaccented', V(' '), 'cognom1_unaccented', V(' '), 'cognom2_unaccented', )
).filter(
full_name__iexact=query
)
if len(results) != 1:
alumne_instance = None
else:
alumne_instance = results[0]
form = EditAlumneParesForm(alumne_instance)
# make sure the user is not already registered
if alumne_instance.tutor1 or alumne_instance.tutor2:
alumne_instance = None
form = None
elif alumne_instance.email_tutor1 or alumne_instance.email_tutor2:
alumne_instance = None
form = None
elif alumne_instance.telf_tutor1 or alumne_instance.telf_tutor2:
alumne_instance = None
form = None
else:
alumne_instance = None
return render(request, 'alumnes/signup.html', {
'form': form,
'instance': alumne_instance,
})
except Exception as e:
# if request.user.is_staff:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
messages.error(request, str(e))
return redirect('home')
@login_required
def edit_alumne(request, classe_id, alumne_id=None):
try:
if request.user.is_staff:
classe_instance = Classe.objects.filter(id=classe_id).first()
else:
classe_instance = Classe.objects.filter(id=classe_id).filter(Q(delegat=request.user) | Q(subdelegat=request.user)).first()
if alumne_id:
alumne_instance = Alumne.objects.filter(classes=classe_instance, id=alumne_id).first()
new_alumne = False
else:
alumne_instance = Alumne()
new_alumne = True
view_hash = {
'alumne_id': alumne_id,
'classe_id': classe_id,
'classe_nom': classe_instance.nom,
'alumne_instance': alumne_instance,
'staff_view': request.user.is_staff,
'new_alumne': new_alumne
}
if alumne_id:
view_hash['extrainfo_hash'] = alumne_instance.extrainfo_hash
if request.method == 'POST':
form = EditAlumneForm(request.POST, staff_view=request.user.is_staff, instance=alumne_instance)
view_hash['form'] = form
if form.is_valid():
form.save()
classe_instance.alumnes.add(alumne_instance)
messages.info(request, 'Dades guardades correctament')
try:
afegir_altres_dades = form.data['altres']
return redirect('add.extrainfo.alumne', alumne_id=alumne_instance.id)
except Exception as e:
pass
else:
return render(request, 'alumnes/edit.html', view_hash)
return redirect('show.classe', classe_id=classe_id)
else:
form = EditAlumneForm(staff_view=request.user.is_staff, instance=alumne_instance)
view_hash['form'] = form
print(str(view_hash))
return render(request, 'alumnes/edit.html', view_hash)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
return redirect('list.classes')
@login_required
def search_edit_alumne(request, alumne_id):
try:
alumne_instancia = Alumne.objects.filter(id=alumne_id).first()
return redirect('edit.alumne', classe_id=alumne_instancia.classe.id, alumne_id=alumne_id)
except:
return redirect('home')
@user_passes_test(lambda u: u.is_staff)
def edit_extrainfo_alumne(request, alumne_id, extrainfo_id=None):
try:
alumne_instance = Alumne.objects.filter(id=alumne_id).first()
if extrainfo_id:
extrainfo_instance = ExtraInfoAlumne.objects.filter(id=extrainfo_id, alumne__id=alumne_id).first()
else:
extrainfo_instance = ExtraInfoAlumne(alumne=alumne_instance)
if request.method == 'POST':
form = InfoAlumneForm(request.POST, instance=extrainfo_instance)
if form.is_valid():
try:
if request.FILES['attachment']:
myfile = request.FILES['attachment']
upload_subdir = str(int(time.time()))
fs = FileSystemStorage(location=settings.UPLOADS_ROOT+'/'+upload_subdir)
filename = fs.save(myfile.name, myfile)
upload = FileAttachment(filename=myfile.name, filepath=fs.location+'/'+filename, upload_path=upload_subdir)
upload.save()
extrainfo_instance.attachment=upload
except Exception as e:
if request.user.is_staff:
messages.error(request, str(e))
pass
extrainfo_instance.save()
form.save()
messages.info(request, 'Dades guardades correctament')
else:
return render(request, 'alumnes/extra/upload.html', {
'form': form,
'extrainfo_instance': extrainfo_instance,
'fileattachment': extrainfo_instance.attachment,
'alumne_instance': alumne_instance
})
return redirect('search.edit.alumne', alumne_id=alumne_id)
else:
form = InfoAlumneForm(instance=extrainfo_instance)
return render(request, 'alumnes/extra/upload.html', {
'form': form,
'extrainfo_instance': extrainfo_instance,
'fileattachment': extrainfo_instance.attachment,
'alumne_instance': alumne_instance
})
except Exception as e:
if request.user.is_staff:
messages.error(request, str(e))
return redirect('search.edit.alumne', alumne_id=alumne_id)
@login_required
def edit_alumne_classes(request, alumne_id):
try:
alumne_instance = Alumne.objects.filter(id=alumne_id)[0]
list_classes = alumne_instance.classes.all()
return render(request, 'alumnes/list_classes.html', {
'list_classes': list_classes,
'alumne_instance': alumne_instance,
'user_admin': request.user.is_staff,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('search.edit.alumne', alumne_id=alumne_id)
@login_required
def unlink_alumne_classes(request, alumne_id, classe_id):
try:
alumne_instance = Alumne.objects.filter(id=alumne_id)[0]
classe_instance = Classe.objects.filter(id=classe_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
alumne_instance.classes.remove(classe_instance)
alumne_instance.save()
messages.info(request, 'Alumne eliminat de la classe')
return redirect('show.classe', classe_id=classe_id)
else:
messages.error(request, 'Error eliminant la classe')
else:
form = AreYouSureForm(request.GET)
return render(request, 'alumnes/unlink_classe.html', {'classe_instance': classe_instance, 'alumne_instance': alumne_instance})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('search.edit.alumne', alumne_id=alumne_id)
```
#### File: cole/views/cursos_views.py
```python
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render, redirect
from django.contrib import messages
from django.conf import settings
from cole.forms import *
import sys
import os
@user_passes_test(lambda u: u.is_staff)
def list_cursos(request):
cursos = Curs.objects.all()
return render(request, 'cursos/list.html', {'cursos': cursos })
@user_passes_test(lambda u: u.is_staff)
def show_curs(request, curs_id):
try:
curs_instance = Curs.objects.filter(id=curs_id)[0]
return render(request, 'cursos/show.html', { 'content': 'overview', 'curs_instance': curs_instance, 'list_classes': curs_instance.classes.all() })
except Exception as e:
if request.user.is_staff:
print(str(e))
messages.error(request, str(e))
return redirect('staff.settings')
@user_passes_test(lambda u: u.is_staff)
def edit_curs(request, curs_id=None):
try:
if curs_id:
new_curs = False
curs_instance = Curs.objects.filter(id=curs_id)[0]
print("modalitat actual: "+str(curs_instance.modalitat))
else:
new_curs = True
curs_instance = Curs()
if request.method == 'POST':
form = CursForm(request.POST, instance=curs_instance)
if form.is_valid():
form.save()
messages.info(request, 'Dades guardades correctament')
else:
return render(request, 'cursos/edit.html', { 'form': form, 'curs_instance': curs_instance, 'new_curs': new_curs })
return redirect('show.curs', curs_id=curs_instance.id)
else:
form = CursForm(instance=curs_instance)
return render(request, 'cursos/edit.html', { 'form': form, 'curs_instance': curs_instance, 'new_curs': new_curs })
except Exception as e:
if request.user.is_staff:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
messages.error(request, str(e))
if curs_id:
return redirect('show.curs', curs_id=curs_id)
else:
return redirect('staff.settings')
#
# mailing
#
@user_passes_test(lambda u: u.is_staff)
def list_curs_mailings(request, curs_id):
try:
curs_instance = Curs.objects.filter(id=curs_id).first()
list_mailings = Mailing.objects.filter(curs__id=curs_id)
return render(request, 'mailing/cursos/list.html', { 'curs_instance': curs_instance, 'list_mailings': list_mailings, 'content': 'mailing' })
except Exception as e:
if request.user.is_staff:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
messages.error(request, str(e))
return redirect('show.curs', curs_id=curs_id)
@user_passes_test(lambda u: u.is_staff)
def edit_mailing_curs(request, curs_id, mailing_id=None):
try:
instance_curs = Curs.objects.filter(id=curs_id)[0]
if mailing_id:
instance_mailing = Mailing.objects.filter(curs__id=instance_curs.id, id=mailing_id)[0]
else:
instance_mailing = Mailing(curs=instance_curs, email_from='', email_reply_to=request.user.email)
if request.method == 'POST':
form = ClasseMailingForm(request.POST, instance=instance_mailing)
if form.is_valid():
form.save()
messages.info(request, 'Guardat mailing')
try:
boto_apretat = str(form.data['guardar'])
return redirect('list.curs.mailings', curs_id=curs_id)
except:
return redirect('add.attachment.mailing', mailing_id=instance_mailing.id)
else:
return render(request, 'mailing/cursos/edit.html', {
'form': form,
'instance_mailing': instance_mailing,
'image_hash': instance_mailing.images_hash,
'attachment_hash': instance_mailing.attachment_hash
})
return redirect('list.curs.mailings', curs_id=curs_id)
else:
form = ClasseMailingForm(instance=instance_mailing)
return render(request, 'mailing/cursos/edit.html', {
'form': form,
'instance_mailing': instance_mailing,
'image_hash': instance_mailing.images_hash,
'attachment_hash': instance_mailing.attachment_hash
})
except Exception as e:
if settings.DEBUG:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
return redirect('list.curs.mailings', curs_id=curs_id)
@user_passes_test(lambda u: u.is_staff)
def show_mailing_curs(request, curs_id, mailing_id):
try:
instance_curs = Curs.objects.filter(id=curs_id)[0]
instance_mailing = Mailing.objects.filter(curs__id=curs_id, id=mailing_id)[0]
return render(request, 'mailing/classes/show.html', {
'instance_mailing': instance_mailing,
'instance_curs': instance_curs,
'image_hash': instance_mailing.images_hash,
'attachment_hash': instance_mailing.attachment_hash
})
except Exception as e:
if settings.DEBUG:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
return redirect('list.curs.mailings', curs_id=curs_id)
# TODO:
@user_passes_test(lambda u: u.is_staff)
def enviar_mailing_curs(request, classe_id, mailing_id):
try:
if request.user.is_superuser:
instance_classe = Classe.objects.filter(id=classe_id)[0]
else:
instance_classe = Classe.objects.filter(id=classe_id).filter(Q(delegat=request.user) | Q(subdelegat=request.user))[0]
instance_mailing = Mailing.objects.filter(classes__id=instance_classe.id, id=mailing_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
instance_mailing.status = MAILING_STATUS_PROGRAMAT
instance_mailing.save()
messages.info(request, 'e-Mail programat per enviar-se')
return redirect('list.classe.mailings', classe_id=instance_classe.id)
else:
messages.error(request, 'Error programant l\'enviament')
else:
form = AreYouSureForm(request.GET)
return render(request, 'mailing/classes/enviar.html', { 'instance_mailing': instance_mailing, 'instance_classe': instance_classe })
except Exception as e:
print(str(e))
return redirect('show.classe', classe_id=classe_id)
```
#### File: cole/views/templates_views.py
```python
from django.contrib.auth.decorators import user_passes_test
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from django.contrib import messages
from cole.forms import *
import time
import sys
import os
@user_passes_test(lambda u: u.is_staff)
def upload_template(request):
try:
if request.method == 'POST' and request.FILES['attachment']:
myfile = request.FILES['attachment']
upload_subdir = str(int(time.time()))
fs = FileSystemStorage(location=settings.UPLOADS_ROOT+'/'+upload_subdir)
filename = fs.save(myfile.name, myfile)
upload_template = WordTemplate(filename=myfile.name, filepath=fs.location+'/'+filename, upload_path=upload_subdir)
upload_template.save()
messages.info(request, 'Fitxer pujat correctament')
if request.user.is_superuser:
messages.info(request, upload_template.filepath)
messages.info(request, upload_template.static_url)
return redirect('peticions.list.templates')
else:
return render(request, 'templates/upload.html', {})
except Exception as e:
messages.error(request, 'Error pujant arxiu')
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.templates')
@user_passes_test(lambda u: u.is_staff)
def list_templates(request):
list_templates = WordTemplate.objects.all()
return render(request, 'templates/list.html', {
'list_templates': list_templates,
})
```
#### File: cole/views/user_views.py
```python
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import update_session_auth_hash, login
from django.shortcuts import render, redirect
from django.contrib import messages
from cole.forms import *
import sys
import os
@user_passes_test(lambda u: u.is_staff)
def switch_user(request, user_slug):
try:
user_instance = User.objects.filter(slug=user_slug)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
login(request, user_instance)
messages.info(request, 'Canvi d\'usuari completat')
return redirect('home')
else:
messages.error(request, 'Error fent el canvi d\'usuari')
else:
form = AreYouSureForm(request.GET)
return render(request, 'staff/users/su.html', {'user_instance': user_instance})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('list.users')
@user_passes_test(lambda u: u.is_staff)
def edit_user(request, user_slug):
try:
user_instance = User.objects.filter(slug=user_slug)[0]
if request.method == 'POST':
form = AdminEditUser(request.POST, instance=user_instance)
if form.is_valid():
form.save()
messages.info(request, 'Guardada configuració de l\'usuari')
return redirect('list.users')
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'staff/users/edit.html', {
'form': form,
'user_instance': user_instance,
})
else:
form = AdminEditUser(instance=user_instance)
return render(request, 'staff/users/edit.html', {
'form': form,
'user_instance': user_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('list.users')
@user_passes_test(lambda u: u.is_staff)
def users_list(request):
list_users = User.objects.all()
return render(request, 'staff/users/list.html', {
'list_users': list_users,
})
@login_required
def user_settings(request):
if request.method == 'POST':
form = AMPAUserName(request.POST)
if form.is_valid():
request.user.name = form.cleaned_data['name']
request.user.save()
return redirect('user.settings')
else:
messages.error(request, 'Error guardant dades')
else:
form = AMPAUserName(request.GET, initial={'name': request.user.name})
return render(request, 'users/settings.html', { 'user': request.user, 'form': form })
@login_required
def change_password(request):
try:
if request.user.is_authenticated:
if request.method == 'POST':
form = PasswordChangeForm(request.POST)
if form.is_valid():
try:
if request.user.check_password(form.cleaned_data['password_actual']):
request.user.set_password(form.cleaned_data['password1'])
request.user.save()
update_session_auth_hash(request, request.user)
messages.info(request, 'Contrasenya actualitzada')
return redirect('home')
else:
messages.error(request, 'Contrasenya actual incorrecte')
except Exception as e:
#if request.user.is_superuser:
messages.error(request, 'Error canviant la contrasenya: '+str(e))
else:
messages.error(request, 'Error al canviar la contrasenya')
else:
form = PasswordChangeForm(request.GET)
return render(request, 'users/password_change.html', { 'form': form } )
except Exception as e:
if settings.DEBUG:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
return redirect('home')
```
#### File: ampa/peticions/forms.py
```python
from django.forms import ModelForm
from django import forms
from peticions.models import *
class IssueFilterForm(forms.Form):
status_filter = forms.ChoiceField(choices=ISSUE_STATUS, required = False)
def __init__(self, data, **kwargs):
initial = kwargs.get('initial', {})
data = {**initial, **data}
super().__init__(data, **kwargs)
def clean(self):
try:
status_filter = self.data['status_filter'][0]
except:
return
class Meta:
fields = (['status_filter'])
labels = {
'status_filter': 'Estat de la petició',
}
class IssueForm(forms.ModelForm):
class Meta:
model = Issue
fields = (['titol', 'categories', 'public', 'html_message'])
labels = {
'titol': 'Titol petició',
'categories': 'Categories',
'html_message': 'Descripció',
}
class IssueAdminForm(forms.ModelForm):
class Meta:
model = Issue
fields = (['titol', 'owner', 'representant', 'categories', 'public', 'status', 'html_message'])
labels = {
'titol': 'Titol petició',
'owner': 'Autor',
'representant': 'Mostra la petició feta com a representant de:',
'categories': 'Categories',
'public': 'publicat',
'destacada': 'Destacar petició',
'status': 'Estat',
'html_message': 'Descripció',
}
widgets = {
'owner': forms.Select(attrs={'disabled':'disabled'})
}
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = (['internal', 'html_message'])
labels = {
'internal': 'restringir',
'html_message': 'Comentari',
}
class AdminCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = (['user', 'representant', 'internal', 'html_message'])
labels = {
'representant': 'Mostra el comentari fet en nom de:',
'internal': 'restringir',
'user': 'Autor',
'html_message': 'Comentari',
}
widgets = {
'user': forms.Select(attrs={'disabled':'disabled'})
}
class JuntaPeuForm(forms.ModelForm):
class Meta:
model = Junta
fields = (['peu_message'])
labels = {
'peu_message': 'Text',
}
class JuntaForm(forms.ModelForm):
class Meta:
model = Junta
fields = (['name', 'celebracio', 'public', 'html_message', 'wordtemplate'])
widgets = {
'celebracio': forms.DateInput(format=('%Y-%m-%d'), attrs={"type": 'date'}),
}
labels = {
'name': 'Junta',
'celebracio': 'Data de celebració',
'public': 'Publicar',
'html_message': 'Text',
'wordtemplate': 'Plantilla',
}
class CategoryForm(forms.ModelForm):
name = forms.TextInput(attrs={'size': '40'})
class Meta:
model = Category
fields = (['name', 'ordre'])
labels = {
'name': 'Nom de la categoria',
'ordre': 'Ordre'
}
class RepresentantForm(forms.ModelForm):
name = forms.TextInput(attrs={'size': '40'})
class Meta:
model = Representant
fields = (['name'])
labels = {
'name': 'Representant',
}
```
#### File: peticions/migrations/0010_auto_20210221_1545.py
```python
from django.db import migrations, models
import django.db.models.deletion
def forward(apps, schema_editor):
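# Data migration: ensure an "AMPA" Representant exists and point comments previously flagged ampa=True at it.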
try:
Comment = apps.get_model("peticions", "Comment")
Representant = apps.get_model("peticions", "Representant")
try:
representant_ampa = Representant.objects.filter(name="AMPA")[0]
except:
representant_ampa = None
if not representant_ampa:
representant_ampa = Representant(name="AMPA")
representant_ampa.save()
for comment in Comment.objects.all():
if comment.ampa:
comment.representant = representant_ampa
comment.save()
except:
pass
class Migration(migrations.Migration):
dependencies = [
('peticions', '0009_auto_20210221_1533'),
]
operations = [
migrations.AddField(
model_name='comment',
name='representant',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='comments', to='peticions.representant'),
),
migrations.RunPython(forward),
migrations.RemoveField(
model_name='comment',
name='ampa',
),
]
```
#### File: peticions/migrations/0015_slug_juntes_issues.py
```python
from django.db import migrations, models
from django.utils.text import slugify
def forward(apps, schema_editor):
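# Data migration: backfill slugs for existing Junta and Issue rows from their names/titles.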
Junta = apps.get_model("peticions", "Junta")
for junta in Junta.objects.all():
if not junta.slug:
junta.slug = slugify(junta.name, allow_unicode=False)
junta.save()
Issue = apps.get_model("peticions", "Issue")
for issue in Issue.objects.all():
if not issue.slug:
issue.slug = slugify(issue.titol, allow_unicode=False)
issue.save()
class Migration(migrations.Migration):
dependencies = [
('peticions', '0014_auto_20210307_2137'),
]
operations = [
migrations.AddField(
model_name='issue',
name='destacada',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='issue',
name='slug',
field=models.SlugField(default=None, blank=True, null=True, max_length=256),
),
migrations.AddField(
model_name='junta',
name='slug',
field=models.SlugField(default=None, blank=True, null=True, max_length=256),
),
migrations.RunPython(forward),
]
```
#### File: ampa/peticions/models.py
```python
from django.utils.text import slugify
from django.db.models import Count
from django.db import models
from voting.models import *
from cole.models import *
import html2text
import uuid
ISSUE_STATUS_DRAFT = 'a'
ISSUE_STATUS_OPEN = 'g'
ISSUE_STATUS_WAITING = 'p'
ISSUE_STATUS_REJECTED = 'y'
ISSUE_STATUS_CLOSED = 'z'
ISSUE_STATUS = [
(ISSUE_STATUS_DRAFT, 'esborrany'),
(ISSUE_STATUS_OPEN, 'proposada a junta'),
(ISSUE_STATUS_WAITING, 'esperant resposta'),
(ISSUE_STATUS_REJECTED, 'rebutjada'),
(ISSUE_STATUS_CLOSED, 'tancada'),
]
class Representant(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256, unique=True)
def __str__(self):
return self.name
class Meta:
ordering = ['name']
indexes = [
models.Index(fields=['name']),
]
class Category(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256, unique=True)
ordre = models.IntegerField(default=1)
def __str__(self):
return self.name
class Meta:
ordering = ['ordre']
indexes = [
models.Index(fields=['ordre']),
]
class Issue(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
owner = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='issues', default=None, blank=True, null=True)
representant = models.ForeignKey(Representant, on_delete=models.SET_NULL, related_name='issues', default=None, blank=True, null=True)
titol = models.CharField(max_length=256)
slug = models.SlugField(max_length=256, default=None, blank=True, null=True)
html_message = models.TextField(max_length=50000, default=None, blank=True, null=True)
public = models.BooleanField(default=True)
destacada = models.BooleanField(default=False)
status = models.CharField(
max_length=1,
choices=ISSUE_STATUS,
default=ISSUE_STATUS_DRAFT,
)
categories = models.ManyToManyField(Category, related_name='issues')
likes = models.ManyToManyField(User, related_name='liked_issues')
dislikes = models.ManyToManyField(User, related_name='disliked_issues')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
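# Run model validation, then auto-generate a slug from the title if none is set.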
self.full_clean()
if not self.slug:
self.slug = slugify(self.titol, allow_unicode=False)
super().save(*args, **kwargs)
def display_categories(self):
return ', '.join(list(self.categories.values_list('name', flat=True)))
def display_juntes(self):
return ', '.join(list(self.juntes.values_list('name', flat=True)))
def display_updated(self):
updated = self.updated_at
for comment in self.comments.all():
if comment.updated_at > updated:
updated = comment.updated_at
return updated
class Meta:
ordering = ['-updated_at']
indexes = [
models.Index(fields=['-updated_at']),
]
class Comment(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
issue = models.ForeignKey(Issue, on_delete=models.SET_NULL, related_name='comments', default=None, blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='comments', default=None, blank=True, null=True)
representant = models.ForeignKey(Representant, on_delete=models.SET_NULL, related_name='comments', default=None, blank=True, null=True)
html_message = models.TextField(max_length=50000, default=None, blank=True, null=True)
internal = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['created_at']
indexes = [
models.Index(fields=['created_at']),
]
class Junta(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=256)
slug = models.SlugField(max_length=256, unique=True)
html_message = models.TextField(max_length=50000, default='', blank=True, null=True)
peu_message = models.TextField(max_length=50000, default='', blank=True, null=True)
public = models.BooleanField(default=False)
celebracio = models.DateTimeField(default=None, blank=True, null=True)
issues = models.ManyToManyField(Issue, related_name='juntes')
votacions = models.ManyToManyField(Election, related_name='juntes')
latest_export = models.CharField(max_length=256, blank=True, null=True, default=None)
wordtemplate = models.ForeignKey(WordTemplate, on_delete=models.SET_NULL, related_name='juntes', default=None, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
text_message = ""
def render_text_version(self):
h2t = html2text.HTML2Text()
h2t.ignore_links = True
self.text_message = h2t.handle(self.html_message)
return self.text_message
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, allow_unicode=False)
self.full_clean()
super().save(*args, **kwargs)
def __str__(self):
return self.name
def _get_categories(self):
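# Categories referenced by this junta's issues that have more than one issue overall, ordered by 'ordre'.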
return Category.objects.filter(id__in=self.issues.values('categories').distinct()).annotate(cat_count=Count('issues')).filter(cat_count__gt=1).order_by('ordre')
categories = property(_get_categories)
def _get_uncategorized_issues(self):
return self.issues.filter(categories=None)
uncategorized_issues = property(_get_uncategorized_issues)
def _get_multicategorized_issues(self):
return self.issues.annotate(cat_count=Count('categories')).filter(cat_count__gt=1)
multicategorized_issues = property(_get_multicategorized_issues)
class Meta:
ordering = ['-celebracio']
indexes = [
models.Index(fields=['-celebracio']),
]
```
#### File: ampa/peticions/views.py
```python
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.decorators import login_required
from django_xhtml2pdf.utils import generate_pdf
from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.contrib import messages
from django.db.models import Q
from docxtpl import DocxTemplate
from peticions.models import *
from peticions.forms import *
from cole.forms import *
import io
#
# staff
#
@user_passes_test(lambda u: u.is_staff)
def preview_docx(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
junta_instance.render_text_version()
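# NOTE: the docx template path below is hardcoded to a local development checkout.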
tpl = DocxTemplate("/home/jprats/git/django-ampa/test.docx")
context = {
'junta_instance': junta_instance,
'issue_add_comments': False,
'issue_title_size': 'h4',
'user_admin': True,
'is_pdf': True
}
tpl.render(context)
# tpl.save('./test_output.docx')
tpl_io = io.BytesIO()
tpl.save(tpl_io)
tpl_io.seek(0)
response = HttpResponse(tpl_io.read())
# Content-Disposition header makes a file downloadable
response["Content-Disposition"] = "attachment; filename=preview.docx"
# Set the appropriate Content-Type for docx file
response["Content-Type"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
return response
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def preview_pdf(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
resp = HttpResponse(content_type='application/pdf')
return generate_pdf('peticions/juntes/render_pdf.html', file_object=resp, context={
'junta_instance': junta_instance,
'issue_add_comments': False,
'issue_title_size': 'h4',
'user_admin': True,
'is_pdf': True
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def delete_comment(request, issue_id, comment_id):
try:
comment_instance = Comment.objects.filter(id=comment_id, issue__id=issue_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
comment_instance.delete()
return redirect('peticions.edit.issue', issue_id=issue_id)
else:
messages.error(request, 'Error eliminant comentari')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/comments/delete.html', { 'comment': comment_instance })
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.edit.issue', issue_id=issue_id)
@user_passes_test(lambda u: u.is_staff)
def delete_junta(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
junta_instance.delete()
return redirect('peticions.list.juntes')
else:
messages.error(request, 'Error eliminant la junta')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/juntes/delete.html', {
'junta_instance': junta_instance,
'issue_title_size': 'h4',
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def delete_representant(request, representant_id):
try:
instance_representant = Representant.objects.filter(id=representant_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
instance_representant.delete()
return redirect('peticions.list.representants')
else:
messages.error(request, 'Error eliminant representant')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/representants/delete.html', { 'instance_representant': instance_representant })
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.representants')
@user_passes_test(lambda u: u.is_staff)
def delete_category(request, category_id):
try:
instance_category = Category.objects.filter(id=category_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
instance_category.delete()
return redirect('peticions.list.categories')
else:
messages.error(request, 'Error eliminant la categoria')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/categories/delete.html', { 'instance_category': instance_category })
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.categories')
@user_passes_test(lambda u: u.is_staff)
def delete_issue(request, issue_id):
try:
instance_issue = Issue.objects.filter(id=issue_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
instance_issue.delete()
return redirect('peticions.list.issues')
else:
messages.error(request, 'Error eliminant la petició')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/issues/delete.html', { 'issue_instance': instance_issue })
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.issues')
@user_passes_test(lambda u: u.is_staff)
def close_junta(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
for issue in junta_instance.issues.all():
issue.status = ISSUE_STATUS_CLOSED
issue.save()
junta_instance.save()
messages.info(request, 'Junta tancada')
return redirect('peticions.list.juntes')
else:
messages.error(request, 'Error tancant la junta')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/juntes/close.html', {
'junta_instance': junta_instance,
'issue_title_size': 'h4',
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def publish_junta(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
for issue in junta_instance.issues.all():
issue.status = ISSUE_STATUS_CLOSED
issue.save()
junta_instance.public = True
junta_instance.save()
messages.info(request, 'Junta publicada')
return redirect('peticions.list.juntes')
else:
messages.error(request, 'Error publicant junta')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/juntes/publish.html', {
'junta_instance': junta_instance,
'issue_title_size': 'h4',
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def edit_representant(request, representant_id=None):
try:
if representant_id:
representant_instance = Representant.objects.filter(id=representant_id)[0]
else:
representant_instance = Representant()
if request.method == 'POST':
form = RepresentantForm(request.POST, instance=representant_instance)
if form.is_valid():
form.save()
messages.info(request, 'Representant guardat correctament')
return redirect('peticions.list.representants')
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/representants/edit.html', {
'form': form,
'representant_instance': representant_instance,
})
else:
form = RepresentantForm(instance=representant_instance)
return render(request, 'peticions/representants/edit.html', {
'form': form,
'representant_instance': representant_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.representants')
@user_passes_test(lambda u: u.is_staff)
def edit_category(request, category_id=None):
try:
if category_id:
category_instance = Category.objects.filter(id=category_id)[0]
else:
category_instance = Category()
if request.method == 'POST':
form = CategoryForm(request.POST, instance=category_instance)
if form.is_valid():
form.save()
messages.info(request, 'Categoria guardada correctament')
return redirect('peticions.list.categories')
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/categories/edit.html', {
'form': form,
'category_instance': category_instance,
})
else:
form = CategoryForm(instance=category_instance)
return render(request, 'peticions/categories/edit.html', {
'form': form,
'category_instance': category_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.categories')
@user_passes_test(lambda u: u.is_staff)
def list_categories(request):
list_categories_raw = Category.objects.all()
page = request.GET.get('page', 1)
paginator = Paginator(list_categories_raw, 10)
try:
list_categories = paginator.page(page)
except PageNotAnInteger:
list_categories = paginator.page(1)
except EmptyPage:
list_categories = paginator.page(paginator.num_pages)
return render(request, 'peticions/categories/list.html', {
'list_categories': list_categories,
'public': False,
'user_admin': request.user.is_staff
})
@user_passes_test(lambda u: u.is_staff)
def list_representants(request):
list_representants_raw = Representant.objects.all()
page = request.GET.get('page', 1)
paginator = Paginator(list_representants_raw, 10)
try:
list_representants = paginator.page(page)
except PageNotAnInteger:
list_representants = paginator.page(1)
except EmptyPage:
list_representants = paginator.page(paginator.num_pages)
return render(request, 'peticions/representants/list.html', {
'list_representants': list_representants,
'public': False,
'user_admin': request.user.is_staff
})
@user_passes_test(lambda u: u.is_staff)
def forward_open_peticions(request):
try:
list_issues = Issue.objects.filter(public=True, status=ISSUE_STATUS_DRAFT)
config = Entitat.objects.first()
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
for issue in list_issues:
issue.status = ISSUE_STATUS_OPEN
issue.save()
messages.info(request, 'Canviat l\'estat de les peticions')
return redirect('peticions.list.issues')
else:
messages.error(request, 'Error fent el canvi d\'estat')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/issues/forward_open.html', {'list_issues': list_issues, 'config': config})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.issues')
@user_passes_test(lambda u: u.is_staff)
def edit_junta_peu(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
if request.method == 'POST':
form = JuntaPeuForm(request.POST, instance=junta_instance)
if form.is_valid():
form.save()
messages.info(request, 'Peu de junta guardat correctament')
return redirect('peticions.edit.junta', junta_id=junta_id)
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/juntes/edit_peu.html', {
'form': form,
'junta_instance': junta_instance,
})
else:
form = JuntaPeuForm(instance=junta_instance)
return render(request, 'peticions/juntes/edit_peu.html', {
'form': form,
'junta_instance': junta_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.edit.junta', junta_id=junta_id)
@user_passes_test(lambda u: u.is_staff)
def edit_junta(request, junta_id=None):
try:
if junta_id:
junta_instance = Junta.objects.filter(id=junta_id)[0]
else:
junta_instance = Junta()
for categoria in junta_instance.categories:
print(categoria)
if request.method == 'POST':
form = JuntaForm(request.POST, instance=junta_instance)
if form.is_valid():
form.save()
messages.info(request, 'Junta guardada correctament')
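# Work out which submit button was pressed (votarem / queixarem / veure / pudor / presentar / tancar) and redirect to the matching view.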
try:
boto_apretat = str(form.data['votarem'])
except:
try:
boto_apretat = str(form.data['queixarem'])
return redirect('peticions.edit.junta.list.peticions', junta_id=junta_instance.id)
except:
try:
boto_apretat = str(form.data['veure'])
return redirect('peticions.show.junta', junta_id=junta_instance.id)
except:
try:
boto_apretat = str(form.data['pudor'])
return redirect('peticions.edit.peu.junta', junta_id=junta_instance.id)
except:
try:
boto_apretat = str(form.data['presentar'])
return redirect('peticions.present.junta', junta_id=junta_instance.id)
except:
try:
boto_apretat = str(form.data['tancar'])
return redirect('peticions.close.junta', junta_id=junta_instance.id)
except:
pass
return redirect('peticions.list.juntes')
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/juntes/edit.html', {
'form': form,
'junta_instance': junta_instance,
})
else:
form = JuntaForm(instance=junta_instance)
return render(request, 'peticions/juntes/edit.html', {
'form': form,
'junta_instance': junta_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def list_junta_peticio(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
list_issues_add = Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN).exclude(id__in=junta_instance.issues.values('id'))
list_issues_remove = junta_instance.issues.all()
return render(request, 'peticions/juntes/add_to_junta_list.html', {
'list_issues_add': list_issues_add,
'list_issues_remove': list_issues_remove,
'public': False,
'user_admin': request.user.is_staff,
'junta_instance': junta_instance
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def add_all_junta_peticio(request, junta_id):
try:
junta_instance = Junta.objects.filter(id=junta_id)[0]
if request.method == 'POST':
form = AreYouSureForm(request.POST)
if form.is_valid():
for issue in Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN):
issue.status = ISSUE_STATUS_WAITING
issue.save()
junta_instance.issues.add(issue)
junta_instance.save()
return redirect('peticions.edit.junta', junta_id=junta_id)
else:
messages.error(request, 'Error afegint peticions a la junta')
else:
form = AreYouSureForm(request.GET)
return render(request, 'peticions/juntes/add_all_issues.html', { 'junta_instance': junta_instance, 'list_issues_add': Issue.objects.filter(public=True, status=ISSUE_STATUS_OPEN) })
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
@user_passes_test(lambda u: u.is_staff)
def add_junta_peticio(request, junta_id, issue_id):
try:
if request.method == "POST":
junta_instance = Junta.objects.filter(id=junta_id)[0]
issue_instance = Issue.objects.filter(id=issue_id)[0]
issue_instance.status = ISSUE_STATUS_WAITING
issue_instance.save()
junta_instance.issues.add(issue_instance)
junta_instance.save()
except Exception as e:
messages.error(request, "Error afegint petició a l'ordre del dia")
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.edit.junta', junta_id=junta_id)
@user_passes_test(lambda u: u.is_staff)
def remove_junta_peticio(request, junta_id, issue_id):
try:
if request.method == "POST":
junta_instance = Junta.objects.filter(id=junta_id)[0]
issue_instance = Issue.objects.filter(id=issue_id)[0]
issue_instance.status = ISSUE_STATUS_OPEN
issue_instance.save()
junta_instance.issues.remove(issue_instance)
junta_instance.save()
except Exception as e:
messages.error(request, "Error eliminant petició de l'ordre del dia")
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.edit.junta', junta_id=junta_id)
#
# registered
#
@user_passes_test(lambda u: u.is_staff)
def like_issue(request, issue_id):
try:
if request.method == "POST":
issue_instance = Issue.objects.filter(id=issue_id)[0]
if not request.user in issue_instance.likes.all():
if request.user in issue_instance.dislikes.all():
issue_instance.dislikes.remove(request.user)
issue_instance.likes.add(request.user)
else:
issue_instance.likes.add(request.user)
issue_instance.save()
except Exception as e:
messages.error(request, "Error fent like")
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.show.issue', issue_id=issue_id)
@user_passes_test(lambda u: u.is_staff)
def dislike_issue(request, issue_id):
try:
if request.method == "POST":
issue_instance = Issue.objects.filter(id=issue_id)[0]
if not request.user in issue_instance.dislikes.all():
if request.user in issue_instance.likes.all():
issue_instance.likes.remove(request.user)
issue_instance.dislikes.add(request.user)
else:
issue_instance.dislikes.add(request.user)
issue_instance.save()
except Exception as e:
messages.error(request, "Error fent dislike")
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.show.issue', issue_id=issue_id)
#
# login required
#
@login_required
def filter_issues(request):
if request.method == 'POST':
form = IssueFilterForm(request.POST)
try:
if form.data['status_filter'][0]:
url = reverse('peticions.list.issues')
return HttpResponseRedirect(url + "?status_filter="+form.data['status_filter'][0])
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.issues')
else:
form = IssueFilterForm(request.GET)
return render(request, 'peticions/issues/filter.html', {'form': form })
@login_required
def edit_comment(request, issue_id, comment_id=None):
try:
issue_instance = Issue.objects.filter(id=issue_id)[0]
if issue_instance.status==ISSUE_STATUS_CLOSED:
return redirect('peticions.edit.issue', issue_id=issue_id)
if comment_id:
comment_instance = Comment.objects.filter(issue__id=issue_id, id=comment_id)[0]
is_new = False
else:
comment_instance = Comment(issue=issue_instance, user=request.user)
is_new = True
if is_new:
if request.user.representant:
comment_instance.representant=request.user.representant
if request.method == 'POST':
if request.user.is_staff:
form = AdminCommentForm(request.POST, instance=comment_instance)
else:
form = CommentForm(request.POST, instance=comment_instance)
if form.is_valid():
form.save()
messages.info(request, 'Comentari guardat correctament')
return redirect('peticions.edit.issue', issue_id=issue_id)
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/comments/edit.html', {
'form': form,
'comment_instance': comment_instance,
'is_new': is_new,
'issue_id': issue_id,
'issue_instance': issue_instance,
})
else:
if request.user.is_staff:
form = AdminCommentForm(instance=comment_instance)
else:
form = CommentForm(instance=comment_instance)
return render(request, 'peticions/comments/edit.html', {
'form': form,
'comment_instance': comment_instance,
'is_new': is_new,
'issue_id': issue_id,
'issue_instance': issue_instance,
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.edit.issue', issue_id=issue_id)
@login_required
def show_issue(request, issue_id):
try:
config = Entitat.objects.first()
issue_instance = Issue.objects.filter(id=issue_id)[0]
return render(request, 'peticions/issues/show.html', {
'issue_instance': issue_instance,
'config': config,
'user_admin': request.user.is_staff,
'issue_add_comments': True,
'issue_title_size': 'h1'
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.issues')
@login_required
def edit_issue(request, issue_id=None):
try:
if issue_id:
issue_instance = Issue.objects.filter(id=issue_id)[0]
is_new = False
else:
issue_instance = Issue(owner=request.user)
is_new = True
if is_new:
if request.user.representant:
issue_instance.representant=request.user.representant
if request.user==issue_instance.owner or request.user.is_staff:
owner_view = True
else:
owner_view = False
if issue_instance.owner!=request.user and not request.user.is_staff:
return redirect('peticions.show.issue', issue_id=issue_id)
if request.method == 'POST':
if request.user.is_staff:
form = IssueAdminForm(request.POST, instance=issue_instance)
else:
form = IssueForm(request.POST, instance=issue_instance)
if form.is_valid():
form.save()
messages.info(request, 'Petició guardada correctament')
return redirect('peticions.list.issues')
else:
messages.error(request, 'Formulari incorrecte')
return render(request, 'peticions/issues/edit.html', {
'form': form,
'issue_instance': issue_instance,
'is_new': is_new,
'owner_view': owner_view,
'user': request.user,
'user_admin': request.user.is_staff
})
else:
if request.user.is_staff:
form = IssueAdminForm(instance=issue_instance)
else:
form = IssueForm(instance=issue_instance)
return render(request, 'peticions/issues/edit.html', {
'form': form,
'issue_instance': issue_instance,
'is_new': is_new,
'owner_view': owner_view,
'user': request.user,
'user_admin': request.user.is_staff
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.issues')
@login_required
def list_issues(request):
config = Entitat.objects.first()
issue_status = request.GET.get('status_filter', None)
# print(str(issue_status))
if request.user.is_staff:
list_issues_raw = Issue.objects.all()
else:
list_issues_raw = Issue.objects.filter(public=True)
# print(list_issues_raw.count())
if issue_status:
# print('applico filtre status')
list_issues_raw = list_issues_raw.filter(status=issue_status)
# print(list_issues_raw.count())
page = request.GET.get('page', 1)
paginator = Paginator(list_issues_raw, 10)
try:
list_issues = paginator.page(page)
except PageNotAnInteger:
list_issues = paginator.page(1)
except EmptyPage:
list_issues = paginator.page(paginator.num_pages)
return render(request, 'peticions/issues/list.html', {
'list_issues': list_issues,
'config': config,
'public': False,
'user_admin': request.user.is_staff,
'issue_status': issue_status
})
#
# PUBLIC
#
def list_juntes(request):
user_admin = False
if request.user.is_authenticated:
if request.user.is_staff:
user_admin = True
list_juntes_raw = Junta.objects.all()
else:
list_juntes_raw = Junta.objects.filter(public=True)
else:
list_juntes_raw = Junta.objects.filter(public=True)
page = request.GET.get('page', 1)
paginator = Paginator(list_juntes_raw, 10)
try:
list_juntes = paginator.page(page)
except PageNotAnInteger:
list_juntes = paginator.page(1)
except EmptyPage:
list_juntes = paginator.page(paginator.num_pages)
return render(request, 'peticions/juntes/list.html', {
'list_juntes': list_juntes,
'user_admin': user_admin
})
def present_junta(request, junta_id):
try:
user_admin = False
if request.user.is_authenticated:
if request.user.is_staff:
user_admin = True
junta_instance = Junta.objects.filter(id=junta_id)[0]
else:
junta_instance = Junta.objects.filter(id=junta_id, public=True)[0]
else:
junta_instance = Junta.objects.filter(id=junta_id, public=True)[0]
return render(request, 'peticions/juntes/present.html', {
'junta_instance': junta_instance,
'issue_add_comments': False,
'issue_title_size': 'h4',
'user_admin': user_admin
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
def show_junta(request, junta_id):
try:
user_admin = False
if request.user.is_authenticated:
if request.user.is_staff:
user_admin = True
junta_instance = Junta.objects.filter(id=junta_id)[0]
else:
junta_instance = Junta.objects.filter(id=junta_id, public=True)[0]
else:
junta_instance = Junta.objects.filter(id=junta_id, public=True)[0]
return render(request, 'peticions/juntes/show.html', {
'junta_instance': junta_instance,
'issue_add_comments': False,
'issue_title_size': 'h4',
'user_admin': user_admin
})
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
def show_acte_junta(request, junta_slug):
try:
junta_instance = Junta.objects.filter(slug=junta_slug)[0]
return show_junta(request, junta_instance.id)
except Exception as e:
if request.user.is_superuser:
messages.error(request, str(e))
return redirect('peticions.list.juntes')
```
#### File: ampa/voting/models.py
```python
from django.db import models
from cole.models import *
import uuid
import json
ELECTION_STATUS_DRAFT = '0'
ELECTION_STATUS_OPEN = '1'
ELECTION_STATUS_CLOSED = '2'
ELECTION_STATUS = [
(ELECTION_STATUS_DRAFT, 'borrador'),
(ELECTION_STATUS_OPEN, 'obertes'),
(ELECTION_STATUS_CLOSED, 'tancades'),
]
class Election(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
owner = models.ForeignKey(User, on_delete=models.SET_NULL, related_name='elections', default=None, blank=True, null=True)
multianswer = models.BooleanField(default=False)
anonymous = models.BooleanField(default=False)
titol = models.CharField(max_length=256)
html_message = models.TextField(max_length=10000, default=None, blank=True, null=True)
status = models.CharField(
max_length=1,
choices=ELECTION_STATUS,
default=ELECTION_STATUS_DRAFT,
)
open_id = models.UUIDField(primary_key=False, default=None, editable=False, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def get_vote_count(self):
# Count only votes that are still marked valid.
return Vote.objects.filter(valid=True, election=self).count()
def save(self, *args, **kwargs):
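# Assign a public open_id the first time the election is opened.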
if not self.open_id and self.status==ELECTION_STATUS_OPEN:
self.open_id = uuid.uuid4()
super().save(*args, **kwargs)
class Meta:
ordering = ['-updated_at']
class Option(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
election = models.ForeignKey(Election, on_delete=models.CASCADE, related_name='options')
text = models.CharField(max_length=1000, default='')
order = models.IntegerField(default=0)
class Meta:
ordering = ['order']
##########################################################################################################################################
class ElectionLog(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
election = models.ForeignKey(Election, on_delete=models.CASCADE, related_name='logs', blank=False, null=False)
option = models.ForeignKey(Option, on_delete=models.CASCADE, related_name='logs', blank=True, null=True)
log = models.CharField(max_length=1000)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
##########################################################################################################################################
class Vote(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
election = models.ForeignKey(Election, on_delete=models.CASCADE, related_name='votes', blank=False, null=False)
option = models.ForeignKey(Option, on_delete=models.CASCADE, related_name='votes', blank=True, null=True)
voter_id = models.CharField(max_length=256, default='', blank=True, null=True)
voter_verification = models.CharField(max_length=256, default='', blank=True, null=True)
valid = models.BooleanField(default=True)
invalidation_reason = models.CharField(max_length=256, default='', blank=True, null=True)
election_log = models.ForeignKey(ElectionLog, on_delete=models.CASCADE, related_name='votes', blank=True, null=True, default=None)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
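# Refuse votes on elections that are not open and write an ElectionLog audit entry (JSON snapshot) before saving.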
if self.election.status!=ELECTION_STATUS_OPEN:
raise Exception('Vot no comptabilitzat')
if self.option:
log = ElectionLog(
election=self.election,
option=self.option,
log=json.dumps( {
'id': str(self.id),
'voter_id': str(self.voter_id),
'voter_verification': str(self.voter_verification),
'election_id': str(self.election.id),
'option_id': str(self.option.id)
})
)
else:
log = ElectionLog(
election=self.election,
option=None,
log=json.dumps( {
'id': str(self.id),
'voter_id': str(self.voter_id),
'voter_verification': str(self.voter_verification),
'election_id': str(self.election.id),
'option_id': 'blanc'
})
)
log.save()
self.election_log = log
super().save(*args, **kwargs)
``` |
{
"source": "jordiprats/python-covidcache",
"score": 2
} |
#### File: jordiprats/python-covidcache/covidcache.py
```python
from flask import Flask, request
import re
import os
import sys
import time
import json
import requests
debug = True
covid_cache = {}
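# Fetch a school's COVID figures from the public CSV feed, caching each school id for one hour.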
def fetch_status(id):
global covid_cache
try:
if id in covid_cache.keys():
if time.time()-covid_cache[id]['timestamp'] < 3600:
if debug: print("cached data")
return True
else:
if debug: print("updating scached data")
# DATAGENERACIO;DATACREACIO;CODCENTRE;CODIMUNICIPI;ESTAT;GRUP_CONFIN;ALUMN_CONFIN;DOCENT_CONFIN;ALTRES_CONFIN;GRUP_ESTABLE;ALUMN_POSITIU;PERSONAL_POSITIU;ALTRES_POSITIU
# 01/10/2020 7:00;28/09/2020 9:28;IDCENTRE;MUNICIPI;Obert;0;6;0;0;51;0;0;0
# DATAGENERACIO;DATACREACIO;CODCENTRE;ESTAT;GRUP_CONFIN;ALUMN_CONFIN;DOCENT_CONFIN;ALTRES_CONFIN;GRUP_ESTABLE;ALUMN_POSITIU;PERSONAL_POSITIU;ALTRES_POSITIU
# 03/10/2020 7:00;02/10/2020 8:51;IDCENTRE;Obert;0;5;0;0;51;0;0;0
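# Capture groups: creation day and month, centre status, confined groups, confined students/teachers/others, then (skipping stable groups) positive students/staff/others.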
regex_str = '[0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+;([0-9]+)/([0-9]+)/[^;]+;'+id+';([^;]+);([0-9]+);([0-9]+);([0-9]+);([0-9]+);[0-9]+;([0-9]+);([0-9]+);([0-9]+)'
if debug: print(regex_str)
r = requests.get('https://tracacovid.akamaized.net/data.csv', stream=True)
for line in r.iter_lines(decode_unicode=True):
if debug: print(line)
m = re.search(regex_str, line)
if m:
ultim_update = (int(m.group(1))*100)+int(m.group(2))
estat_centre = m.group(3)
groups_confinats = int(m.group(4))
confinats = int(m.group(5))+int(m.group(6))+int(m.group(7))
positius = int(m.group(8))+int(m.group(9))+int(m.group(10))
covid_cache[id] = {
'timestamp': time.time(),
'ultim_update': ultim_update,
'estat_centre': estat_centre,
'groups_confinats': groups_confinats,
'confinats': confinats,
'positius': positius
}
return True
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
print(str(e))
return False
app = Flask(__name__)
@app.route('/school/<id>')
def query_example(id):
global covid_cache
fetch_status(id)
try:
if covid_cache[id]:
return json.dumps(covid_cache[id])
else:
return { 'WTF': True }
except:
pass
return { 'WTF': True }
app.run(host='0.0.0.0', debug=debug, port=5000)
``` |
{
"source": "jordiprats/python-healthcheckd",
"score": 3
} |
#### File: jordiprats/python-healthcheckd/healthcheckd.py
```python
import sys
import logging
import subprocess
from pid import PidFile
from configparser import ConfigParser
from http.server import BaseHTTPRequestHandler,HTTPServer
# This class handles any incoming request from the client
class HealthCheckHandler(BaseHTTPRequestHandler):
def check_status(self):
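# Run the configured shell command; a zero exit status means the service is healthy.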
global command
p = subprocess.Popen('bash -c \''+command+"'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()[0]
print(p.returncode)
return p.returncode==0
def do_healthcheck(self):
if self.check_status():
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(bytes("OK", "utf-8"))
else:
self.send_response(503)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write(bytes("ERROR", "utf-8"))
#Handler for the GET requests
def do_GET(self):
self.do_healthcheck()
return
#Handler for the HEAD requests
def do_HEAD(self):
self.do_healthcheck()
return
if __name__ == "__main__":
try:
configfile = sys.argv[1]
except IndexError:
configfile = '/etc/healthcheckd.config'
try:
config = ConfigParser()
config.read(configfile)
try:
pidfile = config.get('healthcheckd', 'pidfile').strip('"').strip("'").strip()
except:
pidfile = 'healthcheckd'
try:
piddir = config.get('healthcheckd', 'piddir').strip('"').strip("'").strip()
except:
piddir = '/tmp'
try:
port_number = int(config.get('healthcheckd', 'port').strip('"').strip("'").strip())
except:
port_number = 17
try:
command = config.get('healthcheckd', 'command').strip('"').strip("'").strip()
except Exception as e:
command = '/bin/true'
print('INFO: setting default command: '+command)
with PidFile(piddir=piddir, pidname=pidfile) as pidfile:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
try:
# Create a web server and define the handler to manage incoming requests
server = HTTPServer(('', port_number), HealthCheckHandler)
print('Started httpserver on port '+str(port_number))
# Wait forever for incoming HTTP requests
server.serve_forever()
except KeyboardInterrupt:
logging.info('shutting down healthcheckd')
server.socket.close()
sys.exit()
except Exception as e:
msg = 'Global ERROR: '+str(e)
logging.error(msg)
sys.exit(msg+'\n')
``` |
{
"source": "jordiprats/python-libscrt",
"score": 3
} |
#### File: jordiprats/python-libscrt/libscrt.py
```python
from Crypto.Cipher import Blowfish
import argparse
import codecs
import six
import sys
import re
def decrypt(password):
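# Decrypt a SecureCRT-style password: hex-decode, strip two fixed-key Blowfish-CBC layers, then read the UTF-16 plaintext up to its double-null terminator.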
c1_code = '5F B0 45 A2 94 17 D9 16 C6 C6 A2 FF 06 41 82 B7'.replace(' ','')
c2_code = '24 A6 3D DE 5B D3 B3 82 9C 7E 06 F4 08 16 AA 07'.replace(' ','')
c1 = Blowfish.new(codecs.decode(c1_code, 'hex'), Blowfish.MODE_CBC, '\x00'*8)
c2 = Blowfish.new(codecs.decode(c2_code, 'hex'), Blowfish.MODE_CBC, '\x00'*8)
padded = c1.decrypt(c2.decrypt(codecs.decode(password, 'hex'))[4:-4])
p = bytes()
while padded[:2] != b'\x00\x00' :
p += padded[:2]
padded = padded[2:]
return codecs.decode(p, 'UTF-16')
def hostname(f):
x = open(f).read().replace('\x00', '')
REGEX_HOSTNAME = re.compile(six.u(r'S:"Hostname"=([^\r\n]*)'))
m = REGEX_HOSTNAME.search(x)
if m:
return m.group(1)
return ''
def password(f):
x = open(f).read().replace('\x00', '')
REGEX_PASWORD = re.compile(six.u(r'S:"Password"=u([0-9a-f]+)'))
m = REGEX_PASWORD.search(x)
if m:
return decrypt(m.group(1))
return ''
def port(f):
x = open(f).read().replace('\x00', '')
REGEX_PORT = re.compile(six.u(r'D:"\[SSH2\] Port"=([0-9a-f]{8})'))
m = REGEX_PORT.search(x)
if m:
return '%d'%(int(m.group(1), 16))
return ''
def username(f):
x = open(f).read().replace('\x00', '')
REGEX_USERNAME = re.compile(six.u(r'S:"Username"=([^\r\n]*)'))
m = REGEX_USERNAME.search(x)
if m:
return m.group(1)
return ''
if __name__ == '__main__':
file_list = sys.argv
file_list.pop(0)
for filename in file_list:
print('sshpass -p"'+password(filename)+'" ssh -p '+port(filename)+' '+username(filename)+'@'+hostname(filename))
``` |
{
"source": "jordiprats/python-tier1",
"score": 2
} |
#### File: python-tier1/poc/poc_detail.py
```python
from __future__ import print_function
"""
POC
"""
import re
import os
import sys
import getopt
import pysnow
import argparse
import requests
from datetime import datetime, timedelta
from configparser import SafeConfigParser
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def showJelp(msg=''):
print("Usage:")
print(" [-c|--config] <config file>")
print(" [-l|--list]")
print("");
sys.exit(msg)
if __name__ == '__main__':
list_option = False
config_file = os.path.expanduser('~')+'/.shhrc'
SHH_INSTANCE = ''
SHH_USERNAME = ''
SHH_PASSWORD = ''
debug = False
# parse opts
try:
options, remainder = getopt.getopt(sys.argv[1:], 'hlc:', [
'help',
'list',
'config=',
])
except Exception as e:
showJelp(str(e))
for opt, arg in options:
if opt in ('-l', '--list'):
list_option = True
elif opt in ('-c', '--config'):
config_file = arg
else:
showJelp("unknow option")
config = SafeConfigParser()
config.read(config_file)
if debug : eprint("CONFIG FILE PATH: "+config_file)
try:
debug = config.getboolean('shh', 'debug')
except:
pass
try:
SHH_INSTANCE = config.get('shh', 'instance').strip('"').strip()
if debug : eprint("INSTANCE: "+SHH_INSTANCE)
except Exception as e:
sys.exit("ERROR: instance is mandatory - "+str(e))
try:
SHH_USERNAME = config.get('shh', 'username').strip('"').strip()
if debug : eprint("SHH_USERNAME: "+SHH_USERNAME)
except:
sys.exit("ERROR: username is mandatory")
try:
SHH_PASSWORD = config.get('shh', 'password').strip('"').strip()
# if debug : eprint("SHH_PASSWORD: "+<PASSWORD>)
except:
sys.exit("ERROR: username is mandatory")
c = pysnow.client.Client(instance=SHH_INSTANCE, user=SHH_USERNAME, password=SHH_PASSWORD)
qb = (pysnow.QueryBuilder()
.field('assignment_group.name').equals('MS Team 2')
.AND()
.field('active').equals('true')
)
incident = c.resource(api_path='/table/incident')
response = incident.get(query=qb)
# Iterate over the matching records and print out number
for record in response.all():
print(str(record))
``` |
{
"source": "jordips/flask-restx-api-with-vue-front",
"score": 3
} |
#### File: api/todos/endpoints.py
```python
from flask import jsonify
from flask_restx import Namespace, Resource, fields
import json
from random import randint
from .models import todo_model
ns = Namespace('todos', description='Todos operations')
# Add models
ns.add_model("todo", todo_model)
# In-memory todo store (stand-in for a database)
todos = [
{'id': 123, 'task': 'Clean the bedroom'},
{'id': 563, 'task': 'Go to shop milk'}
]
@ns.route("/")
class TodoList(Resource):
@ns.doc('list_todos')
@ns.marshal_list_with(todo_model)
def get(self):
"""
Returns a list of todos
"""
return todos
@ns.doc('create_todo')
@ns.expect(todo_model)
@ns.marshal_with(todo_model, code=201)
def post(self):
"""
Adds a new todo to the list
"""
todo = ns.payload
todo['id'] = randint(0, 1000)
todos.append(todo)
return todo
@ns.route("/<int:id>")
@ns.response(404, 'Todo not found')
@ns.param('id', 'The task identifier')
class Todo(Resource):
@ns.doc('get_todo')
@ns.marshal_with(todo_model)
def get(self, id):
'''Fetch a given resource'''
for todo in todos:
if todo['id'] == id:
return todo
ns.abort(404, "Todo {} doesn't exist".format(id))
@ns.doc('delete_todo')
@ns.response(204, 'Todo deleted')
def delete(self, id):
'''Delete a task given its identifier'''
for todo in todos:
if todo['id'] == id:
todos.remove(todo)
return '', 204
ns.abort(404, "Todo {} doesn't exist".format(id))
@ns.expect(todo_model)
@ns.marshal_with(todo_model)
def put(self, id):
'''Update a task given its identifier'''
for todo in todos:
if todo['id'] == id:
todo.update(ns.payload)
return todo
ns.abort(404, "Todo {} doesn't exist".format(id))
``` |
{
"source": "jordiriu/MP-DQN",
"score": 2
} |
#### File: MP-DQN/agents/pdqn_multipass.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from agents.pdqn import PDQNAgent
from agents.utils import hard_update_target_network
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
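# Q-network that scores all discrete actions in a single batched "multi-pass" forward, zeroing the action-parameters that belong to other actions.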
class MultiPassQActor(nn.Module):
def __init__(self, state_size, action_size, action_parameter_size_list, hidden_layers=(100,),
output_layer_init_std=None, activation="relu", **kwargs):
super().__init__()
self.state_size = state_size
self.action_size = action_size
self.action_parameter_size_list = action_parameter_size_list
self.action_parameter_size = sum(action_parameter_size_list)
self.activation = activation
# create layers
self.layers = nn.ModuleList()
inputSize = self.state_size + self.action_parameter_size
lastHiddenLayerSize = inputSize
if hidden_layers is not None:
nh = len(hidden_layers)
self.layers.append(nn.Linear(inputSize, hidden_layers[0]))
for i in range(1, nh):
self.layers.append(nn.Linear(hidden_layers[i - 1], hidden_layers[i]))
lastHiddenLayerSize = hidden_layers[nh - 1]
self.layers.append(nn.Linear(lastHiddenLayerSize, self.action_size))
# initialise layer weights
for i in range(0, len(self.layers) - 1):
nn.init.kaiming_normal_(self.layers[i].weight, nonlinearity=activation)
nn.init.zeros_(self.layers[i].bias)
if output_layer_init_std is not None:
nn.init.normal_(self.layers[-1].weight, mean=0., std=output_layer_init_std)
# else:
# nn.init.zeros_(self.layers[-1].weight)
nn.init.zeros_(self.layers[-1].bias)
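# Cumulative offsets marking each action's slice within the concatenated action-parameter vector.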
self.offsets = self.action_parameter_size_list.cumsum()
self.offsets = np.insert(self.offsets, 0, 0)
def forward(self, state, action_parameters):
# Forward pass: score every discrete action in one batched pass.
negative_slope = 0.01
Q = []
# duplicate inputs so we can process all actions in a single pass
batch_size = state.shape[0]
# with torch.no_grad():
x = torch.cat((state, torch.zeros_like(action_parameters)), dim=1)
x = x.repeat(self.action_size, 1)
for a in range(self.action_size):
x[a*batch_size:(a+1)*batch_size, self.state_size + self.offsets[a]: self.state_size + self.offsets[a+1]] \
= action_parameters[:, self.offsets[a]:self.offsets[a+1]]
num_layers = len(self.layers)
for i in range(0, num_layers - 1):
if self.activation == "relu":
x = F.relu(self.layers[i](x))
elif self.activation == "leaky_relu":
x = F.leaky_relu(self.layers[i](x), negative_slope)
else:
raise ValueError("Unknown activation function "+str(self.activation))
Qall = self.layers[-1](x)
# extract Q-values for each action
for a in range(self.action_size):
Qa = Qall[a*batch_size:(a+1)*batch_size, a]
if len(Qa.shape) == 1:
Qa = Qa.unsqueeze(1)
Q.append(Qa)
Q = torch.cat(Q, dim=1)
return Q
class MultiPassPDQNAgent(PDQNAgent):
NAME = "Multi-Pass P-DQN Agent"
def __init__(self,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.actor = MultiPassQActor(self.observation_space.shape[0], self.num_actions, self.action_parameter_sizes,
**kwargs['actor_kwargs']).to(device)
self.actor_target = MultiPassQActor(self.observation_space.shape[0], self.num_actions, self.action_parameter_sizes,
**kwargs['actor_kwargs']).to(device)
hard_update_target_network(self.actor, self.actor_target)
self.actor_target.eval()
self.actor_optimiser = optim.Adam(self.actor.parameters(), lr=self.learning_rate_actor)
```
#### File: MP-DQN/common/wrappers.py
```python
import copy
import gym
import numpy as np
from gym.spaces import Tuple, Box
class ScaledStateWrapper(gym.ObservationWrapper):
"""
Scales the observation space to [-1,1]
"""
def __init__(self, env):
super(ScaledStateWrapper, self).__init__(env)
obs = env.observation_space
self.compound = False
self.low = None
self.high = None
print(type(obs))
print(obs)
if isinstance(obs, gym.spaces.Box):
self.low = env.observation_space.low
self.high = env.observation_space.high
self.observation_space = gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
dtype=np.float32)
elif isinstance(obs, Tuple):
self.low = obs.spaces[0].low
self.high = obs.spaces[0].high
assert len(obs.spaces) == 2 and isinstance(obs.spaces[1], gym.spaces.Discrete)
self.observation_space = Tuple(
(gym.spaces.Box(low=-np.ones(self.low.shape), high=np.ones(self.high.shape),
dtype=np.float32),
obs.spaces[1]))
self.compound = True
else:
raise Exception("Unsupported observation space type: %s" % self.observation_space)
def scale_state(self, state):
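# Linearly map a state from [low, high] to [-1, 1].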
state = 2. * (state - self.low) / (self.high - self.low) - 1.
return state
def _unscale_state(self, scaled_state):
state = (self.high - self.low) * (scaled_state + 1.) / 2. + self.low
return state
def observation(self, obs):
if self.compound:
state, steps = obs
ret = (self.scale_state(state), steps)
else:
ret = self.scale_state(obs)
return ret
class TimestepWrapper(gym.Wrapper):
"""
Adds a timestep return to an environment for compatibility reasons.
"""
def reset(self, **kwargs):
state = self.env.reset(**kwargs)
return state, 0
def step(self, action):
state, reward, terminal, info = self.env.step(action)
obs = (state, 1)
return obs, reward, terminal, info
class ScaledParameterisedActionWrapper(gym.ActionWrapper):
"""
Changes the scale of the continuous action parameters to [-1,1].
Parameter space must be flattened!
Tuple((
Discrete(n),
Box(c_1),
Box(c_2),
...
Box(c_n)
)
"""
def __init__(self, env):
super(ScaledParameterisedActionWrapper, self).__init__(env)
self.old_as = env.action_space
self.num_actions = self.old_as.spaces[0].n
self.high = [self.old_as.spaces[i].high for i in range(1, self.num_actions + 1)]
self.low = [self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]
self.range = [self.old_as.spaces[i].high - self.old_as.spaces[i].low for i in range(1, self.num_actions + 1)]
new_params = [ # parameters
Box(-np.ones(self.old_as.spaces[i].low.shape), np.ones(self.old_as.spaces[i].high.shape), dtype=np.float32)
for i in range(1, self.num_actions + 1)
]
self.action_space = Tuple((
self.old_as.spaces[0], # actions
*new_params,
))
def action(self, action):
"""
Rescale from [-1,1] to original action-parameter range.
:param action:
:return:
"""
action = copy.deepcopy(action)
p = action[0]
action[1][p] = self.range[p] * (action[1][p] + 1) / 2. + self.low[p]
return action
class QPAMDPScaledParameterisedActionWrapper(gym.ActionWrapper):
"""
Changes the scale of the continuous action parameters to [-1,1].
Parameter space not flattened in this case
Tuple((
Discrete(n),
Tuple((
Box(c_1),
Box(c_2),
...
Box(c_n)
))
)
"""
def __init__(self, env):
super(QPAMDPScaledParameterisedActionWrapper, self).__init__(env)
self.old_as = env.action_space
self.num_actions = self.old_as.spaces[0].n
self.high = [self.old_as.spaces[1][i].high for i in range(self.num_actions)]
self.low = [self.old_as.spaces[1][i].low for i in range(self.num_actions)]
self.range = [self.old_as.spaces[1][i].high - self.old_as.spaces[1][i].low for i in range(self.num_actions)]
new_params = [ # parameters
gym.spaces.Box(-np.ones(self.old_as.spaces[1][i].low.shape), np.ones(self.old_as.spaces[1][i].high.shape),
dtype=np.float32)
for i in range(self.num_actions)
]
self.action_space = gym.spaces.Tuple((
self.old_as.spaces[0], # actions
gym.spaces.Tuple(tuple(new_params)),
))
def action(self, action):
"""
Rescale from [-1,1] to original action-parameter range.
:param action:
:return:
"""
action = copy.deepcopy(action)
p = action[0]
action[1][p] = self.range[p] * (action[1][p] + 1) / 2. + self.low[p]
return action
```
#### File: jordiriu/MP-DQN/run_goal_qpamdp.py
```python
import click
import time
import gym
import os
import numpy as np
import gym_goal
from agents.qpamdp import QPAMDPAgent
from agents.sarsa_lambda import SarsaLambdaAgent
from common.wrappers import ScaledStateWrapper, QPAMDPScaledParameterisedActionWrapper
from gym_goal.envs.config import GOAL_WIDTH, PITCH_WIDTH, PITCH_LENGTH
from gym.wrappers import Monitor
from common.goal_domain import CustomFourierBasis, GoalObservationWrapper
variances = [0.01, 0.01, 0.01]
xfear = 50.0 / PITCH_LENGTH
yfear = 50.0 / PITCH_WIDTH
caution = 5.0 / PITCH_WIDTH
kickto_weights = np.array([[2.5, 1, 0, xfear, 0], [0, 0, 1 - caution, 0, yfear]])
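# Initial action-parameter weights for the three discrete actions (kick-to and the two shoot-goal actions).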
initial_parameter_weights = [
kickto_weights,
np.array([[GOAL_WIDTH / 2 - 1, 0]]),
np.array([[-GOAL_WIDTH / 2 + 1, 0]])
]
def evaluate(env, agent, episodes=1000):
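# Roll out the current policy for the given number of episodes and return the per-episode returns.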
returns = []
timesteps = []
for _ in range(episodes):
state, _ = env.reset()
terminal = False
t = 0
total_reward = 0.
while not terminal:
t += 1
state = np.array(state, dtype=np.float32, copy=False)
action = agent.act(state)
(state, _), reward, terminal, _ = env.step(action)
total_reward += reward
timesteps.append(t)
returns.append(total_reward)
return np.array(returns)
@click.command()
@click.option('--seed', default=7, help='Random seed.', type=int)
@click.option('--episodes', default=20000, help='Number of epsiodes.', type=int)
@click.option('--evaluation-episodes', default=100, help='Episodes over which to evaluate after training.', type=int)
@click.option('--scale', default=False, help='Scale inputs and actions.', type=bool) # default 50, 25 best
@click.option('--initialise-params', default=True, help='Initialise action parameters.', type=bool)
@click.option('--save-dir', default="results/goal", help='Output directory.', type=str)
@click.option('--title', default="QPAMDP", help="Prefix of output files", type=str)
def run(seed, episodes, evaluation_episodes, scale, initialise_params, save_dir, title):
alpha_param = 0.1
env = gym.make('Goal-v0')
env = GoalObservationWrapper(env)
if scale:
variances[0] = 0.0001
variances[1] = 0.0001
variances[2] = 0.0001
alpha_param = 0.06
initial_parameter_weights[0] = np.array([[-0.375, 0.5, 0, 0.0625, 0],
[0, 0, 0.8333333333333333333, 0, 0.111111111111111111111111]])
initial_parameter_weights[1] = np.array([0.857346647646219686, 0])
initial_parameter_weights[2] = np.array([-0.857346647646219686, 0])
env = ScaledStateWrapper(env)
env = QPAMDPScaledParameterisedActionWrapper(env)
dir = os.path.join(save_dir, title)
env = Monitor(env, directory=os.path.join(dir, str(seed)), video_callable=False, write_upon_reset=False, force=True)
env.seed(seed)
np.random.seed(seed)
action_obs_index = np.arange(14)
param_obs_index = np.array([
np.array([10, 11, 14, 15]), # ball_features
np.array([16]), # keeper_features
np.array([16]), # keeper_features
])
basis = CustomFourierBasis(14, env.observation_space.spaces[0].low[:14], env.observation_space.spaces[0].high[:14])
discrete_agent = SarsaLambdaAgent(env.observation_space.spaces[0], env.action_space.spaces[0], basis=basis, seed=seed, alpha=0.01,
lmbda=0.1, gamma=0.9, temperature=1.0, cooling=1.0, scale_alpha=False,
use_softmax=True,
observation_index=action_obs_index, gamma_step_adjust=False)
agent = QPAMDPAgent(env.observation_space.spaces[0], env.action_space, alpha=alpha_param, initial_action_learning_episodes=4000,
seed=seed, action_obs_index=action_obs_index, parameter_obs_index=param_obs_index,
variances=variances, discrete_agent=discrete_agent, action_relearn_episodes=2000,
parameter_updates=1000, parameter_rollouts=50, norm_grad=True, print_freq=100,
phi0_func=lambda state: np.array([1, state[1], state[1]**2]),
phi0_size=3)
# Alternating learning periods from original paper:
# QPAMDP(1) : init(2000), parameter_updates(50), relearn(50)
# QPAMDP(infinity) : init(2000), parameter_updates(1000), relearn(2000)
# needed to increase initial action learning episodes to 4000
if initialise_params:
for a in range(3):
agent.parameter_weights[a] = initial_parameter_weights[a]
max_steps = 150
start_time = time.time()
agent.learn(env, episodes, max_steps)
end_time = time.time()
print("Training took %.2f seconds" % (end_time - start_time))
env.close()
returns = np.array(env.get_episode_rewards())
print("Saving training results to:",os.path.join(dir, "QPAMDP{}".format(str(seed))))
np.save(os.path.join(dir, title + "{}".format(str(seed))), returns)
print("Ave. return =", sum(returns) / len(returns))
print("Ave. last 100 episode return =", sum(returns[-100:]) / 100.)
print('Total P(S):{0:.4f}'.format((returns == 50.).sum() / len(returns)))
print('Ave. last 100 episode P(S):{0:.4f}'.format((returns[-100:] == 50.).sum() / 100.))
if evaluation_episodes > 0:
print("Evaluating agent over {} episodes".format(evaluation_episodes))
agent.variances = 0
agent.discrete_agent.epsilon = 0.
agent.discrete_agent.temperature = 0.
evaluation_returns = evaluate(env, agent, evaluation_episodes)
print("Ave. evaluation return =", sum(evaluation_returns) / len(evaluation_returns))
print("Ave. evaluation prob. =", sum(evaluation_returns == 50.) / len(evaluation_returns))
np.save(os.path.join(dir, title + "{}e".format(str(seed))), evaluation_returns)
if __name__ == '__main__':
run()
``` |
{
"source": "jordisr/poreover",
"score": 3
} |
#### File: poreover/decoding/pair_decode.py
```python
import numpy as np
from multiprocessing import Pool, get_logger
import argparse, random, sys, glob, os, re
from scipy.special import logsumexp
#from Bio import pairwise2
import logging
import copy
import progressbar
from itertools import starmap
from pathlib import Path
from . import decode
from . import decoding_cpp
from . import envelope
from . import prefix_search
import poreover.align as align
logger = get_logger()  # module-level logger so helper methods below can log warnings/errors
def fasta_format(name, seq, width=60):
fasta = '>'+name+'\n'
window = 0
while window+width < len(seq):
fasta += (seq[window:window+width]+'\n')
window += width
fasta += (seq[window:]+'\n')
return(fasta)
def get_anchors(alignment, matches, indels):
# find alignment 'anchors' from contiguous stretches of matches or indels
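    # e.g. with matches=3, a run of >= 3 identical alignment columns becomes a 'mat'
    # anchor once the run ends; gap runs of length >= `indels` become 'ins'/'del' anchors.
    # Anchored stretches are kept as-is later, and only the regions between anchors are
    # re-basecalled jointly.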
state_start = 0
state_counter = 1
prev_state = 'START'
anchor_ranges = []
anchor_type = []
for i,(a1,a2) in enumerate(alignment.T):
# options are match/insertion/deletion/mismatch
if a1 == a2:
state = 'mat'
elif a1 == '-':
state = 'ins'
elif a2 == '-':
state = 'del'
else:
state = 'mis'
if prev_state == state and state != 'mis':
state_counter += 1
else:
if prev_state == 'ins' and state_counter >= indels:
anchor_ranges.append((state_start,i))
anchor_type.append(prev_state)
if prev_state == 'del' and state_counter >= indels:
anchor_ranges.append((state_start,i))
anchor_type.append(prev_state)
if prev_state == 'mat' and state_counter >= matches:
anchor_ranges.append((state_start,i))
anchor_type.append(prev_state)
prev_state = state
state_counter = 1
state_start = i
return(anchor_ranges, anchor_type)
def argmax_path(forward):
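    # Greedy monotone path through the forward matrix: for each label position,
    # pick the most probable signal frame at or after the previous pick.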
seq_max = forward.shape[1]
forward_indices = np.zeros(seq_max, dtype=int)
cumul = 1
for i in range(1,seq_max):
forward_indices[i] = np.argmax(forward[cumul:,i])+cumul
cumul = forward_indices[i]
return(forward_indices)
def viterbi_path(forward):
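    # Trace a monotone path through the forward matrix, stepping in the signal,
    # sequence, or both directions towards the larger forward value, and record
    # the signal index at which each sequence position is reached.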
(sig_max, seq_max) = forward.shape
forward_indices = np.zeros(seq_max, dtype=int)
seq_i, sig_i = 1, 0
while (0 <= seq_i < seq_max-1) and (0 <= sig_i < sig_max-1):
next_pos = np.argmax([forward[sig_i+1,seq_i], forward[sig_i,seq_i+1], forward[sig_i+1,seq_i+1]])
if next_pos > 0:
forward_indices[seq_i] = sig_i
seq_i += 1
if (next_pos == 0) or (next_pos == 1):
sig_i += 1
forward_indices[seq_i:] = sig_max
return(forward_indices)
def get_sequence_mapping(path, kind):
signal_to_sequence = []
sequence_to_signal = []
label_len = 0
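    # 'poreover': a label is emitted at every non-blank (index < 4) position of the path;
    # 'flipflop': a new label is emitted whenever the path state changes;
    # 'bonito': CTC with merged repeats, so blanks (4) and repeated states are skipped.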
    if kind == 'poreover':
for i, p in enumerate(path):
if p < 4:
sequence_to_signal.append(i)
signal_to_sequence.append(label_len)
label_len += 1
    elif kind == 'flipflop':
for i, p in enumerate(path):
if i == 0:
sequence_to_signal.append(i)
signal_to_sequence.append(label_len)
else:
if path[i] != path[i-1]:
label_len += 1
sequence_to_signal.append(i)
signal_to_sequence.append(label_len)
    elif kind == 'bonito':
for i, p in enumerate(path):
if p == 4 or path[i] == path[i-1]:
pass
else:
sequence_to_signal.append(i)
signal_to_sequence.append(label_len)
label_len += 1
return(sequence_to_signal, signal_to_sequence)
class parallel_decoder:
def __init__(self, args, kind):
self.args = args
self.kind = {'poreover':'ctc', 'guppy':'ctc_flipflop', 'flappie':'ctc_flipflop', 'bonito':'ctc_merge_repeats'}[self.args.basecaller]
def _beam_search_2d(self, logits1, logits2, b, b_tot, u1, u2, v1, v2):
size = (u2-u1+1)*(v2-v1+1)
print('\t {}/{} Basecalling box {}-{}x{}-{} (size: {} elements)...'.format(b,b_tot,u1,u2,v1,v2,size),file=sys.stderr)
if size <= 1:
return(u1,'')
elif (u2-u1) < 1:
return((u1, prefix_search.prefix_search_log_cy(logits2[v1:v2])[0]))
elif (v2-v1) < 1:
return((u1, prefix_search.prefix_search_log_cy(logits1[u1:u2])[0]))
else:
seq = decoding_cpp.cpp_beam_search_2d(
logits1[u1:u2],
logits2[v1:v2],
beam_width_=self.args.beam_width,
model_=self.kind)
return((u1, seq))
def _beam_search_2d_envelope(self, y1_subset, y2_subset, subset_envelope):
return(decoding_cpp.cpp_beam_search_2d(
y1_subset,
y2_subset,
subset_envelope.tolist(),
beam_width_=self.args.beam_width,
method_=self.args.beam_search_method,
model_=self.kind))
def _prefix_search_1d(self, y):
# Perform 1d basecalling and get signal-sequence mapping
(prefix, forward) = prefix_search.prefix_search_log_cy(y, return_forward=True)
try:
forward_indices = viterbi_path(forward)
except:
logger.warning('WARNING: Best label is blank! y.shape:{} forward.shape:{} prefix:{}'.format(y.shape, forward.shape, prefix))
return('',[]) # in case of gap being most probable
assert(len(prefix) == len(forward_indices))
assert(np.all(np.diff(forward_indices) >= 0))
return((prefix,forward_indices))
def _prefix_search_2d(self, logits1, logits2, b, b_tot, u1, u2, v1, v2):
MEM_LIMIT = 1000000000 # 1 GB
size = (u2-u1+1)*(v2-v1+1)
assert(size > 0)
print('\t {}/{} Basecalling box {}-{}x{}-{} (size: {} elements)...'.format(b,b_tot,u1,u2,v1,v2,size),file=sys.stderr)
if size <= 1:
return(u1,'')
elif (u2-u1) < 1:
return((u1, prefix_search.prefix_search_log_cy(logits2[v1:v2])[0]))
elif (v2-v1) < 1:
return((u1, prefix_search.prefix_search_log_cy(logits1[u1:u2])[0]))
elif size*8 > MEM_LIMIT:
logger.error('ERROR: Box too large to basecall {}-{}:{}-{} (size: {} elements)'.format(u1,u2,v1,v2,size))
return(u1,'')
else:
try:
return((u1, prefix_search.pair_prefix_search_log_cy(logits1[u1:u2],logits2[v1:v2])[0]))
except:
logger.warning('WARNING: Error while basecalling box {}-{}:{}-{}'.format(u1,u2,v1,v2))
return(u1,'')
def _prefix_search_2d_envelope(self, y1_subset, y2_subset, subset_envelope):
return(decoding_cpp.cpp_pair_prefix_search_log(
y1_subset,
y2_subset,
subset_envelope.tolist(),
"ACGT"))
def get_function(self):
if self.args.algorithm == 'beam':
if self.args.method == 'envelope':
return(self._beam_search_2d_envelope)
else:
return(self._beam_search_2d)
elif self.args.algorithm == 'prefix':
assert(self.kind == "poreover")
if self.args.method == 'envelope':
return(self._prefix_search_2d_envelope)
else:
return(self._prefix_search_2d)
def pair_decode(args):
# set up logger - should make it global
progressbar.streams.wrap_stderr()
#logging.basicConfig()
logger = get_logger()
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if args.logging == "debug":
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# print software message, should incorporate to other subroutines as well
coffee_emoji = u'\U00002615'
dna_emoji = u'\U0001F9EC'
logger.info('{0:2}{1:3}{0:2} {2:^30} {0:2}{1:3}{0:2}'.format(coffee_emoji, dna_emoji,'PoreOver pair-decode'))
#logger.info(('{0:2}{1:3}'*9+'{0:2}').format(coffee_emoji, dna_emoji))
in_path = getattr(args, 'in')
if len(in_path) == 1:
args_list = []
with open(in_path[0], 'r') as read_pairs:
for n, line in enumerate(read_pairs):
args_copy = copy.deepcopy(args)
setattr(args_copy, 'in', line.split())
#args_copy.out = "pair{}".format(n)
args_list.append(args_copy)
# set up progressbar and manage output
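        # pair_decode_helper returns (1d_fasta, 2d_fasta, summary_dict) normally,
        # (2d_fasta, summary_dict) when only a consensus is produced, or a single
        # (summary_dict,) for skipped pairs; the callback routes each piece to the
        # matching output file.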
class callback_helper:
def __init__(self):
self.counter = 0
self.pbar = progressbar.ProgressBar(max_value=len(args_list))
self.out_1d_f = open(args.out+'.1d.fasta','w')
self.out_2d_f = open(args.out+'.2d.fasta','w')
self.log_f = open(args.out+'.log','w',1)
print('# PoreOver pair-decode', file=self.log_f)
print('# '+str(vars(args)), file=self.log_f)
print('# '+'\t'.join(map(str,["read1", "read2", "length1", "length2", "sequence_identity", "skipped"])), file=self.log_f)
def callback(self, x):
self.counter += 1
self.pbar.update(self.counter)
if len(x) == 3:
print(x[0], file=self.out_1d_f)
print(x[1], file=self.out_2d_f)
print('\t'.join(map(str,[x[2].get(k, "") for k in ["read1", "read2", "length1", "length2", "sequence_identity", "skipped"]])), file=self.log_f)
elif len(x) == 2:
print(x[0], file=self.out_2d_f)
print('\t'.join(map(str,[x[1].get(k, "") for k in ["read1", "read2"]])), file=self.log_f)
elif len(x) == 1:
print('\t'.join(map(str,[x[0].get(k, "") for k in ["read1", "read2", "length1", "length2", "sequence_identity", "skipped"]])), file=self.log_f)
callback_helper_ = callback_helper()
bullet_point = u'\u25B8'+" "
logger.info(bullet_point + "found {} read pairs in {}".format(len(args_list), in_path[0]))
logger.info(bullet_point + "writing sequences to {0}.1d.fasta and {0}.2d.fasta".format(args.out))
logger.info(bullet_point + "pair alignment statistics saved to {}.log".format(args.out))
logger.info(bullet_point + "starting {} decoding processes...".format(args.threads))
with Pool(processes=args.threads) as pool:
#basecalls = pool.map(pair_decode_helper, args_list) #works but no logging
for i, arg in enumerate(args_list):
pool.apply_async(pair_decode_helper, (args_list[i],), callback=callback_helper_.callback)
pool.close()
pool.join()
else:
seqs_1d, seq_2d, summary = pair_decode_helper(args)
print(summary, file=sys.stderr)
with open(args.out+'.fasta', 'w') as out_fasta:
print(seq_2d, file=out_fasta)
def pair_decode_helper(args):
#logger = getattr(args, 'logger') # should set it globally but just testing for now
logger = get_logger() # get multiprocessing logger
in_path = getattr(args, 'in')
if len(in_path) != 2:
logger.error("ERROR: Exactly two reads are required")
path1 = Path(in_path[0])
path2 = Path(in_path[1])
# if files end in FAST5 (as pairs output might) then automatically replace extension
if path1.suffix == ".fast5":
path1 = path1.with_suffix(".npy")
if path2.suffix == ".fast5":
path2 = path2.with_suffix(".npy")
logger.debug('Read1:{} Read2:{}'.format(path1, path2))
model1 = decode.model_from_trace(os.path.join(args.dir, path1), args.basecaller)
model2 = decode.model_from_trace(os.path.join(args.dir, path2), args.basecaller)
U = model1.t_max
V = model2.t_max
if args.reverse_complement:
model2.reverse_complement()
assert(model1.kind == model2.kind)
# get appropriate helper function for multiprocessing
decoding_fn = parallel_decoder(args, model1.kind).get_function()
pair_decode_summary = {'read1':in_path[0], 'read2':in_path[1]}
if args.method == 'split':
# calculate ranges on which to split read
# currently just splitting in boxes that follow the main diagonal
box_ranges = []
u_step = args.window
for u in range(u_step,U,u_step):
box_ranges.append((u-u_step,u,int(V/U*(u-u_step)),int(V/U*u)))
box_ranges.append((box_ranges[-1][1],U,box_ranges[-1][3],V)) # add in last box with uneven
logger.debug('\t Starting consensus basecalling...')
starmap_input = []
for i, b in enumerate(box_ranges):
starmap_input.append((model1, model2, i,len(box_ranges)-1,b[0],b[1],b[2],b[3]))
basecalls = starmap(decoding_fn, starmap_input)
joined_basecalls = ''.join([b[1] for b in basecalls])
else:
if not args.diagonal_envelope:
logger.debug('\t Performing 1D basecalling...')
if args.single == 'viterbi':
basecall1, viterbi_path1 = model1.viterbi_decode(return_path=True)
basecall2, viterbi_path2 = model2.viterbi_decode(return_path=True)
elif args.single == 'beam':
print("Basecalling 1")
basecall1 = decoding_cpp.cpp_beam_search(model1.log_prob)
print("Resquiggling 1")
viterbi_path1 = decoding_cpp.cpp_viterbi_acceptor(model1.log_prob, basecall1, band_size=1000)
print("Basecalling 2")
basecall2 = decoding_cpp.cpp_beam_search(model2.log_prob)
viterbi_path2 = decoding_cpp.cpp_viterbi_acceptor(model2.log_prob, basecall2, band_size=1000)
if abs(len(basecall1) - len(basecall2)) > 1000:
logger.warning("WARNING: Skipping pair due to length mismatch.")
pair_decode_summary = {'read1':in_path[0], 'read2':in_path[1], 'length1':len(basecall1), 'length2':len(basecall2), 'skipped':1}
return [pair_decode_summary]
sequence_to_signal1, _ = get_sequence_mapping(viterbi_path1, model1.kind)
assert(len(sequence_to_signal1) == len(basecall1))
sequence_to_signal2, _ = get_sequence_mapping(viterbi_path2, model2.kind)
assert(len(sequence_to_signal2) == len(basecall2))
logger.debug('\t Aligning basecalled sequences (Read1 is {} bp and Read2 is {} bp)...'.format(len(basecall1),len(basecall2)))
#alignment = pairwise2.align.globalms(, , 2, -1, -.5, -.1)
if args.alignment == "full":
alignment = align.global_pair(basecall1, basecall2)
else:
alignment = align.global_pair_banded(basecall1, basecall2)
alignment = np.array([list(s) for s in alignment[:2]])
sequence_identity = np.sum(alignment[0] == alignment[1]) / len(alignment[0])
logger.debug('\t Read sequence identity: {}'.format(sequence_identity))
if sequence_identity < 0.5:
logger.warning("WARNING: Skipping pair due to low pairwise identity ({}%). Did you mean to take the --reverse-complement of one of the reads?".format(sequence_identity))
pair_decode_summary = {'read1':in_path[0], 'read2':in_path[1], 'length1':len(basecall1), 'length2':len(basecall2), 'sequence_identity':sequence_identity, 'skipped':1}
return [pair_decode_summary]
pair_decode_summary = {'read1':in_path[0], 'read2':in_path[1], 'length1':len(basecall1), 'length2':len(basecall2), 'sequence_identity':sequence_identity, 'skipped':0}
# get alignment_to_sequence mapping
alignment_to_sequence = np.zeros(shape=alignment.shape,dtype=int)
for i,col in enumerate(alignment.T):
# no boundary case for first element but it will wrap around to the last (which is zero)
for s in range(2):
if col[s] == '-':
alignment_to_sequence[s,i] = alignment_to_sequence[s,i-1]
else:
alignment_to_sequence[s,i] = alignment_to_sequence[s,i-1] + 1
if args.skip_matches or args.method == 'align':
anchor_ranges, anchor_type = get_anchors(alignment, matches=args.skip_threshold, indels=100)
basecall_boxes = []
basecall_anchors = []
for i,(curr_start, curr_end) in enumerate(anchor_ranges):
# get anchor sequences
if anchor_type[i] == 'mat':
basecall_anchors.append((sequence_to_signal1[alignment_to_sequence[0,curr_start]], ''.join(alignment[0,curr_start:curr_end])))
elif anchor_type[i] == 'ins':
basecall_anchors.append((sequence_to_signal1[alignment_to_sequence[0,curr_start]], ''.join(alignment[1,curr_start:curr_end])))
elif anchor_type[i] == 'del':
basecall_anchors.append((sequence_to_signal1[alignment_to_sequence[0,curr_start]], ''.join(alignment[0,curr_start:curr_end])))
if i > 0:
basecall_boxes.append((
sequence_to_signal1[alignment_to_sequence[0,anchor_ranges[i-1][1]]],
sequence_to_signal1[alignment_to_sequence[0,anchor_ranges[i][0]]],
sequence_to_signal2[alignment_to_sequence[1,anchor_ranges[i-1][1]]],
sequence_to_signal2[alignment_to_sequence[1,anchor_ranges[i][0]]]
))
else:
basecall_boxes.append((
0,
sequence_to_signal1[alignment_to_sequence[0,anchor_ranges[i][0]]],
0,
sequence_to_signal2[alignment_to_sequence[1,anchor_ranges[i][0]]]
))
assert len(anchor_ranges) > 0, 'No matches/indels of sufficient length found in alignment. Try decreasing --matches or --indels'
# add last box on the end
basecall_boxes.append((
sequence_to_signal1[alignment_to_sequence[0,anchor_ranges[-1][1]]],
model1.t_max,
sequence_to_signal2[alignment_to_sequence[1,anchor_ranges[-1][1]]],
model2.t_max))
assert(abs(len(basecall_boxes) - len(basecall_anchors))==1)
if args.debug:
with open( "debug.p", "wb" ) as pfile:
import pickle
pickle.dump({
'alignment_to_sequence':alignment_to_sequence,
'sequence_to_signal1':sequence_to_signal1,
'sequence_to_signal2':sequence_to_signal2,
'alignment':alignment,
'basecall_boxes':basecall_boxes,
'basecall_anchors':basecall_anchors,
'anchor_ranges':anchor_ranges
},pfile)
logger.debug('Splitting into {} segments to basecall, reduced to ~{:.2f} of total'.format(len(basecall_boxes), np.sum([b[1]-b[0] for b in basecall_boxes])/U))
if args.method == 'align': # args.method is deprecated
logger.debug('\t Starting consensus basecalling...')
starmap_input = []
for i, b in enumerate(basecall_boxes):
starmap_input.append((model1, model2, i,len(basecall_boxes)-1,b[0],b[1],b[2],b[3]))
basecalls = starmap(decoding_fn, starmap_input)
# sort each segment by its first signal index
joined_basecalls = ''.join([i[1] for i in sorted(basecalls + basecall_anchors)])
if args.method == 'envelope':
if args.debug:
with open( "debug.p", "wb" ) as pfile:
import pickle
pickle.dump({
'alignment_to_sequence':alignment_to_sequence,
'sequence_to_signal1':sequence_to_signal1,
'sequence_to_signal2':sequence_to_signal2,
'alignment':alignment
},pfile)
# prepare data for passing to C++
y1 = model1.log_prob
y2 = model2.log_prob
# Build envelope
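        # The envelope limits the 2D search space: row u of read 1 may only be paired
        # with columns between alignment_envelope[u, 0] and alignment_envelope[u, 1] of
        # read 2, using either a fixed diagonal band or a band derived from the 1D alignment.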
if args.diagonal_envelope:
alignment_envelope = np.array([(max(int(u/U*V)-args.diagonal_width,0),min(int(u/U*V)+args.diagonal_width,V)) for u in range(U)])
else:
alignment_col = envelope.get_alignment_columns(alignment)
alignment_envelope = envelope.build_envelope(y1,y2,alignment_col, sequence_to_signal1, sequence_to_signal2, padding=args.padding)
if args.debug_envelope:
# np.median(alignment_envelope[:,1]-(np.arange(U)*U/V).astype(int))
envelope_size = alignment_envelope[:,1]-alignment_envelope[:,0]
print(path1.stem, path2.stem, len(basecall1), len(basecall2), U, V, np.mean(envelope_size), np.std(envelope_size), np.median(envelope_size), np.min(envelope_size), np.max(envelope_size))
return ([{"skipped":1}])
logger.debug('\t Starting consensus basecalling...')
if not args.skip_matches:
joined_basecalls = decoding_fn(y1, y2, alignment_envelope)
else:
basecalls = []
for i, b in enumerate(basecall_boxes):
alignment_envelope_ = alignment_envelope[b[0]:b[1]]
y1_ = y1[b[0]:b[1]]
y2_ = y2[alignment_envelope_[0,0]:alignment_envelope_[-1,1]]
alignment_envelope_ -= alignment_envelope_[0,0]
basecalls.append((b[0], decoding_fn(y1_, y2_, alignment_envelope_)))
# sort each segment by its first signal index
joined_basecalls = ''.join([i[1] for i in sorted(basecalls + basecall_anchors)])
# return formatted strings but do output in main pair_decode function
if args.diagonal_envelope:
# no 1D decoding to return if using a simple diagonal band
        return (fasta_format('consensus;{};{};{}'.format(args.method, path1.stem, path2.stem), joined_basecalls), pair_decode_summary)
else:
return (fasta_format(in_path[0], basecall1)+fasta_format(in_path[1], basecall2), fasta_format('consensus;{};{}'.format(path1.stem, path2.stem), joined_basecalls), pair_decode_summary)
#return((basecall1, basecall2), joined_basecalls)
``` |
{
"source": "jordivandooren/flask-openid",
"score": 3
} |
#### File: flask-openid/example/example.py
```python
from flask import Flask, render_template, request, g, session, flash, \
redirect, url_for, abort
from flask_openid import OpenID
from openid.extensions import pape
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
# setup flask
app = Flask(__name__)
app.config.update(
DATABASE_URI = 'sqlite:///flask-openid.db',
SECRET_KEY = 'development key',
DEBUG = True
)
# setup flask-openid
oid = OpenID(app, safe_roots=[], extension_responses=[pape.Response])
# setup sqlalchemy
engine = create_engine(app.config['DATABASE_URI'])
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=True,
bind=engine))
Base = declarative_base()
Base.query = db_session.query_property()
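# gives every model a Model.query attribute bound to the scoped session,
# mirroring the Flask-SQLAlchemy pattern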
def init_db():
Base.metadata.create_all(bind=engine)
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(60))
email = Column(String(200))
openid = Column(String(200))
def __init__(self, name, email, openid):
self.name = name
self.email = email
self.openid = openid
@app.before_request
def before_request():
g.user = None
if 'openid' in session:
g.user = User.query.filter_by(openid=session['openid']).first()
@app.after_request
def after_request(response):
db_session.remove()
return response
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
"""Does the login via OpenID. Has to call into `oid.try_login`
to start the OpenID machinery.
"""
    # if we are already logged in, go back to where we came from
if g.user is not None:
return redirect(oid.get_next_url())
if request.method == 'POST':
openid = request.form.get('openid')
if openid:
pape_req = pape.Request([])
return oid.try_login(openid, ask_for=['email', 'nickname'],
ask_for_optional=['fullname'],
extensions=[pape_req])
return render_template('login.html', next=oid.get_next_url(),
error=oid.fetch_error())
@oid.after_login
def create_or_login(resp):
"""This is called when login with OpenID succeeded and it's not
necessary to figure out if this is the users's first login or not.
This function has to redirect otherwise the user will be presented
with a terrible URL which we certainly don't want.
"""
session['openid'] = resp.identity_url
if 'pape' in resp.extensions:
pape_resp = resp.extensions['pape']
session['auth_time'] = pape_resp.auth_time
user = User.query.filter_by(openid=resp.identity_url).first()
if user is not None:
flash(u'Successfully signed in')
g.user = user
return redirect(oid.get_next_url())
return redirect(url_for('create_profile', next=oid.get_next_url(),
name=resp.fullname or resp.nickname,
email=resp.email))
@app.route('/create-profile', methods=['GET', 'POST'])
def create_profile():
"""If this is the user's first login, the create_or_login function
will redirect here so that the user can set up his profile.
"""
if g.user is not None or 'openid' not in session:
return redirect(url_for('index'))
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
if not name:
flash(u'Error: you have to provide a name')
elif '@' not in email:
flash(u'Error: you have to enter a valid email address')
else:
flash(u'Profile successfully created')
db_session.add(User(name, email, session['openid']))
db_session.commit()
return redirect(oid.get_next_url())
return render_template('create_profile.html', next_url=oid.get_next_url())
@app.route('/profile', methods=['GET', 'POST'])
def edit_profile():
"""Updates a profile"""
if g.user is None:
abort(401)
form = dict(name=g.user.name, email=g.user.email)
if request.method == 'POST':
if 'delete' in request.form:
db_session.delete(g.user)
db_session.commit()
session['openid'] = None
flash(u'Profile deleted')
return redirect(url_for('index'))
form['name'] = request.form['name']
form['email'] = request.form['email']
if not form['name']:
flash(u'Error: you have to provide a name')
elif '@' not in form['email']:
flash(u'Error: you have to enter a valid email address')
else:
            flash(u'Profile successfully updated')
g.user.name = form['name']
g.user.email = form['email']
db_session.commit()
return redirect(url_for('edit_profile'))
return render_template('edit_profile.html', form=form)
@app.route('/logout')
def logout():
session.pop('openid', None)
flash(u'You have been signed out')
return redirect(oid.get_next_url())
if __name__ == '__main__':
init_db()
app.run()
``` |
{
"source": "JordiVillaFreixa/ORcode",
"score": 3
} |
#### File: JordiVillaFreixa/ORcode/linear_programming.py
```python
from ortools.linear_solver import pywraplp
def ShowResults(solver, variable_list, constraint_list):
"""Solve the problem and print the solution."""
print('# of variables = %d' % solver.NumVariables())
print('# of constraints = %d' % solver.NumConstraints())
result_status = solver.Solve()
# check results are ok
assert result_status == pywraplp.Solver.OPTIMAL
assert solver.VerifySolution(1e-7, True)
print('Problem solved in %f milliseconds' % solver.wall_time())
print('Optimal objective value = %f' % solver.Objective().Value())
for variable in variable_list:
print('%s = %f' % (variable.name(), variable.solution_value()))
print('Problem solved in %d iterations' % solver.iterations())
for variable in variable_list:
print('%s: reduced cost = %f' %
(variable.name(), variable.reduced_cost()))
activities = solver.ComputeConstraintActivities()
for i, constraint in enumerate(constraint_list):
print(('constraint %d: dual value (shadow price) = %f\n'
' final value (activity) = %f' %
(i, constraint.dual_value(), activities[constraint.index()])))
# Define the solver to use
solver = pywraplp.Solver.CreateSolver('GLOP')
# Define the problem
infinity = solver.infinity()
x1 = solver.NumVar(0.0, infinity, 'x1')
x2 = solver.NumVar(0.0, infinity, 'x2')
solver.Maximize(50 * x1 + 60 * x2)
c0 = solver.Add(50 * x1 + 30 * x2 <= 2000, 'Material')
c1 = solver.Add(6 * x1 + 5 * x2 <= 300, 'MachineTime')
c2 = solver.Add(3 * x1 + 5 * x2 <= 200, 'Labor')
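# For reference (hand-checked): the optimum of this LP is x1 = 25, x2 = 25 with
# objective value 2750; Material and Labor are binding, MachineTime has slack.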
ShowResults(solver, [x1, x2], [c0, c1, c2])
``` |
{
"source": "jordiwes/namex",
"score": 2
} |
#### File: namex/models/nr_number.py
```python
from datetime import datetime
from . import db, ma
class NRNumber(db.Model):
__tablename__ = 'nr_number'
# core fields
id = db.Column(db.Integer, primary_key=True)
nrNum = db.Column('nr_num', db.String(10), unique=True)
lastUpdate = db.Column('last_update', db.DateTime(timezone=True), default=datetime.utcnow, onupdate=datetime.utcnow)
@classmethod
def get_next_nr_num(cls, last_nr):
last_nr_header = last_nr[0:4]
last_number = last_nr[4:10]
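        # e.g. 'NR L000123' -> 'NR L000124'; on '999999' the numeric part wraps to
        # '000000' and the header's trailing letter advances (see assumption below).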
if last_number == '999999':
            # roll the header's trailing letter forward (e.g. 'NR L' -> 'NR M') and
            # restart the sequence; assumes the 4-character header ends in a letter
            next_nr_header = last_nr_header[:3] + chr(ord(last_nr_header[3]) + 1)
            next_number = '000000'
else:
next_nr_header = last_nr_header
next_number = str((int(last_number) + 1)).zfill(6)
next_nr_num = next_nr_header + next_number
return (next_nr_num)
def json(self):
return {'id': self.id,
'nrNum': self.nrNum
}
def save_to_db(self):
db.session.add(self)
db.session.commit()
class NRNumberSchema(ma.ModelSchema):
class Meta:
model = NRNumber
```
#### File: namex/resources/requests.py
```python
from flask import request, jsonify, g, current_app, get_flashed_messages
from flask_restx import Namespace, Resource, fields, cors
from flask_jwt_oidc import AuthError
from namex.utils.logging import setup_logging
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import func, text
from sqlalchemy.inspection import inspect
from namex import jwt, nro, services
from namex.exceptions import BusinessException
from namex.models import db, ValidationError
from namex.models import Request as RequestDAO, RequestsSchema, RequestsHeaderSchema, RequestsSearchSchema
from namex.models import Name, NameSchema, PartnerNameSystemSchema
from namex.models import User, State, Comment, NameCommentSchema, Event
from namex.models import ApplicantSchema
from namex.models import DecisionReason
from namex.services import ServicesError, MessageServices, EventRecorder
from namex.services.name_request.utils import check_ownership, get_or_create_user_by_jwt, valid_state_transition
from namex.utils.common import convert_to_ascii
from namex.utils.auth import cors_preflight
from namex.analytics import SolrQueries, RestrictedWords, VALID_ANALYSIS as ANALYTICS_VALID_ANALYSIS
from namex.services.nro import NROServicesError
import datetime
setup_logging() # Important to do this first
# Register a local namespace for the requests
api = Namespace('namexRequests', description='Namex - Requests API')
# Marshmallow schemas
request_schema = RequestsSchema(many=False)
request_schemas = RequestsSchema(many=True)
request_header_schema = RequestsHeaderSchema(many=False)
request_search_schemas = RequestsSearchSchema(many=True)
names_schema = NameSchema(many=False)
names_schemas = NameSchema(many=True)
nwpta_schema = PartnerNameSystemSchema(many=False)
name_comment_schema = NameCommentSchema(many=False)
applicant_schema = ApplicantSchema(many=False)
@api.errorhandler(AuthError)
def handle_auth_error(ex):
response = jsonify(ex.error)
response.status_code = ex.status_code
return response
# noinspection PyUnresolvedReferences
@cors_preflight("GET")
@api.route('/echo', methods=['GET', 'OPTIONS'])
class Echo(Resource):
"""Helper method to echo back all your JWT token info
"""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(*args, **kwargs):
try:
return jsonify(g.jwt_oidc_token_info), 200
except Exception as err:
return {"error": "{}".format(err)}, 500
#################### QUEUES #######################
@cors_preflight("GET")
@api.route('/queues/@me/oldest', methods=['GET', 'OPTIONS'])
class RequestsQueue(Resource):
"""Acting like a QUEUE this gets the next NR (just the NR number)
and assigns it to your auth id, and marks it as INPROGRESS
"""
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_roles([User.APPROVER])
def get():
""" Gets the oldest nr num, that is in DRAFT status
It then marks the NR as INPROGRESS, and assigns it to the User as found in the JWT
It also moves control of the Request from NRO so that NameX fully owns it
:Authorization: (JWT): valid JWT with the User.APPROVER role
:return: (str) (dict) (https status): 500, 404, or NR NUM, or NR NUM and a system alert in the dict
"""
# GET existing or CREATE new user based on the JWT info
try:
user = get_or_create_user_by_jwt(g.jwt_oidc_token_info)
except ServicesError as se:
            return jsonify(message='unable to get or create user, aborting operation'), 500
except Exception as unmanaged_error:
current_app.logger.error(unmanaged_error.with_traceback(None))
return jsonify(message='internal server error'), 500
# get the next NR assigned to the User
try:
nr, new_assignment = RequestDAO.get_queued_oldest(user)
except BusinessException as be:
return jsonify(message='There are no more requests in the {} Queue'.format(State.DRAFT)), 404
except Exception as unmanaged_error:
current_app.logger.error(unmanaged_error.with_traceback(None))
return jsonify(message='internal server error'), 500
current_app.logger.debug('got the nr:{} and its a new assignment?{}'.format(nr.nrNum, new_assignment))
# if no NR returned
if 'nr' not in locals() or not nr:
return jsonify(message='No more NRs in Queue to process'), 200
# if it's an NR already INPROGRESS and assigned to the user
if nr and not new_assignment:
return jsonify(nameRequest='{}'.format(nr.nrNum)), 200
# if it's a new assignment, then LOGICALLY lock the record in NRO
# if we fail to do that, send back the NR and the errors for user-intervention
if new_assignment:
warnings = nro.move_control_of_request_from_nro(nr, user)
if 'warnings' in locals() and warnings:
return jsonify(nameRequest='{}'.format(nr.nrNum), warnings=warnings), 206
EventRecorder.record(user, Event.GET, nr, {})
return jsonify(nameRequest='{}'.format(nr.nrNum)), 200
@cors_preflight('GET, POST')
@api.route('', methods=['GET', 'POST', 'OPTIONS'])
class Requests(Resource):
a_request = api.model('Request', {'submitter': fields.String('The submitter name'),
'corpType': fields.String('The corporation type'),
'reqType': fields.String('The name request type')
})
START = 0
ROWS = 10
# search_request_schemas = RequestsSchema(many=True)
# ,exclude=['id'
# ,'applicants'
# ,'partnerNS'
# ,'requestId'
# ,'previousRequestId'])
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(*args, **kwargs):
# validate row & start params
start = request.args.get('start', Requests.START)
rows = request.args.get('rows', Requests.ROWS)
try:
start = int(start)
rows = int(rows)
except Exception as err:
current_app.logger.info('start or rows not an int, err: {}'.format(err))
return jsonify({'message': 'paging parameters were not integers'}), 406
# queue must be a list of states
queue = request.args.get('queue', None)
if queue:
if queue == 'COMPLETED':
queue = 'APPROVED,CONDITIONAL,REJECTED'
queue = queue.upper().split(',')
for q in queue:
if q not in State.VALID_STATES:
return jsonify({'message': '\'{}\' is not a valid queue'.format(queue)}), 406
# order must be a string of 'column:asc,column:desc'
order = request.args.get('order', 'submittedDate:desc,stateCd:desc')
# order=dict((x.split(":")) for x in order.split(',')) // con't pass as a dict as the order is lost
# create the order by txt, looping through Request Attributes and mapping to column names
# TODO: this is fragile across joins, fix it up if queries are going to sort across joins
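        # e.g. order='submittedDate:desc,stateCd:desc' builds sort_by from the mapped
        # column names ('<column> desc NULLS LAST, ...') and order_list from the attribute
        # names ('submittedDate desc NULLS LAST, ...'), which is echoed back in the response.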
cols = inspect(RequestDAO).columns
col_keys = cols.keys()
sort_by = ''
order_list = ''
for k, v in ((x.split(":")) for x in order.split(',')):
vl = v.lower()
if (k in col_keys) and (vl == 'asc' or vl == 'desc'):
if len(sort_by) > 0:
sort_by = sort_by + ', '
order_list = order_list + ', '
sort_by = sort_by + '{columns} {direction} NULLS LAST'.format(columns=cols[k], direction=vl)
order_list = order_list + '{attribute} {direction} NULLS LAST'.format(attribute=k, direction=vl)
# Assemble the query
nrNum = request.args.get('nrNum', None)
activeUser = request.args.get('activeUser', None)
compName = request.args.get('compName', None)
priority = request.args.get('ranking', None)
notification = request.args.get('notification', None)
submittedInterval = request.args.get('submittedInterval', None)
lastUpdateInterval = request.args.get('lastUpdateInterval', None)
current_hour = int(request.args.get('hour', 0))
q = RequestDAO.query.filter()
if queue:
q = q.filter(RequestDAO.stateCd.in_(queue))
q = q.filter(RequestDAO.nrNum.notlike('NR L%'))
if nrNum:
nrNum = nrNum.replace('NR', '').strip()
nrNum = nrNum.replace('nr', '').strip()
nrNum = '%' + nrNum + '%'
q = q.filter(RequestDAO.nrNum.like(nrNum))
if activeUser:
q = q.join(RequestDAO.activeUser).filter(User.username.ilike('%' + activeUser + '%'))
# TODO: fix count on search by compName -- returns count of all names that match
# -- want it to be all NRs (nrs can have multiple names that match)
# ---- right now count is adjusted on the frontend in method 'populateTable'
if compName:
q = q.join(RequestDAO.names).filter(Name.name.ilike('%' + compName + '%'))
if priority == 'Standard':
q = q.filter(RequestDAO.priorityCd != 'Y')
elif priority == 'Priority':
q = q.filter(RequestDAO.priorityCd != 'N')
if notification == 'Notified':
q = q.filter(RequestDAO.furnished != 'N')
elif notification == 'Not Notified':
q = q.filter(RequestDAO.furnished != 'Y')
if submittedInterval == 'Today':
q = q.filter(RequestDAO.submittedDate > text(
'NOW() - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour)))
elif submittedInterval == '7 days':
q = q.filter(RequestDAO.submittedDate > text(
'NOW() - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24 * 6)))
elif submittedInterval == '30 days':
q = q.filter(RequestDAO.submittedDate > text(
'NOW() - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24 * 29)))
elif submittedInterval == '90 days':
q = q.filter(RequestDAO.submittedDate > text(
'NOW() - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24 * 89)))
elif submittedInterval == '1 year':
q = q.filter(RequestDAO.submittedDate > text('NOW() - INTERVAL \'1 YEARS\''))
elif submittedInterval == '3 years':
q = q.filter(RequestDAO.submittedDate > text('NOW() - INTERVAL \'3 YEARS\''))
elif submittedInterval == '5 years':
q = q.filter(RequestDAO.submittedDate > text('NOW() - INTERVAL \'5 YEARS\''))
if lastUpdateInterval == 'Today':
q = q.filter(RequestDAO.lastUpdate > text(
'(now() at time zone \'utc\') - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour)))
if lastUpdateInterval == 'Yesterday':
today_offset = current_hour
yesterday_offset = today_offset + 24
q = q.filter(RequestDAO.lastUpdate < text(
'(now() at time zone \'utc\') - INTERVAL \'{today_offset} HOURS\''.format(today_offset=today_offset)))
q = q.filter(RequestDAO.lastUpdate > text(
'(now() at time zone \'utc\') - INTERVAL \'{yesterday_offset} HOURS\''.format(yesterday_offset=yesterday_offset)))
elif lastUpdateInterval == '2 days':
q = q.filter(RequestDAO.lastUpdate > text(
'(now() at time zone \'utc\') - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24)))
elif lastUpdateInterval == '7 days':
q = q.filter(RequestDAO.lastUpdate > text(
'(now() at time zone \'utc\') - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24 * 6)))
elif lastUpdateInterval == '30 days':
q = q.filter(RequestDAO.lastUpdate > text(
'(now() at time zone \'utc\') - INTERVAL \'{hour_offset} HOURS\''.format(hour_offset=current_hour + 24 * 29)))
q = q.order_by(text(sort_by))
# get a count of the full set size, this ignore the offset & limit settings
count_q = q.statement.with_only_columns([func.count()]).order_by(None)
count = db.session.execute(count_q).scalar()
# Add the paging
q = q.offset(start)
q = q.limit(rows)
# create the response
rep = {'response': {'start': start,
'rows': rows,
'numFound': count,
'numPriorities': 0,
'numUpdatedToday': 0,
'queue': queue,
'order': order_list
},
'nameRequests': request_search_schemas.dump(q.all())
}
return jsonify(rep), 200
# @api.errorhandler(AuthError)
# def handle_auth_error(ex):
# response = jsonify(ex.error)
# response.status_code = ex.status_code
# return response, 401
# return {}, 401
# noinspection PyUnusedLocal,PyUnusedLocal
@api.expect(a_request)
@cors.crossdomain(origin='*')
@jwt.requires_auth
def post(self, *args, **kwargs):
current_app.logger.info('Someone is trying to post a new request')
return jsonify({'message': 'Not Implemented'}), 501
# noinspection PyUnresolvedReferences
@cors_preflight("GET, PATCH, PUT, DELETE")
@api.route('/<string:nr>', methods=['GET', 'PATCH', 'PUT', 'DELETE', 'OPTIONS'])
class Request(Resource):
@staticmethod
@cors.crossdomain(origin='*')
@jwt.has_one_of_roles([User.APPROVER, User.EDITOR, User.VIEWONLY])
def get(nr):
# return jsonify(request_schema.dump(RequestDAO.query.filter_by(nr=nr.upper()).first_or_404()))
return jsonify(RequestDAO.query.filter_by(nrNum=nr.upper()).first_or_404().json())
@staticmethod
# @cors.crossdomain(origin='*')
@jwt.requires_roles([User.APPROVER, User.EDITOR])
def delete(nr):
return '', 501 # not implemented
# nrd = RequestDAO.find_by_nr(nr)
# even if not found we still return a 204, which is expected spec behaviour
# if nrd:
# nrd.stateCd = State.CANCELLED
# nrd.save_to_db()
#
# return '', 204
@staticmethod
@cors.crossdomain(origin='*')
@jwt.has_one_of_roles([User.APPROVER, User.EDITOR])
def patch(nr, *args, **kwargs):
""" Patches the NR. Currently only handles STATE (with optional comment) and Previous State.
:param nr (str): NameRequest Number in the format of 'NR 000000000'
:param args: __futures__
:param kwargs: __futures__
:return: 200 - success; 40X for errors
:HEADER: Valid JWT Bearer Token for a valid REALM
:JWT Scopes: - USER.APPROVER, USER.EDITOR
APPROVERS: Can change from almost any state, other than CANCELLED, EXPIRED and ( COMPLETED not yet furnished )
EDITOR: Can't change to a COMPLETED state (ACCEPTED, REJECTED, CONDITION)
"""
# do the cheap check first before the more expensive ones
# check states
json_input = request.get_json()
if not json_input:
return jsonify({'message': 'No input data provided'}), 400
# find NR
try:
user = get_or_create_user_by_jwt(g.jwt_oidc_token_info)
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return jsonify({"message": "Request:{} not found".format(nr)}), 404
start_state = nrd.stateCd
except NoResultFound as nrf:
# not an error we need to track in the log
return jsonify({"message": "Request:{} not found".format(nr)}), 404
except Exception as err:
current_app.logger.error("Error when patching NR:{0} Err:{1}".format(nr, err))
return jsonify({"message": "NR had an internal error"}), 404
try:
### STATE ###
# all these checks to get removed to marshmallow
state = json_input.get('state', None)
if state:
if state not in State.VALID_STATES:
return jsonify({"message": "not a valid state"}), 406
if not nrd:
return jsonify({"message": "Request:{} not found".format(nr)}), 404
if not valid_state_transition(user, nrd, state):
return jsonify(message='you are not authorized to make these changes'), 401
# if the user has an existing (different) INPROGRESS NR, revert to previous state (default to HOLD)
existing_nr = RequestDAO.get_inprogress(user)
if existing_nr:
if existing_nr.previousStateCd:
existing_nr.stateCd = existing_nr.previousStateCd
existing_nr.previousStateCd = None
else:
existing_nr.stateCd = State.HOLD
existing_nr.save_to_db()
# if the NR is in DRAFT then LOGICALLY lock the record in NRO
# if we fail to do that, send back the NR and the errors for user-intervention
if nrd.stateCd == State.DRAFT:
warnings = nro.move_control_of_request_from_nro(nrd, user)
# if we're changing to DRAFT, update NRO status to "D" in NRO
if state == State.DRAFT:
change_flags = {
'is_changed__request': False,
'is_changed__previous_request': False,
'is_changed__applicant': False,
'is_changed__address': False,
'is_changed__name1': False,
'is_changed__name2': False,
'is_changed__name3': False,
'is_changed__nwpta_ab': False,
'is_changed__nwpta_sk': False,
'is_changed__request_state': True,
'is_changed_consent': False
}
warnings = nro.change_nr(nrd, change_flags)
if warnings:
MessageServices.add_message(MessageServices.ERROR,
'change_request_in_NRO', warnings)
nrd.stateCd = state
nrd.userId = user.id
if state == State.CANCELLED:
nro.cancel_nr(nrd, user.username)
# if our state wasn't INPROGRESS and it is now, ensure the furnished flag is N
                if ('start_state' in locals()
and start_state != State.INPROGRESS
and nrd.stateCd == State.INPROGRESS):
# set / reset the furnished flag to N
nrd.furnished = 'N'
# if we're changing to a completed or cancelled state, clear reset flag on NR record
if state in State.COMPLETED_STATE + [State.CANCELLED]:
nrd.hasBeenReset = False
if nrd.stateCd == State.CONDITIONAL and nrd.consentFlag is None:
nrd.consentFlag = 'Y'
### COMMENTS ###
# we only add new comments, we do not change existing comments
# - we can find new comments in json as those with no ID
if json_input.get('comments', None):
for in_comment in json_input['comments']:
is_new_comment = False
try:
if in_comment['id'] is None or in_comment['id'] == 0:
is_new_comment = True
except KeyError:
is_new_comment = True
if is_new_comment and in_comment['comment'] is not None:
new_comment = Comment()
new_comment.comment = convert_to_ascii(in_comment['comment'])
new_comment.examiner = user
new_comment.nrId = nrd.id
### END comments ###
### PREVIOUS STATE ###
# - None (null) is a valid value for Previous State
if 'previousStateCd' in json_input.keys():
nrd.previousStateCd = json_input.get('previousStateCd', None)
# save record
nrd.save_to_db()
EventRecorder.record(user, Event.PATCH, nrd, json_input)
except Exception as err:
current_app.logger.debug(err.with_traceback(None))
return jsonify(message='Internal server error'), 500
if 'warnings' in locals() and warnings:
return jsonify(message='Request:{} - patched'.format(nr), warnings=warnings), 206
return jsonify(message='Request:{} - patched'.format(nr)), 200
@staticmethod
@cors.crossdomain(origin='*')
@jwt.has_one_of_roles([User.APPROVER, User.EDITOR])
def put(nr, *args, **kwargs):
# do the cheap check first before the more expensive ones
json_input = request.get_json()
if not json_input:
return jsonify(message='No input data provided'), 400
current_app.logger.debug(json_input)
nr_num = json_input.get('nrNum', None)
if nr_num and nr_num != nr:
return jsonify(message='Data contains a different NR# than this resource'), 400
state = json_input.get('state', None)
if not state:
return jsonify({"message": "state not set"}), 406
if state not in State.VALID_STATES:
return jsonify({"message": "not a valid state"}), 406
try:
user = get_or_create_user_by_jwt(g.jwt_oidc_token_info)
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return jsonify({"message": "Request:{} not found".format(nr)}), 404
orig_nrd = nrd.json()
except NoResultFound as nrf:
# not an error we need to track in the log
return jsonify({"message": "Request:{} not found".format(nr)}), 404
except Exception as err:
current_app.logger.error("Error when patching NR:{0} Err:{1}".format(nr, err))
return jsonify({"message": "NR had an internal error"}), 404
if not valid_state_transition(user, nrd, state):
return jsonify(message='you are not authorized to make these changes'), 401
name_choice_exists = {1: False, 2: False, 3: False}
for name in json_input.get('names', None):
            if name['name'] and name['name'] != '':
name_choice_exists[name['choice']] = True
if not name_choice_exists[1]:
return jsonify(message='Data does not include a name choice 1'), 400
if not name_choice_exists[2] and name_choice_exists[3]:
return jsonify(message='Data contains a name choice 3 without a name choice 2'), 400
try:
existing_nr = RequestDAO.get_inprogress(user)
if existing_nr:
existing_nr.stateCd = State.HOLD
existing_nr.save_to_db()
# convert Expiration Date to correct format
if json_input.get('expirationDate', None):
json_input['expirationDate'] = str(datetime.datetime.strptime(
str(json_input['expirationDate'][5:]), '%d %b %Y %H:%M:%S %Z'))
# convert Submitted Date to correct format
if json_input.get('submittedDate', None):
json_input['submittedDate'] = str(datetime.datetime.strptime(
str(json_input['submittedDate'][5:]), '%d %b %Y %H:%M:%S %Z'))
if json_input.get('consent_dt', None):
json_input['consent_dt'] = str(datetime.datetime.strptime(
str(json_input['consent_dt'][5:]), '%d %b %Y %H:%M:%S %Z'))
# convert NWPTA dates to correct format
if json_input.get('nwpta', None):
for region in json_input['nwpta']:
try:
if region['partnerNameDate'] == '':
region['partnerNameDate'] = None
if region['partnerNameDate']:
region['partnerNameDate'] = str(datetime.datetime.strptime(
str(region['partnerNameDate']), '%d-%m-%Y'))
except ValueError:
pass
# pass on this error and catch it when trying to add to record, to be returned
# ## If the current state is DRAFT, the transfer control from NRO to NAMEX
# if the NR is in DRAFT then LOGICALLY lock the record in NRO
# if we fail to do that, send back the NR and the errors for user-intervention
if nrd.stateCd == State.DRAFT:
warnings = nro.move_control_of_request_from_nro(nrd, user)
if warnings:
MessageServices.add_message(MessageServices.WARN, 'nro_lock', warnings)
### REQUEST HEADER ###
# update request header
errors = request_header_schema.validate(json_input, partial=True)
if errors:
# return jsonify(errors), 400
MessageServices.add_message(MessageServices.ERROR, 'request_validation', errors)
# if reset is set to true then this nr will be set to H + name_examination proc will be called in oracle
reset = False
if nrd.furnished == RequestDAO.REQUEST_FURNISHED and json_input.get('furnished', None) == 'N':
reset = True
request_header_schema.load(json_input, instance=nrd, partial=True)
nrd.additionalInfo = convert_to_ascii(json_input.get('additionalInfo', None))
nrd.furnished = json_input.get('furnished', 'N')
nrd.natureBusinessInfo = convert_to_ascii(json_input.get('natureBusinessInfo', None))
nrd.stateCd = state
nrd.userId = user.id
nrd.consentFlag = json_input.get('consentFlag', None)
nrd.consent_dt = json_input.get('consent_dt', None)
if reset:
# set the flag indicating that the NR has been reset
nrd.hasBeenReset = True
# add a generated comment re. this NR being reset
json_input['comments'].append({'comment': 'This NR was RESET.'})
try:
previousNr = json_input['previousNr']
nrd.previousRequestId = RequestDAO.find_by_nr(previousNr).requestId
except AttributeError:
nrd.previousRequestId = None
except KeyError:
nrd.previousRequestId = None
# if we're changing to a completed or cancelled state, clear reset flag on NR record
if state in State.COMPLETED_STATE + [State.CANCELLED]:
nrd.hasBeenReset = False
# check if any of the Oracle db fields have changed, so we can send them back
is_changed__request = False
is_changed__previous_request = False
is_changed__request_state = False
is_changed_consent = False
if nrd.requestTypeCd != orig_nrd['requestTypeCd']:
is_changed__request = True
if nrd.expirationDate != orig_nrd['expirationDate']:
is_changed__request = True
if nrd.xproJurisdiction != orig_nrd['xproJurisdiction']:
is_changed__request = True
if nrd.additionalInfo != orig_nrd['additionalInfo']:
is_changed__request = True
if nrd.natureBusinessInfo != orig_nrd['natureBusinessInfo']:
is_changed__request = True
if nrd.previousRequestId != orig_nrd['previousRequestId']:
is_changed__previous_request = True
if nrd.stateCd != orig_nrd['state']:
is_changed__request_state = True
if nrd.consentFlag != orig_nrd['consentFlag']:
is_changed_consent = True
# Need this for a re-open
if nrd.stateCd != State.CONDITIONAL and is_changed__request_state:
nrd.consentFlag = None
nrd.consent_dt = None
### END request header ###
### APPLICANTS ###
is_changed__applicant = False
is_changed__address = False
applicants_d = nrd.applicants.one_or_none()
if applicants_d:
orig_applicant = applicants_d.as_dict()
appl = json_input.get('applicants', None)
if appl:
errm = applicant_schema.validate(appl, partial=True)
if errm:
# return jsonify(errm), 400
MessageServices.add_message(MessageServices.ERROR, 'applicants_validation', errm)
applicant_schema.load(appl, instance=applicants_d, partial=True)
# convert data to ascii, removing data that won't save to Oracle
applicants_d.lastName = convert_to_ascii(applicants_d.lastName)
applicants_d.firstName = convert_to_ascii(applicants_d.firstName)
applicants_d.middleName = convert_to_ascii(applicants_d.middleName)
applicants_d.phoneNumber = convert_to_ascii(applicants_d.phoneNumber)
applicants_d.faxNumber = convert_to_ascii(applicants_d.faxNumber)
applicants_d.emailAddress = convert_to_ascii(applicants_d.emailAddress)
applicants_d.contact = convert_to_ascii(applicants_d.contact)
applicants_d.clientFirstName = convert_to_ascii(applicants_d.clientFirstName)
applicants_d.clientLastName = convert_to_ascii(applicants_d.clientLastName)
applicants_d.addrLine1 = convert_to_ascii(applicants_d.addrLine1)
applicants_d.addrLine2 = convert_to_ascii(applicants_d.addrLine2)
applicants_d.addrLine3 = convert_to_ascii(applicants_d.addrLine3)
applicants_d.city = convert_to_ascii(applicants_d.city)
applicants_d.postalCd = convert_to_ascii(applicants_d.postalCd)
applicants_d.stateProvinceCd = convert_to_ascii(applicants_d.stateProvinceCd)
applicants_d.countryTypeCd = convert_to_ascii(applicants_d.countryTypeCd)
# check if any of the Oracle db fields have changed, so we can send them back
if applicants_d.lastName != orig_applicant['lastName']:
is_changed__applicant = True
if applicants_d.firstName != orig_applicant['firstName']:
is_changed__applicant = True
if applicants_d.middleName != orig_applicant['middleName']:
is_changed__applicant = True
if applicants_d.phoneNumber != orig_applicant['phoneNumber']:
is_changed__applicant = True
if applicants_d.faxNumber != orig_applicant['faxNumber']:
is_changed__applicant = True
if applicants_d.emailAddress != orig_applicant['emailAddress']:
is_changed__applicant = True
if applicants_d.contact != orig_applicant['contact']:
is_changed__applicant = True
if applicants_d.clientFirstName != orig_applicant['clientFirstName']:
is_changed__applicant = True
if applicants_d.clientLastName != orig_applicant['clientLastName']:
is_changed__applicant = True
if applicants_d.declineNotificationInd != orig_applicant['declineNotificationInd']:
is_changed__applicant = True
if applicants_d.addrLine1 != orig_applicant['addrLine1']:
is_changed__address = True
if applicants_d.addrLine2 != orig_applicant['addrLine2']:
is_changed__address = True
if applicants_d.addrLine3 != orig_applicant['addrLine3']:
is_changed__address = True
if applicants_d.city != orig_applicant['city']:
is_changed__address = True
if applicants_d.postalCd != orig_applicant['postalCd']:
is_changed__address = True
if applicants_d.stateProvinceCd != orig_applicant['stateProvinceCd']:
is_changed__address = True
if applicants_d.countryTypeCd != orig_applicant['countryTypeCd']:
is_changed__address = True
else:
applicants_d.delete_from_db()
is_changed__applicant = True
is_changed__address = True
### END applicants ###
### NAMES ###
# TODO: set consumptionDate not working -- breaks changing name values
is_changed__name1 = False
is_changed__name2 = False
is_changed__name3 = False
deleted_names = [False] * 3
if len(nrd.names.all()) == 0:
new_name_choice = Name()
new_name_choice.nrId = nrd.id
# convert data to ascii, removing data that won't save to Oracle
new_name_choice.name = convert_to_ascii(new_name_choice.name)
nrd.names.append(new_name_choice)
for nrd_name in nrd.names.all():
orig_name = nrd_name.as_dict()
for in_name in json_input.get('names', []):
if len(nrd.names.all()) < in_name['choice']:
errors = names_schema.validate(in_name, partial=False)
if errors:
MessageServices.add_message(MessageServices.ERROR, 'names_validation', errors)
# return jsonify(errors), 400
new_name_choice = Name()
new_name_choice.nrId = nrd.id
names_schema.load(in_name, instance=new_name_choice, partial=False)
# convert data to ascii, removing data that won't save to Oracle
# - also force uppercase
new_name_choice.name = convert_to_ascii(new_name_choice.name.upper())
nrd.names.append(new_name_choice)
if new_name_choice.choice == 2:
is_changed__name2 = True
if new_name_choice.choice == 3:
is_changed__name3 = True
elif nrd_name.choice == in_name['choice']:
errors = names_schema.validate(in_name, partial=False)
if errors:
MessageServices.add_message(MessageServices.ERROR, 'names_validation', errors)
# return jsonify(errors), 400
names_schema.load(in_name, instance=nrd_name, partial=False)
# set comments (existing or cleared)
if in_name.get('comment', None) is not None:
# if there is a comment ID in data, just set it
if in_name['comment'].get('id', None) is not None:
nrd_name.commentId = in_name['comment'].get('id')
# if no comment id, it's a new comment, so add it
else:
# no business case for this at this point - this code will never run
pass
else:
nrd_name.comment = None
# convert data to ascii, removing data that won't save to Oracle
# - also force uppercase
nrd_name.name = convert_to_ascii(nrd_name.name)
if (nrd_name.name is not None):
nrd_name.name = nrd_name.name.upper()
# check if any of the Oracle db fields have changed, so we can send them back
# - this is only for editing a name from the Edit NR section, NOT making a decision
if nrd_name.name != orig_name['name']:
if nrd_name.choice == 1:
is_changed__name1 = True
json_input['comments'].append({'comment': 'Name choice 1 changed from {0} to {1}'
.format(orig_name['name'], nrd_name.name)})
if nrd_name.choice == 2:
is_changed__name2 = True
if not nrd_name.name:
deleted_names[nrd_name.choice - 1] = True
json_input['comments'].append({'comment': 'Name choice 2 changed from {0} to {1}'
.format(orig_name['name'], nrd_name.name)})
if nrd_name.choice == 3:
is_changed__name3 = True
if not nrd_name.name:
deleted_names[nrd_name.choice - 1] = True
json_input['comments'].append({'comment': 'Name choice 3 changed from {0} to {1}'
.format(orig_name['name'], nrd_name.name)})
### END names ###
### COMMENTS ###
# we only add new comments, we do not change existing comments
# - we can find new comments in json as those with no ID
# - This must come after names section above, to handle comments re. changed names.
for in_comment in json_input['comments']:
is_new_comment = False
try:
if in_comment['id'] is None or in_comment['id'] == 0:
is_new_comment = True
except KeyError:
is_new_comment = True
if is_new_comment and in_comment['comment'] is not None:
new_comment = Comment()
new_comment.comment = convert_to_ascii(in_comment['comment'])
new_comment.examiner = user
new_comment.nrId = nrd.id
### END comments ###
### NWPTA ###
is_changed__nwpta_ab = False
is_changed__nwpta_sk = False
for nrd_nwpta in nrd.partnerNS.all():
orig_nwpta = nrd_nwpta.as_dict()
for in_nwpta in json_input['nwpta']:
if nrd_nwpta.partnerJurisdictionTypeCd == in_nwpta['partnerJurisdictionTypeCd']:
errors = nwpta_schema.validate(in_nwpta, partial=False)
if errors:
MessageServices.add_message(MessageServices.ERROR, 'nwpta_validation', errors)
# return jsonify(errors), 400
nwpta_schema.load(in_nwpta, instance=nrd_nwpta, partial=False)
# convert data to ascii, removing data that won't save to Oracle
nrd_nwpta.partnerName = convert_to_ascii(nrd_nwpta.partnerName)
nrd_nwpta.partnerNameNumber = convert_to_ascii(nrd_nwpta.partnerNameNumber)
# check if any of the Oracle db fields have changed, so we can send them back
tmp_is_changed = False
if nrd_nwpta.partnerNameTypeCd != orig_nwpta['partnerNameTypeCd']:
tmp_is_changed = True
if nrd_nwpta.partnerNameNumber != orig_nwpta['partnerNameNumber']:
tmp_is_changed = True
if nrd_nwpta.partnerNameDate != orig_nwpta['partnerNameDate']:
tmp_is_changed = True
if nrd_nwpta.partnerName != orig_nwpta['partnerName']:
tmp_is_changed = True
if tmp_is_changed:
if nrd_nwpta.partnerJurisdictionTypeCd == 'AB':
is_changed__nwpta_ab = True
if nrd_nwpta.partnerJurisdictionTypeCd == 'SK':
is_changed__nwpta_sk = True
### END nwpta ###
# if there were errors, abandon changes and return the set of errors
warning_and_errors = MessageServices.get_all_messages()
if warning_and_errors:
for we in warning_and_errors:
if we['type'] == MessageServices.ERROR:
return jsonify(errors=warning_and_errors), 400
# update oracle if this nr was reset
# - first set status to H via name_examination proc, which handles clearing all necessary data and states
# - then set status to D so it's back in draft in NRO for customer to understand status
if reset:
current_app.logger.debug('set state to h for RESET')
try:
nro.set_request_status_to_h(nr, user.username)
except (NROServicesError, Exception) as err:
MessageServices.add_message('error', 'reset_request_in_NRO', err)
nrd.expirationDate = None
nrd.consentFlag = None
nrd.consent_dt = None
is_changed__request = True
is_changed_consent = True
change_flags = {
'is_changed__request': is_changed__request,
'is_changed__previous_request': False,
'is_changed__applicant': False,
'is_changed__address': False,
'is_changed__name1': False,
'is_changed__name2': False,
'is_changed__name3': False,
'is_changed__nwpta_ab': False,
'is_changed__nwpta_sk': False,
'is_changed__request_state': is_changed__request_state,
'is_changed_consent': is_changed_consent
}
warnings = nro.change_nr(nrd, change_flags)
if warnings:
MessageServices.add_message(MessageServices.ERROR, 'change_request_in_NRO', warnings)
# Update NR Details in NRO (not for reset)
else:
try:
change_flags = {
'is_changed__request': is_changed__request,
'is_changed__previous_request': is_changed__previous_request,
'is_changed__applicant': is_changed__applicant,
'is_changed__address': is_changed__address,
'is_changed__name1': is_changed__name1,
'is_changed__name2': is_changed__name2,
'is_changed__name3': is_changed__name3,
'is_changed__nwpta_ab': is_changed__nwpta_ab,
'is_changed__nwpta_sk': is_changed__nwpta_sk,
'is_changed__request_state': is_changed__request_state,
'is_changed_consent': is_changed_consent
}
# if any data has changed from an NR Details edit, update it in Oracle
if any(value is True for value in change_flags.values()):
warnings = nro.change_nr(nrd, change_flags)
if warnings:
MessageServices.add_message(MessageServices.ERROR, 'change_request_in_NRO', warnings)
else:
# now it's safe to delete any names that were blanked out
for nrd_name in nrd.names:
if deleted_names[nrd_name.choice - 1]:
nrd_name.delete_from_db()
except (NROServicesError, Exception) as err:
MessageServices.add_message('error', 'change_request_in_NRO', err)
# if there were errors, return the set of errors
warning_and_errors = MessageServices.get_all_messages()
if warning_and_errors:
for we in warning_and_errors:
if we['type'] == MessageServices.ERROR:
return jsonify(errors=warning_and_errors), 400
# Finally save the entire graph
nrd.save_to_db()
EventRecorder.record(user, Event.PUT, nrd, json_input)
except ValidationError as ve:
return jsonify(ve.messages), 400
except NoResultFound as nrf:
# not an error we need to track in the log
return jsonify(message='Request:{} not found'.format(nr)), 404
except Exception as err:
current_app.logger.error("Error when replacing NR:{0} Err:{1}".format(nr, err))
return jsonify(message='NR had an internal error'), 500
# if we're here, messaging only contains warnings
warning_and_errors = MessageServices.get_all_messages()
if warning_and_errors:
current_app.logger.debug(nrd.json(), warning_and_errors)
return jsonify(nameRequest=nrd.json(), warnings=warning_and_errors), 206
current_app.logger.debug(nrd.json())
return jsonify(nrd.json()), 200
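# Editorial note on the PUT flow above (summary only, no new behaviour): validation
# problems return 400, a missing NR returns 404, unexpected failures return 500,
# a successful save with outstanding warnings returns 206, and a clean save returns 200.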
@cors_preflight("GET")
@api.route('/<string:nr>/analysis/<int:choice>/<string:analysis_type>', methods=['GET', 'OPTIONS'])
class RequestsAnalysis(Resource):
"""Acting like a QUEUE this gets the next NR (just the NR number)
and assigns it to your auth id
:param nr (str): NameRequest Number in the format of 'NR 000000000'
:param choice (int): name choice number (1..3)
:param args: start: number of hits to start from, default is 0
:param args: names_per_page: number of names to return per page, default is 50
:param kwargs: __futures__
:return: 200 - success; 40X for errors
"""
START = 0
ROWS = 50
# @auth_services.requires_auth
# noinspection PyUnusedLocal,PyUnusedLocal
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(nr, choice, analysis_type, *args, **kwargs):
start = request.args.get('start', RequestsAnalysis.START)
rows = request.args.get('rows', RequestsAnalysis.ROWS)
if analysis_type not in ANALYTICS_VALID_ANALYSIS:
return jsonify(message='{analysis_type} is not a valid analysis type for that name choice'
.format(analysis_type=analysis_type)), 404
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return jsonify(message='{nr} not found'.format(nr=nr)), 404
nrd_name = nrd.names.filter_by(choice=choice).one_or_none()
if not nrd_name:
return jsonify(message='Name choice:{choice} not found for {nr}'.format(nr=nr, choice=choice)), 404
if analysis_type in RestrictedWords.RESTRICTED_WORDS:
results, msg, code = RestrictedWords.get_restricted_words_conditions(nrd_name.name)
else:
results, msg, code = SolrQueries.get_results(analysis_type, nrd_name.name, start=start, rows=rows)
if code:
return jsonify(message=msg), code
return jsonify(results), 200
@cors_preflight("GET")
@api.route('/synonymbucket/<string:name>/<string:advanced_search>', methods=['GET', 'OPTIONS'])
class SynonymBucket(Resource):
START = 0
ROWS = 1000
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(name, advanced_search, *args, **kwargs):
start = request.args.get('start', SynonymBucket.START)
rows = request.args.get('rows', SynonymBucket.ROWS)
exact_phrase = '' if advanced_search == '*' else advanced_search
results, msg, code = SolrQueries.get_conflict_results(name.upper(), bucket='synonym', exact_phrase=exact_phrase, start=start, rows=rows)
if code:
return jsonify(message=msg), code
return jsonify(results), 200
@cors_preflight("GET")
@api.route('/cobrsphonetics/<string:name>/<string:advanced_search>', methods=['GET', 'OPTIONS'])
class CobrsPhoneticBucket(Resource):
START = 0
ROWS = 500
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(name, advanced_search, *args, **kwargs):
start = request.args.get('start', CobrsPhoneticBucket.START)
rows = request.args.get('rows', CobrsPhoneticBucket.ROWS)
name = '' if name == '*' else name
exact_phrase = '' if advanced_search == '*' else advanced_search
results, msg, code = SolrQueries.get_conflict_results(name.upper(), bucket='cobrs_phonetic', exact_phrase=exact_phrase, start=start, rows=rows)
if code:
return jsonify(message=msg), code
return jsonify(results), 200
@cors_preflight("GET")
@api.route('/phonetics/<string:name>/<string:advanced_search>', methods=['GET', 'OPTIONS'])
class PhoneticBucket(Resource):
START = 0
ROWS = 100000
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(name, advanced_search, *args, **kwargs):
start = request.args.get('start', PhoneticBucket.START)
rows = request.args.get('rows', PhoneticBucket.ROWS)
name = '' if name == '*' else name
exact_phrase = '' if advanced_search == '*' else advanced_search
results, msg, code = SolrQueries.get_conflict_results(name.upper(), bucket='phonetic', exact_phrase=exact_phrase, start=start, rows=rows)
if code:
return jsonify(message=msg), code
return jsonify(results), 200
@cors_preflight("GET, PUT, PATCH")
@api.route('/<string:nr>/names/<int:choice>', methods=['GET', "PUT", "PATCH", 'OPTIONS'])
class NRNames(Resource):
@staticmethod
def common(nr, choice):
""":returns: object, code, msg
"""
if not RequestDAO.validNRFormat(nr):
return None, None, jsonify({'message': 'NR is not a valid format \'NR 9999999\''}), 400
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return None, None, jsonify({"message": "{nr} not found".format(nr=nr)}), 404
name = nrd.names.filter_by(choice=choice).one_or_none()
if not name:
return None, None, jsonify({"message": "Choice {choice} for {nr} not found".format(choice=choice, nr=nr)}), 404
return nrd, name, None, 200
# noinspection PyUnusedLocal,PyUnusedLocal
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(nr, choice, *args, **kwargs):
nrd, nrd_name, msg, code = NRNames.common(nr, choice)
if not nrd:
return msg, code
return names_schema.dumps(nrd_name).data, 200
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def put(nr, choice, *args, **kwargs):
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
errors = names_schema.validate(json_data, partial=False)
if errors:
return jsonify(errors), 400
errors = name_comment_schema.validate(json_data['comment'], partial=True)
if errors:
return jsonify(errors), 400
nrd, nrd_name, msg, code = NRNames.common(nr, choice)
if not nrd:
return msg, code
user = User.find_by_jwtToken(g.jwt_oidc_token_info)
if not check_ownership(nrd, user):
return jsonify({"message": "You must be the active editor and it must be INPROGRESS"}), 403
names_schema.load(json_data, instance=nrd_name, partial=False)
if json_data['comment'] is not None and json_data['comment']['comment'] is not None:
comment_instance = Comment()
name_comment_schema.load(json_data['comment'], instance=comment_instance, partial=True)
comment_instance.examinerId = user.id
comment_instance.nrId = nrd_name.nrId
comment_instance.save_to_db()
nrd_name.commentId = comment_instance.id
else:
nrd_name.comment = None
try:
nrd_name.save_to_db()
except Exception as error:
current_app.logger.error("Error on nrd_name update, Error:{0}".format(error))
return jsonify({"message": "Error on name update, saving to the db."}), 500
EventRecorder.record(user, Event.PUT, nrd, json_data)
return jsonify({"message": "Replace {nr} choice:{choice} with {json}".format(nr=nr, choice=choice, json=json_data)}), 200
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def patch(nr, choice, *args, **kwargs):
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
errors = names_schema.validate(json_data, partial=True)
if errors:
return jsonify(errors), 400
nrd, nrd_name, msg, code = NRNames.common(nr, choice)
if not nrd:
return msg, code
user = User.find_by_jwtToken(g.jwt_oidc_token_info)
if not check_ownership(nrd, user):
return jsonify({"message": "You must be the active editor and it must be INPROGRESS"}), 403
names_schema.load(json_data, instance=nrd_name, partial=True)
nrd_name.save_to_db()
EventRecorder.record(user, Event.PATCH, nrd, json_data)
return jsonify({"message": "Patched {nr} - {json}".format(nr=nr, json=json_data)}), 200
# TODO: This should be in its own file, not in the requests
@cors_preflight("GET")
@api.route('/decisionreasons', methods=['GET', 'OPTIONS'])
class DecisionReasons(Resource):
@staticmethod
@cors.crossdomain(origin='*')
def get():
response = []
for reason in DecisionReason.query.order_by(DecisionReason.name).all():
response.append(reason.json())
return jsonify(response), 200
@cors_preflight("GET")
@api.route('/<string:nr>/syncnr', methods=['GET', 'OPTIONS'])
class SyncNR(Resource):
@staticmethod
@cors.crossdomain(origin='*')
@jwt.has_one_of_roles([User.APPROVER, User.EDITOR])
def get(nr):
try:
user = get_or_create_user_by_jwt(g.jwt_oidc_token_info)
nrd = RequestDAO.find_by_nr(nr)
except NoResultFound as nrf:
# not an error we need to track in the log
return jsonify({"message": "Request:{} not found".format(nr)}), 404
except Exception as err:
current_app.logger.error("Error when patching NR:{0} Err:{1}".format(nr, err))
return jsonify({"message": "NR had an internal error"}), 404
if not nrd:
return jsonify({"message": "Request:{} not found".format(nr)}), 404
warnings = nro.move_control_of_request_from_nro(nrd, user, True)
if warnings:
resp = RequestDAO.query.filter_by(nrNum=nr.upper()).first_or_404().json()
resp['warnings'] = warnings
return jsonify(resp), 206
return jsonify(RequestDAO.query.filter_by(nrNum=nr.upper()).first_or_404().json())
@cors_preflight("GET")
@api.route('/stats', methods=['GET', 'OPTIONS'])
class Stats(Resource):
@staticmethod
@cors.crossdomain(origin='*')
@jwt.requires_auth
def get(*args, **kwargs):
# default is last 1 hour, but can be sent as parameter
timespan = int(request.args.get('timespan', 1))
# validate row & start params
start = request.args.get('currentpage', 1)
rows = request.args.get('perpage', 50)
try:
rows = int(rows)
start = (int(start) - 1) * rows
except Exception as err:
current_app.logger.info('start or rows not an int, err: {}'.format(err))
return jsonify({'message': 'paging parameters were not integers'}), 406
q = RequestDAO.query \
.filter(RequestDAO.stateCd.in_(State.COMPLETED_STATE))\
.filter(RequestDAO.lastUpdate >= text('(now() at time zone \'utc\') - INTERVAL \'{delay} HOURS\''.format(delay=timespan))) \
.order_by(RequestDAO.lastUpdate.desc())
count_q = q.statement.with_only_columns([func.count()]).order_by(None)
count = db.session.execute(count_q).scalar()
q = q.offset(start)
q = q.limit(rows)
# current_app.logger.debug(str(q.statement.compile(
# dialect=postgresql.dialect(),
# compile_kwargs={"literal_binds": True}))
# )
requests = q.all()
rep = {
'numRecords': count,
'nameRequests': request_search_schemas.dump(requests)[0]
}
return jsonify(rep)
@cors_preflight("POST")
@api.route('/<string:nr>/comments', methods=["POST", 'OPTIONS'])
class NRComment(Resource):
@staticmethod
def common(nr):
""":returns: object, code, msg
"""
if not RequestDAO.validNRFormat(nr):
return None, jsonify({'message': 'NR is not a valid format \'NR 9999999\''}), 400
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return None, jsonify({"message": "{nr} not found".format(nr=nr)}), 404
return nrd, None, 200
@staticmethod
@cors.crossdomain(origin='*')
@jwt.has_one_of_roles([User.APPROVER, User.EDITOR])
def post(nr, *args, **kwargs):
json_data = request.get_json()
if not json_data:
return jsonify({'message': 'No input data provided'}), 400
nrd, msg, code = NRComment.common(nr)
if not nrd:
return msg, code
errors = name_comment_schema.validate(json_data, partial=False)
if errors:
return jsonify(errors), 400
# find NR
try:
nrd = RequestDAO.find_by_nr(nr)
if not nrd:
return jsonify({"message": "Request:{} not found".format(nr)}), 404
except NoResultFound as nrf:
# not an error we need to track in the log
return jsonify({"message": "Request:{} not found".format(nr)}), 404
except Exception as err:
current_app.logger.error("Error when trying to post a comment NR:{0} Err:{1}".format(nr, err))
return jsonify({"message": "NR had an internal error"}), 404
nr_id = nrd.id
user = User.find_by_jwtToken(g.jwt_oidc_token_info)
if user is None:
return jsonify({'message': 'No User'}), 404
if json_data.get('comment') is None:
return jsonify({"message": "No comment supplied"}), 400
comment_instance = Comment()
comment_instance.examinerId = user.id
comment_instance.nrId = nr_id
comment_instance.comment = convert_to_ascii(json_data.get('comment'))
comment_instance.save_to_db()
EventRecorder.record(user, Event.POST, nrd, json_data)
return jsonify(comment_instance.as_dict()), 200
```
#### File: services/name_request/utils.py
```python
import re
from flask import current_app
from namex import jwt
from namex.constants import PaymentState, request_type_mapping, reverse_request_type_mapping
from namex.models import State, User
from namex.services import ServicesError
from .exceptions import MapRequestTypeError
nr_regex = r'^(NR\ ?L{0,1}|L{0,1})?([\d]{6,8})$'
def normalize_nr_num(nr_num_str):
matches = re.findall(nr_regex, nr_num_str, flags=re.IGNORECASE)
# If there's a match and the match has a second capturing group (valid NR digits) then proceed
if len(matches) == 1 and matches[0][1]:
# Get the first capturing group if it exists, convert to upper case, and remove any spaces
nr_type = str(matches[0][0]).upper().replace(' ', '') if matches[0][0] else 'NR'  # Default to NR if not supplied
# Grab the NR digits from the second capturing group
digits = matches[0][1]
if nr_type == 'NR':
return 'NR ' + digits
if nr_type in ['NRL', 'L']:
return 'NR L' + digits
return None
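# Illustrative behaviour of normalize_nr_num (editorial examples only):
# 'nr 1234567' -> 'NR 1234567', 'L123456' -> 'NR L123456', '1234567' -> 'NR 1234567',
# and any string that does not match nr_regex -> None.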
def is_temp_nr_num(nr_num_str):
matches = re.findall(nr_regex, nr_num_str, flags=re.IGNORECASE)
# If there's a match and the match has a second capturing group (valid NR digits) then proceed
if len(matches) == 1 and matches[0][1]:
# Get the first capturing group if it exists, convert to upper case, and remove any spaces
nr_type = str(matches[0][0]).upper().replace(' ', '') if matches[0][0] else 'NR'  # Default to NR if not supplied
if nr_type in ['NRL', 'L']:
return True
return False
def has_active_payment(nr, payment_id=None):
payments = nr.payments.all()
if payments and payment_id:
return len(list(filter(lambda p: p.id == payment_id, payments))) > 0
elif payments:
return len(list(filter(lambda p: p.payment_status_code == PaymentState.CREATED.value, payments))) > 0
def has_complete_payment(nr, payment_id=None):
payments = nr.payments.all()
if payments and payment_id:
return len(list(filter(lambda p: p.id == payment_id, payments))) > 0
elif payments:
return len(list(filter(lambda p: p.payment_status_code == PaymentState.COMPLETED.value, payments))) > 0
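# Editorial note on the two payment helpers above: when a payment_id is supplied,
# both only check that a payment with that id exists on the NR; without one they
# look for a CREATED (active) or a COMPLETED payment respectively.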
def get_active_payment(nr, payment_id):
payments = nr.payments.all()
if payments:
payments = list(filter(lambda p: p.id == payment_id, nr.payments.all()))
return payments[0] if len(payments) > 0 else None
return None
def get_mapped_request_type(entity_type, request_action):
output = None
for item in request_type_mapping:
if item[1] == entity_type and item[2] == request_action:
output = item
break
if output:
request_mapping = list(output)
return request_mapping
def get_mapped_entity_and_action_code(request_type):
output = None
for item in reverse_request_type_mapping:
if item[0] == request_type:
output = item
break
if output:
entity_type = output[1]
request_action = output[2]
return entity_type, request_action
else:
raise MapRequestTypeError(message='Error mapping the requestTypeCd to an entity type and action - no default was found in the request type mappings!')
# TODO: Move these out into auth utils in the main utils module
def check_ownership(nrd, user):
if nrd.stateCd == State.INPROGRESS and nrd.userId == user.id:
return True
return False
def get_or_create_user_by_jwt(jwt_oidc_token):
# GET existing or CREATE new user based on the JWT info
try:
user = User.find_by_jwtToken(jwt_oidc_token)
current_app.logger.debug('finding user: {}'.format(jwt_oidc_token))
if not user:
current_app.logger.debug(
'didnt find user, attempting to create new user from the JWT info:{}'.format(jwt_oidc_token))
user = User.create_from_jwtToken(jwt_oidc_token)
return user
except Exception as err:
current_app.logger.error(err.with_traceback(None))
raise ServicesError('unable_to_get_or_create_user',
'{"code": "unable_to_get_or_create_user",'
'"description": "Unable to get or create user from the JWT, ABORT"}'
)
def valid_state_transition(user, nr, new_state):
"""
:param user:
:param nr:
:param new_state:
:return: (bool)
"""
if (new_state in (State.APPROVED,
State.REJECTED,
State.CONDITIONAL)) \
and not jwt.validate_roles([User.APPROVER]):
return False
# allow any type of user to CANCEL an NR
if new_state == State.CANCELLED and nr.stateCd in State.CANCELLABLE_STATES:
return True
# NR is in a final state, but maybe the user wants to pull it back for corrections
if nr.stateCd in State.COMPLETED_STATE:
if not jwt.validate_roles([User.APPROVER]) and not jwt.validate_roles([User.EDITOR]):
return False
# return jsonify({"message": "Only Names Examiners can alter completed Requests"}), 401
# TODO what are the business rules about editing a finalized name
# if nr.furnished == Request.REQUEST_FURNISHED:
# return jsonify({"message": "Request has already been furnished and cannot be altered"}), 409
# A completed Request can only be moved to editable (INPROGRESS)
# OR remain in its current state (editing a closed request)
if new_state != State.INPROGRESS and new_state != nr.stateCd:
return False
elif new_state in State.RELEASE_STATES:
if nr.userId != user.id or nr.stateCd != State.INPROGRESS:
return False
elif nr.stateCd == State.INPROGRESS:
if nr.userId != user.id:
return False
return True
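# Editorial summary of valid_state_transition (derived from the code above):
# - APPROVED/REJECTED/CONDITIONAL require the approver role;
# - any user may CANCEL an NR that is in a cancellable state;
# - a completed NR may only move back to INPROGRESS (or stay in its current state),
#   and only for approvers or editors;
# - releasing or editing an INPROGRESS NR requires being its current editor.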
def get_item_from_list(items, item_id, item_prop='id'):
"""
:param items:
:param item_id:
:param item_prop:
:return:
"""
matches = [i for i in items if getattr(i, item_prop) == item_id]
if len(matches) == 0:
return None
if len(matches) == 1:
return matches[0]
if len(matches) > 1:
raise Exception('More than one match found for a given ID!')
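# Illustrative call (hypothetical arguments): get_item_from_list(nr.names, 2, item_prop='choice')
# returns the single matching item, None when nothing matches, and raises when the
# id is duplicated in the list.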
```
#### File: python/models/test_user.py
```python
from namex.models import User
def test_user(session, client):
"""Start with a blank database."""
user1 = User(username='thor', firstname='thor', lastname='g', sub='abcdefg', iss='http://nowhere.localdomain')
session.add(user1)
session.commit()
assert user1.id is not None
``` |
{
"source": "jordiwes/ppr",
"score": 2
} |
#### File: ppr_api/models/trust_indenture.py
```python
from __future__ import annotations
from .db import db
TRUST_INDENTURE_YES = 'Y'
TRUST_INDENTURE_NO = 'N'
class TrustIndenture(db.Model): # pylint: disable=too-many-instance-attributes
"""This class manages all of the financing statement trust indenture information."""
__tablename__ = 'trust_indentures'
id = db.Column('id', db.Integer, db.Sequence('trust_id_seq'), primary_key=True)
trust_indenture = db.Column('trust_indenture', db.String(1), nullable=False)
# parent keys
registration_id = db.Column('registration_id', db.Integer, db.ForeignKey('registrations.id'), nullable=False,
index=True)
financing_id = db.Column('financing_id', db.Integer, db.ForeignKey('financing_statements.id'), nullable=False,
index=True)
registration_id_end = db.Column('registration_id_end', db.Integer, nullable=True,
index=True)
# db.ForeignKey('registration.registration_id'), nullable=True)
# Relationships - Registration
registration = db.relationship('Registration', foreign_keys=[registration_id],
back_populates='trust_indenture', cascade='all, delete', uselist=False)
# registration_end = db.relationship("Registration", foreign_keys=[registration_id_end])
# Relationships - FinancingStatement
financing_statement = db.relationship('FinancingStatement', foreign_keys=[financing_id],
back_populates='trust_indenture', cascade='all, delete', uselist=False)
@classmethod
def find_by_id(cls, trust_id: int = None):
"""Return a trust indenture object by expiry ID."""
trust_indenture = None
if trust_id:
trust_indenture = cls.query.get(trust_id)
return trust_indenture
@classmethod
def find_by_registration_id(cls, registration_id: int):
"""Return a list of trust indenture objects by registration number."""
trust_indenture = None
if registration_id:
trust_indenture = cls.query.filter(TrustIndenture.registration_id == registration_id) \
.order_by(TrustIndenture.id).one_or_none()
return trust_indenture
@classmethod
def find_by_financing_id(cls, financing_id: int):
"""Return a list of trust indenture objects by financing statement ID."""
trust_indenture = None
if financing_id:
trust_indenture = cls.query.filter(TrustIndenture.financing_id == financing_id) \
.order_by(TrustIndenture.id).all()
return trust_indenture
@staticmethod
def create_from_json(json_data, registration_id: int = None):
"""Create a trust indenture object from a json schema object: map json to db."""
trust_indenture = TrustIndenture()
if registration_id:
trust_indenture.registration_id = registration_id
if 'trustIndenture' in json_data and json_data['trustIndenture']:
trust_indenture.trust_indenture = TRUST_INDENTURE_YES
else:
trust_indenture.trust_indenture = TRUST_INDENTURE_NO
return [trust_indenture]
@staticmethod
def create_from_amendment_json(financing_id: int, registration_id: int):
"""Create a trust indenture object as part of an amendment registration: map json to db."""
trust_indenture = TrustIndenture()
trust_indenture.registration_id = registration_id
trust_indenture.financing_id = financing_id
trust_indenture.trust_indenture = TRUST_INDENTURE_YES
return trust_indenture
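# Illustrative mapping (editorial, mirroring the two factories above):
# create_from_json({'trustIndenture': True}, reg_id)  -> trust_indenture == 'Y'
# create_from_json({}, reg_id)                        -> trust_indenture == 'N'
# create_from_amendment_json(fin_id, reg_id)          -> always 'Y'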
```
#### File: ppr_api/reports/__init__.py
```python
from http import HTTPStatus
from flask import jsonify
from flask_babel import _
from ppr_api.resources.utils import get_account_name
from .report import Report, ReportTypes
def get_pdf(report_data, account_id, report_type=None, token=None):
"""Generate a PDF of the provided report type using the provided data."""
try:
account_name = get_account_name(token, account_id)
return Report(report_data, account_id, report_type, account_name).get_pdf()
except FileNotFoundError:
# We don't have a template for it, so it must only be available on paper.
return jsonify({'message': _('No PDF report found.')}), HTTPStatus.NOT_FOUND
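# Illustrative usage (report_type value is an assumption, not defined in this module):
# response = get_pdf(search_result_json, account_id, report_type, jwt_token)
# returns whatever Report.get_pdf produces, or a (json, 404) tuple when no template exists.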
```
#### File: ppr_api/resources/utils.py
```python
from http import HTTPStatus
from flask import jsonify, current_app
from ppr_api.exceptions import BusinessException
from ppr_api.services.authz import user_orgs
from ppr_api.utils.validators import financing_validator, party_validator, registration_validator
ACCOUNT_REQUIRED = 'Account-Id header required.'
CROWN_CHARGE_FORBIDDEN = 'The account ID {account_id} is not authorized to access a Crown Charge registration.'
def serialize(errors):
"""Serialize errors."""
error_message = []
if errors:
for error in errors:
error_message.append('Schema validation: ' + error.message + '.')
return error_message
def get_account_id(req):
"""Get account ID from request headers."""
return req.headers.get('Account-Id')
def is_pdf(req):
"""Check if request headers Accept is application/pdf."""
accept = req.headers.get('Accept')
return accept and accept.upper() == 'APPLICATION/PDF'
def get_apikey(req):
"""Get gateway api key from request headers."""
return req.headers.get('x-apikey')
def account_required_response():
"""Build account required error response."""
return jsonify({'message': ACCOUNT_REQUIRED}), HTTPStatus.BAD_REQUEST
def validation_error_response(errors, cause, additional_msg: str = None):
"""Build a schema validation error response."""
details = serialize(errors)
if additional_msg:
details.append('Additional validation: ' + additional_msg)
return jsonify({'message': cause, 'detail': details}), HTTPStatus.BAD_REQUEST
def business_exception_response(exception):
"""Build business exception error response."""
current_app.logger.error(repr(exception))
return jsonify({'message': exception.error}), exception.status_code
def pay_exception_response(exception):
"""Build pay 402 exception error response."""
current_app.logger.error(repr(exception))
return jsonify({'message': repr(exception)}), HTTPStatus.PAYMENT_REQUIRED
def default_exception_response(exception):
"""Build default 500 exception error response."""
current_app.logger.error(repr(exception))
return jsonify({'message': repr(exception)}), HTTPStatus.INTERNAL_SERVER_ERROR
def not_found_error_response(item, key):
"""Build a not found error response."""
message = f'No {item} found for {key}.'
current_app.logger.info(str(HTTPStatus.NOT_FOUND.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.NOT_FOUND
def duplicate_error_response(message):
"""Build a duplicate request error response."""
current_app.logger.info(str(HTTPStatus.CONFLICT.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.CONFLICT
def unauthorized_error_response(account_id):
"""Build an unauthorized error response."""
message = f'Authorization failure submitting a request for {account_id}.'
current_app.logger.info(str(HTTPStatus.UNAUTHORIZED.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.UNAUTHORIZED
def cc_forbidden_error_response(account_id):
"""Build a crown charge registration class access forbidden error response."""
message = CROWN_CHARGE_FORBIDDEN.format(account_id=account_id)
current_app.logger.info(str(HTTPStatus.FORBIDDEN.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.FORBIDDEN
def path_param_error_response(param_name):
"""Build a bad request param missing error response."""
message = f'A {param_name} path parameter is required.'
current_app.logger.info(str(HTTPStatus.BAD_REQUEST.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.BAD_REQUEST
def unprocessable_error_response(description):
"""Build an unprocessable entity error response."""
message = f'The {description} request could not be processed (no change/results).'
current_app.logger.info(str(HTTPStatus.UNPROCESSABLE_ENTITY.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.UNPROCESSABLE_ENTITY
def path_data_mismatch_error_response(path_value, description, data_value):
"""Build a bad request path param - payload data mismatch error."""
message = f'The path value ({path_value}) does not match the data ' + \
f'{description} value ({data_value}).'
current_app.logger.info(str(HTTPStatus.BAD_REQUEST.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.BAD_REQUEST
def historical_error_response(reg_num):
"""Build a bad request financing statement discharged (non-staff) error response."""
message = f'The Financing Statement for registration number {reg_num} has been discharged.'
current_app.logger.info(str(HTTPStatus.BAD_REQUEST.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.BAD_REQUEST
def base_debtor_invalid_response():
"""Build an error response for no match on base debtor name."""
message = 'No exact match found for provided base debtor name.'
current_app.logger.info(str(HTTPStatus.BAD_REQUEST.value) + ': ' + message)
return jsonify({'message': message}), HTTPStatus.BAD_REQUEST
def validate_financing(json_data):
"""Perform non-schema extra validation on a financing statement."""
error_msg = party_validator.validate_financing_parties(json_data)
error_msg += financing_validator.validate(json_data)
return error_msg
def validate_registration(json_data):
"""Perform non-schema extra validation on a non-financing registrations."""
error_msg = party_validator.validate_registration_parties(json_data)
error_msg += registration_validator.validate_collateral_ids(json_data)
return error_msg
def validate_delete_ids(json_data, financing_statement):
"""Perform non-schema extra validation on a change amendment delete party, collateral ID's."""
error_msg = party_validator.validate_party_ids(json_data, financing_statement)
error_msg += registration_validator.validate_collateral_ids(json_data, financing_statement)
if error_msg != '':
raise BusinessException(
error=error_msg,
status_code=HTTPStatus.BAD_REQUEST
)
def get_account_name(token: str, account_id: str = None):
"""Lookup the account organization name from the user token with an auth api call."""
orgs = user_orgs(token)
if orgs and 'orgs' in orgs:
if (len(orgs['orgs']) == 1 or not account_id):
return orgs['orgs'][0]['name']
for org in orgs['orgs']:
if org['id'] == int(account_id):
return org['name']
return None
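# Behaviour sketch (editorial): a single org, or a missing account_id, returns the
# first org's name; otherwise the org whose id equals account_id is returned, and
# None when no org matches or the token has no orgs.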
```
#### File: unit/api/test_utils.py
```python
import pytest
from flask import current_app
from ppr_api.resources.utils import get_account_name
from ppr_api.services.authz import PPR_ROLE
from tests.unit.services.utils import helper_create_jwt
MOCK_URL_NO_KEY = 'https://bcregistry-bcregistry-mock.apigee.net/mockTarget/auth/api/v1/'
# testdata pattern is ({description}, {account id}, {has name})
TEST_USER_ORGS_DATA_JSON = [
('Valid no account', None, True),
('Valid account', '2617', True),
('No token', '<PASSWORD>', False),
]
@pytest.mark.parametrize('desc, account_id, has_name', TEST_USER_ORGS_DATA_JSON)
def test_get_account_name(session, client, jwt, desc, account_id, has_name):
"""Assert that a get user profile returns the expected response code and data."""
# setup
current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)
token = helper_create_jwt(jwt, [PPR_ROLE]) if has_name else None
# test
name = get_account_name(token, account_id)
# check
if has_name:
assert name
else:
assert not name
```
#### File: unit/models/test_utils.py
```python
import copy
from datetime import timedelta as _timedelta
import pytest
from registry_schemas.example_data.ppr import AMENDMENT_STATEMENT
from ppr_api.models import utils as model_utils
# testdata pattern is ({registration_ts}, {years}, {expiry_ts})
TEST_DATA_EXPIRY = [
('2021-08-31T00:00:01-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T01:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T04:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T08:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T12:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T16:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T16:01:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T17:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T17:01:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T18:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T19:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T20:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T21:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T22:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T23:00:00-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-08-31T23:59:59-07:00', 1, '2022-09-01T06:59:59+00:00'),
('2021-09-01T00:00:01-07:00', 4, '2025-09-02T06:59:59+00:00'),
('2021-11-30T00:00:01-08:00', 4, '2025-12-01T07:59:59+00:00')
]
# testdata pattern is ({registration_ts}, {expiry_ts})
TEST_DATA_EXPIRY_RL = [
('2021-08-31T00:00:01-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T01:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T04:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T08:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T12:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T16:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T16:01:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T17:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T18:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T19:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T20:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T21:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T22:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T23:00:00-07:00', '2022-02-28T07:59:59+00:00'),
('2021-08-31T23:59:59-07:00', '2022-02-28T07:59:59+00:00'),
('2021-01-01T01:00:00-07:00', '2021-07-01T06:59:59+00:00')
]
# testdata pattern is ({registration_ts}, {add_1}, {add_2}, {add_3}, {expiry_ts})
TEST_DATA_EXPIRY_ADD = [
('2021-08-31T00:00:01-07:00', 10, 5, 2, '2038-09-01T06:59:59+00:00'),
('2021-01-31T00:00:01-08:00', 2, 3, 5, '2031-02-01T07:59:59+00:00')
]
# testdata pattern is ({desc}, {registration_ts}, {renew_count}, {expiry_ts})
TEST_DATA_EXPIRY_RENEW_RL = [
('Registration', '2021-08-31T00:00:01-07:00', 0, '2022-02-28T07:59:59+00:00'),
('1 renewal', '2021-08-31T00:00:01-07:00', 1, '2022-08-27T06:59:59+00:00'),
('2 renewals', '2021-08-31T00:00:01-07:00', 2, '2023-02-23T07:59:59+00:00')
]
# testdata pattern is ({desc}, {registration_ts}, {life_years}, {hour})
TEST_DATA_EXPIRY_REGISTRATION = [
('Daylight savings', '2021-08-31T12:00:01-07:00', 5, 6),
('No daylight savings', '2021-01-31T13:00:01-07:00', 10, 7)
]
# testdata pattern is ({desc}, {utc_ts}, {local_ts})
TEST_DATA_LOCAL_TIMEZONE = [
('Daylight savings', '2021-09-01T06:59:59-00:00', '2021-08-31T23:59:59-07:00'),
('No daylight savings', '2021-02-01T07:59:59-00:00', '2021-01-31T23:59:59-08:00')
]
# testdata pattern is ({change_type}, {is_general_collateral})
TEST_DATA_AMENDMENT_CHANGE_TYPE = [
(model_utils.REG_TYPE_AMEND, False),
(model_utils.REG_TYPE_AMEND_COURT, False),
(model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL, False),
(model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL, False),
(model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL, True),
(model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL, True),
(model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE, False),
(model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER, False),
(model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE, False),
(model_utils.REG_TYPE_AMEND_SP_TRANSFER, False)
]
@pytest.mark.parametrize('registration_ts,offset,expiry_ts', TEST_DATA_EXPIRY)
def test_expiry_date(session, registration_ts, offset, expiry_ts):
"""Assert that computing expiry ts from registraton ts works as expected."""
# reg_ts = model_utils.ts_from_iso_format(registration_ts)
expiry_test = model_utils.expiry_dt_from_years(offset, registration_ts)
expiry_iso = model_utils.format_ts(expiry_test)
# print(registration_ts + ', ' + model_utils.format_ts(reg_ts) + ', ' + expiry_iso)
assert expiry_ts == expiry_iso
@pytest.mark.parametrize('registration_ts,expiry_ts', TEST_DATA_EXPIRY_RL)
def test_expiry_date_rl(session, registration_ts, expiry_ts):
"""Assert that computing an RL expiry ts from registraton ts works as expected."""
reg_ts = model_utils.ts_from_iso_format(registration_ts)
expiry_test = model_utils.expiry_dt_repairer_lien(reg_ts)
expiry_iso = model_utils.format_ts(expiry_test)
# print(registration_ts + ', ' + model_utils.format_ts(reg_ts) + ', ' + expiry_iso)
assert expiry_ts == expiry_iso
@pytest.mark.parametrize('registration_ts,add_1,add_2,add_3,expiry_ts', TEST_DATA_EXPIRY_ADD)
def test_expiry_date_add(session, registration_ts, add_1, add_2, add_3, expiry_ts):
"""Assert that computing an renewal non-RL expiry ts from registraton ts works as expected."""
reg_ts = model_utils.ts_from_iso_format(registration_ts)
expiry_add_1 = model_utils.expiry_dt_from_years(add_1, registration_ts)
assert expiry_add_1.year - add_1 == reg_ts.year
assert expiry_add_1.hour in (6, 7)
assert expiry_add_1.minute == 59
assert expiry_add_1.second == 59
expiry_add_2 = model_utils.expiry_dt_add_years(expiry_add_1, add_2)
assert expiry_add_2.year - expiry_add_1.year == add_2
assert expiry_add_2.hour in (6, 7)
assert expiry_add_2.minute == 59
assert expiry_add_2.second == 59
expiry_add_3 = model_utils.expiry_dt_add_years(expiry_add_2, add_3)
assert expiry_add_3.year - expiry_add_2.year == add_3
assert expiry_add_3.hour in (6, 7)
assert expiry_add_3.minute == 59
assert expiry_add_3.second == 59
expiry_iso = model_utils.format_ts(expiry_add_3)
# print(registration_ts + ', ' + model_utils.format_ts(reg_ts) + ', ' + expiry_iso)
assert expiry_ts == expiry_iso
@pytest.mark.parametrize('desc,registration_ts,renew_count,expiry_ts', TEST_DATA_EXPIRY_RENEW_RL)
def test_expiry_date_renew_rl(session, desc, registration_ts, renew_count, expiry_ts):
"""Assert that computing multiple RL renewal expiry ts from registration ts works as expected."""
reg_ts = model_utils.ts_from_iso_format(registration_ts)
expiry_test = model_utils.expiry_dt_repairer_lien(reg_ts)
# print(model_utils.format_ts(expiry_test))
if renew_count > 0:
for x in range(renew_count):
expiry_test = model_utils.expiry_dt_repairer_lien(expiry_test)
# print(model_utils.format_ts(expiry_test))
expiry_iso = model_utils.format_ts(expiry_test)
assert expiry_ts == expiry_iso
def test_expiry_dt_from_years():
"""Assert that generating an expiry date from life years is performing as expected."""
expiry_ts = model_utils.expiry_dt_from_years(5)
now_ts = model_utils.now_ts()
print('Expiry timestamp: ' + model_utils.format_ts(expiry_ts))
print('Now timestamp: ' + model_utils.format_ts(now_ts))
assert (expiry_ts.year - now_ts.year) == 5
assert expiry_ts.hour in (6, 7)
assert expiry_ts.minute == 59
assert expiry_ts.second == 59
assert expiry_ts.day in (1, now_ts.day, (now_ts.day + 1))
assert expiry_ts.month in (now_ts.month, (now_ts.month + 1))
def test_ts_from_iso_format():
"""Assert that creating a UTC datetime object from an ISO date-time formatted string is performing as expected."""
test_ts = model_utils.ts_from_iso_format('2021-02-16T23:00:00-08:00')
print('Test timestamp: ' + model_utils.format_ts(test_ts))
assert test_ts.day == 17
assert test_ts.month == 2
assert test_ts.year == 2021
assert test_ts.hour == 7
assert test_ts.minute == 0
assert test_ts.second == 0
test_ts = model_utils.ts_from_iso_format('2021-02-16T23:00:00+00:00')
print('Test timestamp: ' + model_utils.format_ts(test_ts))
assert test_ts.day == 16
assert test_ts.hour == 23
test_ts = model_utils.ts_from_iso_format('2021-02-16T13:00:00-08:00')
# print('Test timestamp: ' + model_utils.format_ts(test_ts))
assert test_ts.day == 16
assert test_ts.hour == 21
test_ts = model_utils.ts_from_iso_format('2021-03-31T23:00:00-08:00')
# print('Test timestamp: ' + model_utils.format_ts(test_ts))
assert test_ts.month == 4
assert test_ts.day == 1
assert test_ts.hour == 7
def test_ts_from_date_iso_format():
"""Assert that creating a UTC datetime object from an ISO date-time formatted string is performing as expected."""
test_ts = model_utils.ts_from_date_iso_format('2021-02-16')
print('Test timestamp: ' + model_utils.format_ts(test_ts))
assert test_ts.day in (16, 17)
assert test_ts.month == 2
assert test_ts.year == 2021
if test_ts.day == 16:
assert test_ts.hour >= 8
else:
assert test_ts.hour <= 7
def test_now_ts_offset():
"""Assert that adjusting UTC now by a number of days is performing as expected."""
now_ts = model_utils.now_ts() + _timedelta(days=60)
test_ts = model_utils.now_ts_offset(60, True)
print('Now timestamp + 60 days: ' + model_utils.format_ts(test_ts))
assert test_ts.day == now_ts.day
assert test_ts.month == now_ts.month
assert test_ts.year == now_ts.year
now_ts = model_utils.now_ts() - _timedelta(days=60)
test_ts = model_utils.now_ts_offset(60, False)
print('Now timestamp - 60 days: ' + model_utils.format_ts(test_ts))
assert test_ts.day == now_ts.day
assert test_ts.month == now_ts.month
assert test_ts.year == now_ts.year
def test_today_ts_offset():
"""Assert that adjusting UTC today by a number of days is performing as expected."""
test_now_ts = model_utils.now_ts_offset(7, False)
test_today_ts = model_utils.today_ts_offset(7, False)
# print('test now - 7 days: ' + model_utils.format_ts(test_now_ts))
# print('test today - 7 days: ' + model_utils.format_ts(test_today_ts))
assert test_today_ts.hour == 0
assert test_today_ts.minute == 0
assert test_today_ts.second == 0
assert test_today_ts < test_now_ts
def test_expiry_dt_repairer_lien_now():
"""Assert that the computed expiry date for a repairer's lien performs as expected."""
test_ts = model_utils.expiry_dt_repairer_lien()
now_ts = model_utils.now_ts()
delta = test_ts - now_ts
assert delta.days == model_utils.REPAIRER_LIEN_DAYS
assert test_ts.hour in (6, 7)
assert test_ts.minute == 59
assert test_ts.second == 59
@pytest.mark.parametrize('desc,registration_ts,life_years,hour', TEST_DATA_EXPIRY_REGISTRATION)
def test_expiry_dt_from_registration(session, desc, registration_ts, life_years, hour):
"""Assert that creating an expiry timestamp from a registration timestamp is performing as expected."""
test_ts = model_utils.ts_from_iso_format(registration_ts)
expiry_ts = model_utils.expiry_dt_from_registration(test_ts, life_years)
print(model_utils.format_ts(expiry_ts))
assert expiry_ts.year - test_ts.year == life_years
assert expiry_ts.hour == hour
assert expiry_ts.minute == 59
assert expiry_ts.second == 59
@pytest.mark.parametrize('desc,utc_ts,local_ts', TEST_DATA_LOCAL_TIMEZONE)
def test_to_local_timezone(session, desc, utc_ts, local_ts):
"""Assert that converting UTC time to local time is performing as expected."""
adjusted_ts = model_utils.to_local_timestamp(model_utils.ts_from_iso_format(utc_ts))
local_iso = adjusted_ts.isoformat()
print(utc_ts + ' ' + local_iso + ' ' + local_ts)
assert adjusted_ts.hour == 23
assert adjusted_ts.minute == 59
assert adjusted_ts.second == 59
assert local_iso == local_ts
@pytest.mark.parametrize('change_type, is_general_collateral', TEST_DATA_AMENDMENT_CHANGE_TYPE)
def test_amendment_change_type(change_type, is_general_collateral):
"""Assert that setting the amendment change type from the amendment data works as expected."""
json_data = copy.deepcopy(AMENDMENT_STATEMENT)
if change_type != model_utils.REG_TYPE_AMEND_COURT:
del json_data['courtOrderInformation']
if change_type != model_utils.REG_TYPE_AMEND:
del json_data['addTrustIndenture']
del json_data['removeTrustIndenture']
if change_type in (model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL,
model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL,
model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE):
del json_data['addSecuredParties']
del json_data['deleteSecuredParties']
del json_data['addDebtors']
del json_data['deleteDebtors']
if change_type == model_utils.REG_TYPE_AMEND_PARIAL_DISCHARGE:
del json_data['addVehicleCollateral']
del json_data['addGeneralCollateral']
del json_data['deleteGeneralCollateral']
elif change_type == model_utils.REG_TYPE_AMEND_ADDITION_COLLATERAL:
del json_data['deleteVehicleCollateral']
del json_data['deleteGeneralCollateral']
if is_general_collateral:
del json_data['addVehicleCollateral']
else:
del json_data['addGeneralCollateral']
elif change_type == model_utils.REG_TYPE_AMEND_SUBSTITUTION_COLLATERAL:
if is_general_collateral:
del json_data['addVehicleCollateral']
del json_data['deleteVehicleCollateral']
else:
del json_data['addGeneralCollateral']
del json_data['deleteGeneralCollateral']
if change_type in (model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE,
model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER,
model_utils.REG_TYPE_AMEND_SP_TRANSFER):
del json_data['addVehicleCollateral']
del json_data['deleteVehicleCollateral']
del json_data['addGeneralCollateral']
del json_data['deleteGeneralCollateral']
if change_type == model_utils.REG_TYPE_AMEND_DEBTOR_RELEASE:
del json_data['addSecuredParties']
del json_data['deleteSecuredParties']
del json_data['addDebtors']
elif change_type == model_utils.REG_TYPE_AMEND_DEBTOR_TRANSFER:
del json_data['addSecuredParties']
del json_data['deleteSecuredParties']
elif change_type == model_utils.REG_TYPE_AMEND_SP_TRANSFER:
del json_data['addDebtors']
del json_data['deleteDebtors']
# print(json_data)
type = model_utils.amendment_change_type(json_data)
assert type == change_type
def test_cleanup_amendment():
"""Assert that removing empty lists/arrays from amendment data works as expected."""
json_data = copy.deepcopy(AMENDMENT_STATEMENT)
# print(json_data)
json_data = model_utils.cleanup_amendment(json_data)
assert 'addVehicleCollateral' in json_data
assert 'deleteVehicleCollateral' in json_data
assert 'addGeneralCollateral' in json_data
assert 'deleteGeneralCollateral' in json_data
assert 'addSecuredParties' in json_data
assert 'deleteSecuredParties' in json_data
assert 'addDebtors' in json_data
assert 'deleteDebtors' in json_data
json_data['addVehicleCollateral'] = []
json_data['deleteVehicleCollateral'] = []
json_data['addGeneralCollateral'] = []
json_data['deleteGeneralCollateral'] = []
json_data['addSecuredParties'] = []
json_data['deleteSecuredParties'] = []
json_data['addDebtors'] = []
json_data['deleteDebtors'] = []
json_data = model_utils.cleanup_amendment(json_data)
assert 'addVehicleCollateral' not in json_data
assert 'deleteVehicleCollateral' not in json_data
assert 'addGeneralCollateral' not in json_data
assert 'deleteGeneralCollateral' not in json_data
assert 'addSecuredParties' not in json_data
assert 'deleteSecuredParties' not in json_data
assert 'addDebtors' not in json_data
assert 'deleteDebtors' not in json_data
``` |
{
"source": "jordiyapz/ai-toolkit",
"score": 3
} |
#### File: model/gea/__init__.py
```python
from random import random as rand
from math import floor
from jordiyapz_ai_toolkit.model.gea.util import Individu, Stop
class Gea:
def __init__(self,
fungsi_fitness: callable,
ranges: tuple,
resolusi: int,
ukuran_populasi=50):
for rg in ranges:
assert len(rg) == 2
self.fungsi_fitness = fungsi_fitness
self.resolusi = resolusi
self.ranges = ranges
self.ukuran_populasi = ukuran_populasi
self.reset()
def reset(self):
self.best_individu = (None, 0)
self.fitness = 0
self.populasi = [Individu(self.ranges, self.resolusi)
for _ in range(self.ukuran_populasi)]
def __hitungFitness(self):
fitness_list = []
for individu in self.populasi:
fenotip = individu.getFenotip()
fit = self.fungsi_fitness(fenotip)
fitness_list.append(fit)
return fitness_list
def __hitungPeluang(self, fitness_list, verbose=False):
total_fitness = sum(fitness_list)
peluang_list = list(map(lambda x: x/total_fitness, fitness_list))
return peluang_list
def __printTabel(self, fitness_list, peluang_list, verbose=2):
if verbose > 2:
print('fen', 'fitness', 'peluang', 'kromosom', sep='\t')
for individu, fit, peluang in zip(self.populasi,
fitness_list,
peluang_list):
fen = individu.getFenotip()
print(tuple('%.2f' % f for f in fen),
'%.2f' % fit,
'%.2f' % peluang,
individu.kromosom,
sep='\t')
print('fitness: %.2f' % max(fitness_list))
def __rouletteWheel(self, peluang_list):
r = rand()
batas = 0
for idx, p in enumerate(peluang_list):
batas += p
if r < batas:
return idx
def __seleksiOrtu(self, peluang_list, banyak_pasangan):
return [(self.__rouletteWheel(peluang_list),
self.__rouletteWheel(peluang_list))
for _ in range(banyak_pasangan)]
def __pindahSilang(self, pasangan):
tipot = floor(rand() * (len(pasangan[0].kromosom) - 1)) + 1
k1 = pasangan[0].kromosom[:tipot] + pasangan[1].kromosom[tipot:]
k2 = pasangan[1].kromosom[:tipot] + pasangan[0].kromosom[tipot:]
return (k1, k2)
def __mutasi(self, peluang_mutasi):
for individu in self.populasi:
if (rand() < peluang_mutasi):
posisi = floor(rand() * len(individu.kromosom))
individu.kromosom[posisi] = (individu.kromosom[posisi] + 1) % 2
def __urutanPopulasi(self, populasi, urutan):
urutan_list = sorted(range(len(urutan)), key=lambda i: urutan[i])
urutan_list.reverse()
populasi_terurut = [populasi[urutan] for urutan in urutan_list]
return populasi_terurut
def __routineFitPel(self, verbose):
fitness_list = self.__hitungFitness()
peluang_list = self.__hitungPeluang(fitness_list)
best_fit = max(fitness_list)
if best_fit > self.best_individu[1]:
self.best_individu = (self.populasi[fitness_list.index(best_fit)],
best_fit)
if verbose:
if verbose == 1:
print('*', end='')
else:
self.__printTabel(fitness_list, peluang_list, verbose)
return (best_fit, peluang_list)
def __routineRegenerasi(self, peluang_list, crossover_rate):
banyak_pasangan = round((self.ukuran_populasi * crossover_rate) / 2)
pasangan_index_ortu = self.__seleksiOrtu(peluang_list, banyak_pasangan)
self.populasi = self.__urutanPopulasi(self.populasi, peluang_list)
populasi_baru = []
for pasangan_idx in pasangan_index_ortu:
pasangan = tuple(self.populasi[idx] for idx in pasangan_idx)
kromosom_list = self.__pindahSilang(pasangan)
for kromosom in kromosom_list:
populasi_baru.append(Individu(self.ranges,
self.resolusi,
kromosom))
l = len(self.populasi) - len(populasi_baru)
self.populasi[l:] = populasi_baru
def fit(self,
stopping_crit: tuple = (Stop.MAX_IT,),
maks_generasi: int = 200,
crossover_rate: float = .5,
peluang_mutasi: float = .03,
rekam_history: bool = True,
verbose=1):
assert crossover_rate >= 0 and crossover_rate <= 1
iterasi = 0
fitness_history = []
if verbose == 1:
print('Progress: [', end='')
best_fit, peluang_list = self.__routineFitPel(verbose)
if rekam_history:
fitness_history.append(best_fit)
while iterasi < maks_generasi:
# start of the stopping mechanism
if stopping_crit[0] == Stop.TRESHOLD:
if best_fit >= stopping_crit[1]:
# stop iterating
break
elif stopping_crit[0] == Stop.NO_IMPROVE:
l = len(fitness_history)
if l > stopping_crit[1]:
is_improving = False
for i in range(stopping_crit[1]):
if fitness_history[l-1-i] != fitness_history[l-i-2]:
is_improving = True
break
if not is_improving:
# stop iterating
break
# end of the stopping mechanism
self.__routineRegenerasi(peluang_list, crossover_rate)
self.__mutasi(peluang_mutasi)
best_fit, peluang_list = self.__routineFitPel(verbose)
if rekam_history:
fitness_history.append(best_fit)
iterasi += 1
if verbose == 1:
print(']')
fenotip = self.best_individu[0].getFenotip()
return (fenotip, iterasi, fitness_history)
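# Illustrative usage (editorial sketch; the fitness function must return positive
# values because selection probabilities are fitness / total_fitness):
# gea = Gea(fungsi_fitness=lambda f: 1 / (1 + f[0]**2 + f[1]**2),
#           ranges=((-5, 5), (-5, 5)), resolusi=8)
# fenotip, iterasi, history = gea.fit(stopping_crit=(Stop.NO_IMPROVE, 20),
#                                     maks_generasi=100, verbose=0)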
```
#### File: model/gea/util.py
```python
import enum
from random import random as rand
def translate(value, leftMin, leftMax, rightMin, rightMax):
leftSpan = leftMax - leftMin
rightSpan = rightMax - rightMin
valueScaled = float(value - leftMin) / float(leftSpan)
return rightMin + (valueScaled * rightSpan)
def toDecimal(bin_list):
dec = 0
l = len(bin_list) - 1
for i, x in enumerate(bin_list):
dec += pow(2, l - i) * x
return dec
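# Quick sanity examples (editorial): toDecimal([1, 0, 1]) == 5 and
# translate(5, 0, 10, 0, 100) == 50.0.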
# %% enumerations
class Stop(enum.Enum):
MAX_IT = 0 # always on, stop when max iteration reached
TRESHOLD = 1 # stop if fitness >= treshold value
NO_IMPROVE = 3 # stop if no improvement for certain generation
# %% Gea
class Individu:
def __init__(self,
ranges: tuple = ((0, 1), (0, 1)),
resolusi: int = 5,
kromosom: list = None):
assert type(ranges) is tuple
for rg in ranges:
assert len(rg) == 2
self.kromosom = kromosom or \
[round(rand()) for _ in range(resolusi * len(ranges))]
self.res = resolusi
self.ranges = ranges
def getFenotip(self):
l = self.res
up = 2**l-1
return tuple(translate(toDecimal(self.kromosom[l*i:l*(i+1)]),
0, up, ran[0], ran[1])
for i, ran in enumerate(self.ranges))
```
#### File: jordiyapz_ai_toolkit/model/kaenn.py
```python
import numpy as np
from jordiyapz_ai_toolkit.method import Distance, Validation
class Knn:
@staticmethod
def get_distances(X, y, X_test, distance_func=Distance.manhattan):
# unsorted distances
return np.array([distance_func(X, X_test.iloc[i]) for i in range(X_test.shape[0])])
@staticmethod
def predict(X, y, X_test, K=3, distance_func=Distance.manhattan):
D = Knn.get_distances(X, y, X_test, distance_func).argsort()
return np.array([y.iloc[d][:K].mode()[0] for d in D])
@staticmethod
def validate(X, y, X_test, y_truth, K=3, distance_func=Distance.manhattan, validation_func=Validation.sse):
return validation_func(y_truth, Knn.predict(X, y, X_test, K, distance_func))
@staticmethod
def impute(dataset, K=3, distance_func=Distance.euclidean):
dataset = dataset.copy()
nan_sum = dataset.isna().sum().sort_values()
for col in nan_sum[nan_sum != 0].index:
na_mask = dataset[col].isna()
train = dataset[na_mask == False].dropna(axis=1)
X_test = dataset.loc[na_mask, train.columns[train.columns != col]]
X = train.loc[:, train.columns != col]
y = train[col]
D = Knn.get_distances(X, y, X_test, distance_func).argsort()[:, :K]
nan_val = [y.iloc[d].mean() for d in D]
dataset.loc[na_mask, col] = nan_val
return dataset
# Knn.validate(*train_test_split(*get_fold(1, dataset)), K=6, validation_func=Validation.accuracy)
# dataset = data_diabetes
# Knn.impute(dataset)
# dataset
class Dwknn(Knn):
# Distance Weighted K-Nearest Neighbor
@staticmethod
def predict(X, y, X_test, K=3, distance_func=Distance.manhattan):
D = Knn.get_distances(X, y, X_test, distance_func)
iD_sorted = D.argsort()[:, :K]
weights = 1 / np.array([D[i].take(iD_sorted[i])
for i in range(D.shape[0])])
knn = np.array([y.iloc[ids][:K] for ids in iD_sorted])
return (((1-knn)*weights).sum(1) < (knn*weights).sum(1)).astype(int)
@staticmethod
def validate(X, y, X_test, y_truth, K=3, distance_func=Distance.manhattan, validation_func=Validation.sse):
return validation_func(y_truth, Dwknn.predict(X, y, X_test, K, distance_func))
# X, y, X_test, y_true = train_test_split(*get_fold(4, dataset))
# display(y_true.loc[:3])
# Dwknn.predict(X, y, X_test.loc[:3], K=5)
# conf = Dwknn.validate(X, y, X_test, y_true, validation_func=Validation.confusion_matrix)
# print(conf)
# get_scores(conf)
``` |
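A minimal usage sketch for the classifier above, assuming the package is importable under the path shown in the file header; the toy data and the inline manhattan distance are illustrative and are passed explicitly so the snippet does not depend on the `Distance` helpers from `jordiyapz_ai_toolkit.method`:
```python
import numpy as np
import pandas as pd
from jordiyapz_ai_toolkit.model.kaenn import Knn

# Toy training data: two features, binary labels.
X = pd.DataFrame({'a': [0, 1, 5, 6], 'b': [0, 1, 5, 6]})
y = pd.Series([0, 0, 1, 1])
X_test = pd.DataFrame({'a': [0.5, 5.5], 'b': [0.5, 5.5]})

# Row-wise manhattan distance with the (X, row) signature get_distances expects.
manhattan = lambda X, row: np.abs(X - row).sum(axis=1)

print(Knn.predict(X, y, X_test, K=3, distance_func=manhattan))  # -> [0 1]
```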
{
"source": "jordiyapz/fazy",
"score": 3
} |
#### File: jordiyapz/fazy/fazy.py
```python
import util
import numpy as np
import pandas as pd
# %% membership function set class
class FungSet:
def __init__ (self, label, fungsi, bilangan:tuple):
prev = bilangan[0]
for bil in bilangan[1:]:
assert prev <= bil, 'Breakpoints must be in ascending order'
prev = bil
self.label = label
self.fungsi = fungsi
self.bilangan = bilangan
def hitung(self, x, up=1):
return self.fungsi(x, *self.bilangan, up=up)
# %% membership functions
class Fang:
@staticmethod
def linier_atas(x, a, b, up=1):
return util.clamp((x - a) / (b - a), 0, up)
@staticmethod
def linier_bawah(x, a, b, up=1):
return util.clamp((b - x) / (b - a), 0, up)
@staticmethod
def segitiga(x, a, b, c, up=1):
if type(x) is np.ndarray:
arr = np.where(x <= b,
Fang.linier_atas(x, a, b, up),
Fang.linier_bawah(x, b, c, up))
return arr
elif x <= b:
return Fang.linier_atas(x, a, b, up)
return Fang.linier_bawah(x, b, c, up)
@staticmethod
def trapesium(x, a, b, c, d, up=1):
if type(x) is np.ndarray:
arr = np.where(x <= c,
Fang.linier_atas(x, a, b, up),
Fang.linier_bawah(x, c, d, up))
return arr
elif x <= c:
return Fang.linier_atas(x, a, b, up)
return Fang.linier_bawah(x, c, d, up)
# %% main class
class Fazy:
def __init__(self, arr_fset:tuple, lookup_inferensi:pd.core.frame.DataFrame):
# notation:
# m : number of data points (rows) in the dataset
# n : number of inference rules
# c : number of inference columns
# nfs_i : number of FungSets in arr_fset for column i
for fset_tup in arr_fset:
for fs in fset_tup:
assert type(fs) == FungSet, \
'arr_fset must be a tuple of FungSet tuples'
self.arr_fset = arr_fset
# column names of the inference table
self.cols = [col for col in lookup_inferensi]
# one-hot encode every category of each inference column
self.one_hot = tuple(
pd.get_dummies(lookup_inferensi[col])[\
[f.label for f in arr_fset[i]]] \
for i, col in enumerate(self.cols))
def _fazify(self, nilai:np.ndarray, fset_tup:tuple):
return np.array([fs.hitung(nilai) for fs in fset_tup])
def _inferensi(self, fazys):
masked = tuple(
np.dot(self.one_hot[i], fazys[i]) for i in range(len(fazys))
)
konjungsi = np.minimum(*masked) # shape = (n, m)
# disjunction matrix
disjungsi = np.dot(self.one_hot[-1].T, konjungsi) # shape = (nfs_out, m)
return disjungsi
def _defazify(self, inferensi, step, maks, mins):
sumbu_x = np.arange(mins, maks, step=step)
derajat = np.max([
[fs.hitung(x, up=inferensi[i]) for x in sumbu_x] \
for i, fs in enumerate(self.arr_fset[-1])
], axis=0)
return np.dot(sumbu_x, derajat) / np.sum(derajat, axis=0)
def klasify(self, dataset: pd.core.frame.DataFrame, step=10, maks=100, mins=0):
fazys = tuple( # fazys_i.shape = (nfs_i, m)
self._fazify(dataset[self.cols[i]].to_numpy(), fset_tup) \
for i, fset_tup in enumerate(self.arr_fset[:len(self.arr_fset)-1]))
inferensi = self._inferensi(fazys)
return self._defazify(inferensi, step, maks, mins)
```
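To make the notation above concrete, here is a small, self-contained usage sketch; the variable names, fuzzy-set labels, breakpoints, and rules are all made up for illustration and are not taken from any dataset this repository uses:
```python
import pandas as pd

# Two inputs (temp, humidity) and one output (fan), two membership functions each.
temp_sets = (FungSet('low', Fang.linier_bawah, (20, 30)),
             FungSet('high', Fang.linier_atas, (20, 30)))
hum_sets = (FungSet('dry', Fang.linier_bawah, (40, 70)),
            FungSet('humid', Fang.linier_atas, (40, 70)))
fan_sets = (FungSet('slow', Fang.linier_bawah, (30, 70)),
            FungSet('fast', Fang.linier_atas, (30, 70)))

# Rule table: one row per rule, last column is the output label.
rules = pd.DataFrame({
    'temp':     ['low',  'low',   'high', 'high'],
    'humidity': ['dry',  'humid', 'dry',  'humid'],
    'fan':      ['slow', 'slow',  'fast', 'fast'],
})

fazy = Fazy((temp_sets, hum_sets, fan_sets), rules)
data = pd.DataFrame({'temp': [35, 18], 'humidity': [80, 30]})
print(fazy.klasify(data, step=1))  # one crisp fan value per row
```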
#### File: jordiyapz/fazy/util.py
```python
import numpy as np
import pandas as pd
def clamp(x, a, b):
if type(x) is np.ndarray or type(b) is np.ndarray:
return np.clip(x, a, b)
return max(a, min(x, b))
read_excel = pd.read_excel
``` |
{
"source": "jordiyapz/yes-capture",
"score": 2
} |
#### File: yesc/data/cli.py
```python
import click
from yesc.data.install import install
@click.group()
def cli():
pass
cli.add_command(install)
if __name__ == '__main__':
cli()
```
#### File: yesc/utils/video.py
```python
import cv2
from contextlib import contextmanager
@contextmanager
def open_video(*args):
'''
OpenCV2 video capture wrapper.
@author <NAME>.
This returns a frame generator that can be iterated to produce
video frames
Usage example:
```
with open_video(0) as video:
for frame in video:
# Do something with the video frame
# Press `q` to stop the video.
```
'''
video_capture = cv2.VideoCapture(*args)
def frame_generator():
while video_capture.isOpened():
success, frame = video_capture.read()
if not success:
break
yield frame
if cv2.waitKey(1) & 0xFF == ord('q'):
break
return
try:
yield frame_generator()
finally:
video_capture.release()
``` |
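A slightly fuller version of the docstring example, assuming a webcam at index 0 and a non-headless OpenCV build (cv2.imshow needs GUI support):
```python
import cv2
from yesc.utils.video import open_video

with open_video(0) as video:
    for frame in video:
        # Mirror the frame and display it; pressing `q` stops the loop inside the generator.
        cv2.imshow('preview', cv2.flip(frame, 1))

cv2.destroyAllWindows()
```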
{
"source": "jordiyeh/safrs",
"score": 2
} |
#### File: codegen/test/test_books_api.py
```python
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.books_api import BooksApi # noqa: E501
from swagger_client.rest import ApiException
class TestBooksApi(unittest.TestCase):
"""BooksApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.books_api.BooksApi() # noqa: E501
def tearDown(self):
pass
def test_2(self):
"""Test case for 2
Update a Book object # noqa: E501
"""
pass
def test_3(self):
"""Test case for 3
Delete a Book object # noqa: E501
"""
pass
def test_4(self):
"""Test case for 4
Delete from Book user # noqa: E501
"""
pass
def test_5(self):
"""Test case for 5
Retrieve a Book object # noqa: E501
"""
pass
def test_5_0(self):
"""Test case for 5_0
Create a Book object # noqa: E501
"""
pass
def test_6(self):
"""Test case for 6
Retrieve a Book object # noqa: E501
"""
pass
def test_6_0(self):
"""Test case for 6_0
Create a Book object # noqa: E501
"""
pass
def test_7(self):
"""Test case for 7
Retrieve a user object # noqa: E501
"""
pass
def test_7_0(self):
"""Test case for 7_0
Update user # noqa: E501
"""
pass
def test_8(self):
"""Test case for 8
Invoke Book.get_list # noqa: E501
"""
pass
def test_8_0(self):
"""Test case for 8_0
Retrieve a user object # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
```
#### File: examples/authentication/demo_auth.py
```python
import sys
import os
import logging
import builtins
from functools import wraps
from flask import Flask, redirect, jsonify, make_response
from flask import abort, request, g, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
from safrs import SAFRSBase, SAFRSJSONEncoder, Api, jsonapi_rpc
from flask_swagger_ui import get_swaggerui_blueprint
from flask_sqlalchemy import SQLAlchemy
from flask_httpauth import HTTPBasicAuth
from passlib.apps import custom_app_context as pwd_context
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired)
from flask_login import LoginManager, UserMixin, \
login_required, login_user, logout_user
db = SQLAlchemy()
auth = HTTPBasicAuth()
# Example sqla database object
class Item(SAFRSBase, db.Model):
'''
description: Item description
'''
__tablename__ = 'items'
id = Column(String, primary_key=True)
name = Column(String, default = '')
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'users'
id = db.Column(String, primary_key=True)
username = db.Column(db.String(32), index=True)
password_hash = db.Column(db.String(64))
custom_decorators = [auth.login_required]
@jsonapi_rpc(http_methods = ['POST'])
def hash_password(self, password):
self.password_hash = pwd_context.encrypt(password)
@jsonapi_rpc(http_methods = ['POST'])
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
@jsonapi_rpc(http_methods = ['POST'])
def generate_auth_token(self, expiration=600):
s = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
return s.dumps({'id': self.id})
@staticmethod
@jsonapi_rpc(http_methods = ['POST'])
def verify_auth_token(token):
s = Serializer(app.config['SECRET_KEY'])
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = User.query.get(data['id'])
return user
def start_app(app):
api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] )
item = Item(name='test')
user = User(username='admin')
user.hash_password('<PASSWORD>')
api.expose_object(Item)
api.expose_object(User)
# Set the JSON encoder used for object to json marshalling
app.json_encoder = SAFRSJSONEncoder
# Register the API at /api/docs
swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json')
app.register_blueprint(swaggerui_blueprint, url_prefix='/api')
print('Starting API: http://{}:{}/api'.format(HOST,PORT))
app.run(host=HOST, port = PORT)
#
# APP Initialization
#
app = Flask('demo_app')
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://',
SQLALCHEMY_TRACK_MODIFICATIONS = False,
SECRET_KEY = b'<KEY>',
DEBUG = True)
HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 5000
db.init_app(app)
#
# Authentication and custom routes
#
@auth.verify_password
def verify_password(username_or_token, password):
user = User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user or not user.verify_password(password):
return False
print('Authentication Successful for "{}"'.format(user.username))
return True
@app.route('/')
def goto_api():
return redirect('/api')
@app.teardown_appcontext
def shutdown_session(exception=None):
'''cfr. http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/'''
db.session.remove()
# Start the application
with app.app_context():
db.create_all()
start_app(app)
```
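For completeness, a hedged sketch of how a client could call this API with HTTP basic auth; the collection path `/users` and the credentials are assumptions (safrs derives the route from the exposed model, and the demo password above is redacted), so adjust them to whatever the running instance actually exposes:
```python
import requests

BASE = 'http://0.0.0.0:5000'  # HOST/PORT defaults from the example above

# auth.login_required on the User model means requests must carry basic-auth
# credentials (or a token previously issued by generate_auth_token).
resp = requests.get(BASE + '/users', auth=('admin', 'changeme'))
print(resp.status_code, resp.json())
```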
#### File: safrs/examples/demo_json.py
```python
import sys
from flask import Flask, redirect
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String
from safrs.db import SAFRSBase, documented_api_method
from safrs.jsonapi import SAFRSRestAPI, SAFRSJSONEncoder, Api
from flask_swagger_ui import get_swaggerui_blueprint
from flask_marshmallow import Marshmallow
from safrs.safrs_types import JSONType
app = Flask('demo_app')
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://',
SQLALCHEMY_TRACK_MODIFICATIONS = False,
DEBUG = True)
db = SQLAlchemy(app)
# Example sqla database object
class User(SAFRSBase, db.Model):
'''
description: User description
'''
__tablename__ = 'users'
id = Column(String, primary_key=True)
name = Column(String, default = '')
email = Column(String, default = '')
json = Column(JSONType, default = {} )
# Following method is exposed through the REST API
# This means it can be invoked with a HTTP POST
@documented_api_method
def send_mail(self, email):
'''
description : Send an email
args:
email:
type : string
example : test email
'''
content = 'Mail to {} : {}\n'.format(self.name, email)
with open('/tmp/mail.txt', 'a+') as mailfile :
mailfile.write(content)
return { 'result' : 'sent {}'.format(content)}
def create_api(app):
api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] )
# Expose the User object
api.expose_object(User)
user = User(name='test',email='<EMAIL>', json = { 'test' : 'data' } )
# Set the JSON encoder used for object to json marshalling
app.json_encoder = SAFRSJSONEncoder
# Register the API at /api/docs
swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json')
app.register_blueprint(swaggerui_blueprint, url_prefix='/api')
print('Starting API: http://{}:{}/api'.format(HOST,PORT))
app.run(host=HOST, port = PORT)
@app.route('/')
def goto_api():
return redirect('/api')
@app.teardown_appcontext
def shutdown_session(exception=None):
'''cfr. http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/'''
db.session.remove()
# Start the application
HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0'
PORT = 5000
db.init_app(app)
# Create the database
db.create_all()
# bind marshmallow
ma = Marshmallow(app)
ma.init_app(app)
with app.app_context():
create_api(app)
```
#### File: safrs/examples/demo_pythonanywhere_com.py
```python
import sys
from flask import Flask, render_template, Flask, redirect, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
from flask_cors import CORS
from flask_admin import Admin, BaseView
from flask_admin.contrib import sqla
from safrs import SAFRSBase, jsonapi_rpc, SAFRSJSONEncoder, Api, SAFRS
from safrs import search, startswith
app = Flask('SAFRS Demo App', template_folder='/home/thomaxxl/mysite/templates')
app.secret_key ='not so secret'
CORS( app,
origins="*",
allow_headers=[ "Content-Type", "Authorization", "Access-Control-Allow-Credentials"],
supports_credentials = True)
app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://',
DEBUG = True ) # DEBUG will also show safrs log messages + exception messages
db = SQLAlchemy(app)
prefix = '/api'
SAFRS(app, db, prefix = prefix)
# Add search and startswith methods so we can perform lookups from the frontend
SAFRSBase.search = search
SAFRSBase.startswith = startswith
# Needed because we don't want to implicitly commit when using flask-admin
SAFRSBase.db_commit = False
class Book(SAFRSBase, db.Model):
'''
description: Book description
'''
__tablename__ = 'Books'
id = db.Column(db.String, primary_key=True)
title = db.Column(db.String, default = '')
reader_id = db.Column(db.String, db.ForeignKey('People.id'))
author_id = db.Column(db.String, db.ForeignKey('People.id'))
publisher_id = db.Column(db.String, db.ForeignKey('Publishers.id'))
publisher = db.relationship('Publisher', back_populates='books')
reviews = db.relationship('Review', backref="book", cascade="save-update, merge, delete, delete-orphan")
class Person(SAFRSBase, db.Model):
'''
description: People description
'''
__tablename__ = 'People'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default = '')
email = db.Column(db.String, default = '')
comment = db.Column(db.Text, default = '')
books_read = db.relationship('Book', backref = "reader", foreign_keys = [Book.reader_id], cascade="save-update, merge, delete, delete-orphan")
books_written = db.relationship('Book', backref = "author", foreign_keys = [Book.author_id])
reviews = db.relationship('Review', backref = "reader")
# Following method is exposed through the REST API
# This means it can be invoked with a HTTP POST
@classmethod
@jsonapi_rpc(http_methods = ['POST'])
def send_mail(self, email):
'''
description : Send an email
args:
email:
type : string
example : test email
'''
content = 'Mail to {} : {}\n'.format(self.name, email)
with open('/tmp/mail.txt', 'a+') as mailfile :
mailfile.write(content)
return { 'result' : 'sent {}'.format(content)}
class Publisher(SAFRSBase, db.Model):
'''
description: Publisher description
'''
__tablename__ = 'Publishers'
id = db.Column(db.Integer, primary_key=True) # Integer pk instead of str
name = db.Column(db.String, default = '')
books = db.relationship('Book', back_populates = "publisher")
class Review(SAFRSBase, db.Model):
'''
description: Review description
'''
__tablename__ = 'Reviews'
reader_id = db.Column(db.String, db.ForeignKey('People.id',ondelete="CASCADE"), primary_key=True)
book_id = db.Column(db.String, db.ForeignKey('Books.id'), primary_key=True)
review = db.Column(db.String, default = '')
def start_api(HOST = '0.0.0.0' ,PORT = None):
db.create_all()
with app.app_context():
# populate the database
for i in range(300):
reader = Person(name='Reader '+str(i), email="reader_email"+str(i) )
author = Person(name='Author '+str(i), email="author_email"+str(i) )
book = Book(title='book_title' + str(i))
review = Review(reader_id=reader.id, book_id=book.id, review='review ' + str(i))
publisher = Publisher(name = 'name' + str(i))
publisher.books.append(book)
reader.books_read.append(book)
author.books_written.append(book)
db.session.add(reader)
db.session.add(author)
db.session.add(book)
db.session.add(publisher)
db.session.add(review)
db.session.commit()
swagger_host = HOST
if PORT and PORT != 80:
swagger_host += ':{}'.format(PORT)
api = Api(app, api_spec_url = '/api/swagger', host = swagger_host, schemes = [ "http", "https" ], description = description )
# Flask-Admin Config
admin = Admin(app, url='/admin')
for model in [ Person, Book, Review, Publisher] :
# add the flask-admin view
admin.add_view(sqla.ModelView(model, db.session))
# Create an API endpoint
api.expose_object(model)
@app.route('/ja')
@app.route('/ja/<path:path>', endpoint="jsonapi_admin")
def send_ja(path='index.html'):
return send_from_directory('/home/thomaxxl/mysite/jsonapi-admin/build', path)
@app.route('/')
def goto_api():
return redirect(prefix)
description = '''<a href=http://jsonapi.org>Json-API</a> compliant API built with https://github.com/thomaxxl/safrs <br/>
- <a href="https://github.com/thomaxxl/safrs/blob/master/examples/demo_pythonanywhere_com.py">Source code of this page</a> (only 150 lines!)<br/>
- <a href="http://thomaxxl.pythonanywhere.com/ja/index.html">reactjs+redux frontend</a>
- <a href="/admin/person">Flask-Admin frontend</a>
- Auto-generated swagger spec: <a href=/api/swagger.json>swagger.json</a><br/>
- Petstore <a href=http://petstore.swagger.io/?url=http://thomaxxl.pythonanywhere.com/api/swagger.json>Swagger2 UI</a><br/>
'''
if __name__ == '__main__':
HOST = sys.argv[1] if len(sys.argv) > 1 else 'thomaxxl.pythonanywhere.com'
PORT = int(sys.argv[2]) if len(sys.argv) > 2 else 5000
start_api(HOST,PORT)
app.run(host=HOST, port=PORT)
```
#### File: examples/expose_existing/mysql.py
```python
from sqlalchemy import BIGINT, CHAR, Column, DateTime, Enum, Float, INTEGER, LargeBinary, SMALLINT, String, TEXT, TIME, TIMESTAMP, Table, Text, text
from sqlalchemy.dialects.mysql.enumerated import ENUM, SET
from sqlalchemy.dialects.mysql.types import LONGBLOB, MEDIUMBLOB, MEDIUMTEXT, TINYINT
from sqlalchemy.ext.declarative import declarative_base
########################################################################################################################
# Manually Added for safrs, TODO: improve this crap
#
Base = db.Model
metadata = Base.metadata
def BIGINT(_):
return db.SMALLINT
def SMALLINT(_):
return db.SMALLINT
def INTEGER(_):
return db.INTEGER
def TIME(**kwargs):
return db.TIME
TIMESTAMP= db.TIMESTAMP
NullType = db.String
########################################################################################################################
class ColumnsPriv(SAFRSBase, Base):
__tablename__ = 'columns_priv'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Table_name = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Column_name = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
Column_priv = Column(SET, nullable=False, server_default=text("''"))
class Db(SAFRSBase, Base):
__tablename__ = 'db'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, index=True, server_default=text("''"))
Select_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Insert_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Update_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Delete_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Drop_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Grant_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
References_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Index_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Alter_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_tmp_table_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Lock_tables_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Show_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Alter_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Execute_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Event_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Trigger_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
class EngineCost(SAFRSBase, Base):
__tablename__ = 'engine_cost'
engine_name = Column(String(64), primary_key=True, nullable=False)
device_type = Column(INTEGER(11), primary_key=True, nullable=False)
cost_name = Column(String(64), primary_key=True, nullable=False)
cost_value = Column(Float)
last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
comment = Column(String(1024))
class Event(SAFRSBase, Base):
__tablename__ = 'event'
db = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
body = Column(LONGBLOB, nullable=False)
definer = Column(CHAR(93), nullable=False, server_default=text("''"))
execute_at = Column(DateTime)
interval_value = Column(INTEGER(11))
interval_field = Column(Enum('YEAR', 'QUARTER', 'MONTH', 'DAY', 'HOUR', 'MINUTE', 'WEEK', 'SECOND', 'MICROSECOND', 'YEAR_MONTH', 'DAY_HOUR', 'DAY_MINUTE', 'DAY_SECOND', 'HOUR_MINUTE', 'HOUR_SECOND', 'MINUTE_SECOND', 'DAY_MICROSECOND', 'HOUR_MICROSECOND', 'MINUTE_MICROSECOND', 'SECOND_MICROSECOND'))
created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
modified = Column(TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'"))
last_executed = Column(DateTime)
starts = Column(DateTime)
ends = Column(DateTime)
status = Column(Enum('ENABLED', 'DISABLED', 'SLAVESIDE_DISABLED'), nullable=False, server_default=text("'ENABLED'"))
on_completion = Column(Enum('DROP', 'PRESERVE'), nullable=False, server_default=text("'DROP'"))
sql_mode = Column(SET, nullable=False, server_default=text("''"))
comment = Column(CHAR(64), nullable=False, server_default=text("''"))
originator = Column(INTEGER(10), nullable=False)
time_zone = Column(CHAR(64), nullable=False, server_default=text("'SYSTEM'"))
character_set_client = Column(CHAR(32))
collation_connection = Column(CHAR(32))
db_collation = Column(CHAR(32))
body_utf8 = Column(LONGBLOB)
class Func(SAFRSBase, Base):
__tablename__ = 'func'
name = Column(CHAR(64, 'utf8_bin'), primary_key=True, server_default=text("''"))
ret = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
dl = Column(CHAR(128, 'utf8_bin'), nullable=False, server_default=text("''"))
type = Column(ENUM('function', 'aggregate'), nullable=False)
t_general_log = Table(
'general_log', metadata,
#Column('event_time', TIMESTAMP(fsp=6), nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
#
#MANUAL EDIT:
Column('event_time', TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
Column('user_host', MEDIUMTEXT, nullable=False),
Column('thread_id', BIGINT(21), nullable=False),
Column('server_id', INTEGER(10), nullable=False),
Column('command_type', String(64), nullable=False),
Column('argument', MEDIUMBLOB, nullable=False)
)
class GtidExecuted(SAFRSBase, Base):
__tablename__ = 'gtid_executed'
source_uuid = Column(CHAR(36), primary_key=True, nullable=False)
interval_start = Column(BIGINT(20), primary_key=True, nullable=False)
interval_end = Column(BIGINT(20), nullable=False)
class HelpCategory(SAFRSBase, Base):
__tablename__ = 'help_category'
help_category_id = Column(SMALLINT(5), primary_key=True)
name = Column(CHAR(64), nullable=False, unique=True)
parent_category_id = Column(SMALLINT(5))
url = Column(Text, nullable=False)
class HelpKeyword(SAFRSBase, Base):
__tablename__ = 'help_keyword'
help_keyword_id = Column(INTEGER(10), primary_key=True)
name = Column(CHAR(64), nullable=False, unique=True)
class HelpRelation(SAFRSBase, Base):
__tablename__ = 'help_relation'
help_topic_id = Column(INTEGER(10), primary_key=True, nullable=False)
help_keyword_id = Column(INTEGER(10), primary_key=True, nullable=False)
class HelpTopic(SAFRSBase, Base):
__tablename__ = 'help_topic'
help_topic_id = Column(INTEGER(10), primary_key=True)
name = Column(CHAR(64), nullable=False, unique=True)
help_category_id = Column(SMALLINT(5), nullable=False)
description = Column(Text, nullable=False)
example = Column(Text, nullable=False)
url = Column(Text, nullable=False)
class InnodbIndexStat(SAFRSBase, Base):
__tablename__ = 'innodb_index_stats'
database_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
table_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
index_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
stat_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
stat_value = Column(BIGINT(20), nullable=False)
sample_size = Column(BIGINT(20))
stat_description = Column(String(1024, 'utf8_bin'), nullable=False)
class InnodbTableStat(SAFRSBase, Base):
__tablename__ = 'innodb_table_stats'
database_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
table_name = Column(String(64, 'utf8_bin'), primary_key=True, nullable=False)
last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
n_rows = Column(BIGINT(20), nullable=False)
clustered_index_size = Column(BIGINT(20), nullable=False)
sum_of_other_index_sizes = Column(BIGINT(20), nullable=False)
class NdbBinlogIndex(SAFRSBase, Base):
__tablename__ = 'ndb_binlog_index'
Position = Column(BIGINT(20), nullable=False)
File = Column(String(255), nullable=False)
epoch = Column(BIGINT(20), primary_key=True, nullable=False)
inserts = Column(INTEGER(10), nullable=False)
updates = Column(INTEGER(10), nullable=False)
deletes = Column(INTEGER(10), nullable=False)
schemaops = Column(INTEGER(10), nullable=False)
orig_server_id = Column(INTEGER(10), primary_key=True, nullable=False)
orig_epoch = Column(BIGINT(20), primary_key=True, nullable=False)
gci = Column(INTEGER(10), nullable=False)
next_position = Column(BIGINT(20), nullable=False)
next_file = Column(String(255), nullable=False)
class Plugin(SAFRSBase, Base):
__tablename__ = 'plugin'
name = Column(String(64), primary_key=True, server_default=text("''"))
dl = Column(String(128), nullable=False, server_default=text("''"))
class Proc(SAFRSBase, Base):
__tablename__ = 'proc'
db = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
type = Column(Enum('FUNCTION', 'PROCEDURE'), primary_key=True, nullable=False)
specific_name = Column(CHAR(64), nullable=False, server_default=text("''"))
language = Column(Enum('SQL'), nullable=False, server_default=text("'SQL'"))
sql_data_access = Column(Enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA'), nullable=False, server_default=text("'CONTAINS_SQL'"))
is_deterministic = Column(Enum('YES', 'NO'), nullable=False, server_default=text("'NO'"))
security_type = Column(Enum('INVOKER', 'DEFINER'), nullable=False, server_default=text("'DEFINER'"))
param_list = Column(LargeBinary, nullable=False)
returns = Column(LONGBLOB, nullable=False)
body = Column(LONGBLOB, nullable=False)
definer = Column(CHAR(93), nullable=False, server_default=text("''"))
created = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
modified = Column(TIMESTAMP, nullable=False, server_default=text("'0000-00-00 00:00:00'"))
sql_mode = Column(SET, nullable=False, server_default=text("''"))
comment = Column(TEXT, nullable=False)
character_set_client = Column(CHAR(32))
collation_connection = Column(CHAR(32))
db_collation = Column(CHAR(32))
body_utf8 = Column(LONGBLOB)
class ProcsPriv(SAFRSBase, Base):
__tablename__ = 'procs_priv'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Routine_name = Column(CHAR(64), primary_key=True, nullable=False, server_default=text("''"))
Routine_type = Column(ENUM('FUNCTION', 'PROCEDURE'), primary_key=True, nullable=False)
Grantor = Column(CHAR(93, 'utf8_bin'), nullable=False, index=True, server_default=text("''"))
Proc_priv = Column(SET, nullable=False, server_default=text("''"))
Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ProxiesPriv(SAFRSBase, Base):
__tablename__ = 'proxies_priv'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Proxied_host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Proxied_user = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
With_grant = Column(TINYINT(1), nullable=False, server_default=text("'0'"))
Grantor = Column(CHAR(93, 'utf8_bin'), nullable=False, index=True, server_default=text("''"))
Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
class ServerCost(SAFRSBase, Base):
__tablename__ = 'server_cost'
cost_name = Column(String(64), primary_key=True)
cost_value = Column(Float)
last_update = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
comment = Column(String(1024))
class Server(SAFRSBase, Base):
__tablename__ = 'servers'
Server_name = Column(CHAR(64), primary_key=True, server_default=text("''"))
Host = Column(CHAR(64), nullable=False, server_default=text("''"))
Db = Column(CHAR(64), nullable=False, server_default=text("''"))
Username = Column(CHAR(64), nullable=False, server_default=text("''"))
Password = Column(CHAR(64), nullable=False, server_default=text("''"))
Port = Column(INTEGER(4), nullable=False, server_default=text("'0'"))
Socket = Column(CHAR(64), nullable=False, server_default=text("''"))
Wrapper = Column(CHAR(64), nullable=False, server_default=text("''"))
Owner = Column(CHAR(64), nullable=False, server_default=text("''"))
class SlaveMasterInfo(SAFRSBase, Base):
__tablename__ = 'slave_master_info'
Number_of_lines = Column(INTEGER(10), nullable=False)
Master_log_name = Column(TEXT, nullable=False)
Master_log_pos = Column(BIGINT(20), nullable=False)
Host = Column(CHAR(64))
User_name = Column(TEXT)
User_password = Column(TEXT)
Port = Column(INTEGER(10), nullable=False)
Connect_retry = Column(INTEGER(10), nullable=False)
Enabled_ssl = Column(TINYINT(1), nullable=False)
Ssl_ca = Column(TEXT)
Ssl_capath = Column(TEXT)
Ssl_cert = Column(TEXT)
Ssl_cipher = Column(TEXT)
Ssl_key = Column(TEXT)
Ssl_verify_server_cert = Column(TINYINT(1), nullable=False)
Heartbeat = Column(Float, nullable=False)
Bind = Column(TEXT)
Ignored_server_ids = Column(TEXT)
Uuid = Column(TEXT)
Retry_count = Column(BIGINT(20), nullable=False)
Ssl_crl = Column(TEXT)
Ssl_crlpath = Column(TEXT)
Enabled_auto_position = Column(TINYINT(1), nullable=False)
Channel_name = Column(CHAR(64), primary_key=True)
Tls_version = Column(TEXT)
class SlaveRelayLogInfo(SAFRSBase, Base):
__tablename__ = 'slave_relay_log_info'
Number_of_lines = Column(INTEGER(10), nullable=False)
Relay_log_name = Column(TEXT, nullable=False)
Relay_log_pos = Column(BIGINT(20), nullable=False)
Master_log_name = Column(TEXT, nullable=False)
Master_log_pos = Column(BIGINT(20), nullable=False)
Sql_delay = Column(INTEGER(11), nullable=False)
Number_of_workers = Column(INTEGER(10), nullable=False)
Id = Column(INTEGER(10), nullable=False)
Channel_name = Column(CHAR(64), primary_key=True)
class SlaveWorkerInfo(SAFRSBase, Base):
__tablename__ = 'slave_worker_info'
Id = Column(INTEGER(10), primary_key=True, nullable=False)
Relay_log_name = Column(TEXT, nullable=False)
Relay_log_pos = Column(BIGINT(20), nullable=False)
Master_log_name = Column(TEXT, nullable=False)
Master_log_pos = Column(BIGINT(20), nullable=False)
Checkpoint_relay_log_name = Column(TEXT, nullable=False)
Checkpoint_relay_log_pos = Column(BIGINT(20), nullable=False)
Checkpoint_master_log_name = Column(TEXT, nullable=False)
Checkpoint_master_log_pos = Column(BIGINT(20), nullable=False)
Checkpoint_seqno = Column(INTEGER(10), nullable=False)
Checkpoint_group_size = Column(INTEGER(10), nullable=False)
Checkpoint_group_bitmap = Column(LargeBinary, nullable=False)
Channel_name = Column(CHAR(64), primary_key=True, nullable=False)
t_slow_log = Table(
'slow_log', metadata,
#Column('start_time', TIMESTAMP(fsp=6), nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
#
#Manual Edit:
Column('start_time', TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6)")),
Column('user_host', MEDIUMTEXT, nullable=False),
Column('query_time', TIME(fsp=6), nullable=False),
Column('lock_time', TIME(fsp=6), nullable=False),
Column('rows_sent', INTEGER(11), nullable=False),
Column('rows_examined', INTEGER(11), nullable=False),
Column('db', String(512), nullable=False),
Column('last_insert_id', INTEGER(11), nullable=False),
Column('insert_id', INTEGER(11), nullable=False),
Column('server_id', INTEGER(10), nullable=False),
Column('sql_text', MEDIUMBLOB, nullable=False),
Column('thread_id', BIGINT(21), nullable=False)
)
class TablesPriv(SAFRSBase, Base):
__tablename__ = 'tables_priv'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Db = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Table_name = Column(CHAR(64, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Grantor = Column(CHAR(93, 'utf8_bin'), nullable=False, index=True, server_default=text("''"))
Timestamp = Column(TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"))
Table_priv = Column(SET, nullable=False, server_default=text("''"))
Column_priv = Column(SET, nullable=False, server_default=text("''"))
class TimeZone(SAFRSBase, Base):
__tablename__ = 'time_zone'
Time_zone_id = Column(INTEGER(10), primary_key=True)
Use_leap_seconds = Column(Enum('Y', 'N'), nullable=False, server_default=text("'N'"))
class TimeZoneLeapSecond(SAFRSBase, Base):
__tablename__ = 'time_zone_leap_second'
Transition_time = Column(BIGINT(20), primary_key=True)
Correction = Column(INTEGER(11), nullable=False)
class TimeZoneName(SAFRSBase, Base):
__tablename__ = 'time_zone_name'
Name = Column(CHAR(64), primary_key=True)
Time_zone_id = Column(INTEGER(10), nullable=False)
class TimeZoneTransition(SAFRSBase, Base):
__tablename__ = 'time_zone_transition'
Time_zone_id = Column(INTEGER(10), primary_key=True, nullable=False)
Transition_time = Column(BIGINT(20), primary_key=True, nullable=False)
Transition_type_id = Column(INTEGER(10), nullable=False)
class TimeZoneTransitionType(SAFRSBase, Base):
__tablename__ = 'time_zone_transition_type'
Time_zone_id = Column(INTEGER(10), primary_key=True, nullable=False)
Transition_type_id = Column(INTEGER(10), primary_key=True, nullable=False)
Offset = Column(INTEGER(11), nullable=False, server_default=text("'0'"))
Is_DST = Column(TINYINT(3), nullable=False, server_default=text("'0'"))
Abbreviation = Column(CHAR(8), nullable=False, server_default=text("''"))
class User(SAFRSBase, Base):
__tablename__ = 'user'
Host = Column(CHAR(60, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
User = Column(CHAR(32, 'utf8_bin'), primary_key=True, nullable=False, server_default=text("''"))
Select_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Insert_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Update_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Delete_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Drop_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Reload_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Shutdown_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Process_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
File_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Grant_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
References_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Index_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Alter_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Show_db_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Super_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_tmp_table_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Lock_tables_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Execute_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Repl_slave_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Repl_client_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Show_view_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Alter_routine_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_user_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Event_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Trigger_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
Create_tablespace_priv = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
ssl_type = Column(ENUM('', 'ANY', 'X509', 'SPECIFIED'), nullable=False, server_default=text("''"))
ssl_cipher = Column(LargeBinary, nullable=False)
x509_issuer = Column(LargeBinary, nullable=False)
x509_subject = Column(LargeBinary, nullable=False)
max_questions = Column(INTEGER(11), nullable=False, server_default=text("'0'"))
max_updates = Column(INTEGER(11), nullable=False, server_default=text("'0'"))
max_connections = Column(INTEGER(11), nullable=False, server_default=text("'0'"))
max_user_connections = Column(INTEGER(11), nullable=False, server_default=text("'0'"))
plugin = Column(CHAR(64, 'utf8_bin'), nullable=False, server_default=text("'mysql_native_password'"))
authentication_string = Column(Text(collation='utf8_bin'))
password_expired = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
password_last_changed = Column(TIMESTAMP)
password_lifetime = Column(SMALLINT(5))
account_locked = Column(ENUM('N', 'Y'), nullable=False, server_default=text("'N'"))
```
#### File: safrs/expose_existing/expose_existing.py
```python
import sys, logging, inspect, builtins, os, argparse, tempfile, atexit, shutil, io, pkg_resources
from sqlalchemy import CHAR, Column, DateTime, Float, ForeignKey, Index, Integer, String, TIMESTAMP, Table, Text, UniqueConstraint, text
from sqlalchemy.sql.sqltypes import NullType
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, redirect
from flask_swagger_ui import get_swaggerui_blueprint
from safrs import SAFRSBase, jsonapi_rpc, SAFRSJSONEncoder, Api
from safrs import search, SAFRS
from io import StringIO
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import MetaData
from flask_cors import CORS
MODEL_DIR=tempfile.mkdtemp() # directory where the generated models.py will be saved
sqlacodegen_dir = os.path.join(os.path.dirname(__file__), 'sqlacodegen')
if not os.path.isdir(sqlacodegen_dir):
print('sqlacodegen not found')
sys.path.insert(0,MODEL_DIR)
sys.path.insert(0,sqlacodegen_dir)
from sqlacodegen.codegen import CodeGenerator
def get_args():
parser = argparse.ArgumentParser(
description='Generates SQLAlchemy model code from an existing database.')
parser.add_argument('url', nargs='?', help='SQLAlchemy url to the database')
parser.add_argument('--version', action='store_true', help="print the version number and exit")
parser.add_argument('--host', default = '0.0.0.0', help="host (interface ip) to run")
parser.add_argument('--port', default = 5000, type=int, help="port to listen on")
parser.add_argument('--models', default=None, help="Load models from file instead of generating them dynamically")
parser.add_argument('--schema', help='load tables from an alternate schema')
parser.add_argument('--tables', help='tables to process (comma-separated, default: all)')
parser.add_argument('--noviews', action='store_true', help="ignore views")
parser.add_argument('--noindexes', action='store_true', help='ignore indexes')
parser.add_argument('--noconstraints', action='store_true', help='ignore constraints')
parser.add_argument('--nojoined', action='store_true',
help="don't autodetect joined table inheritance")
parser.add_argument('--noinflect', action='store_true',
help="don't try to convert tables names to singular form")
parser.add_argument('--noclasses', action='store_true',
help="don't generate classes, only tables")
parser.add_argument('--outfile', help='file to write output to (default: stdout)')
args = parser.parse_args()
if args.version:
version = pkg_resources.get_distribution('sqlacodegen').parsed_version
print(version.public)
exit()
if not args.url:
print('You must supply a url\n', file=sys.stderr)
parser.print_help()
exit(1)
return args
def fix_generated(code):
if db.session.bind.dialect.name == 'sqlite':
code = code.replace('Numeric', 'String')
return code
def codegen(args):
# Use reflection to fill in the metadata
engine = create_engine(args.url)
metadata = MetaData(engine)
tables = args.tables.split(',') if args.tables else None
metadata.reflect(engine, args.schema, not args.noviews, tables)
if db.session.bind.dialect.name == 'sqlite':
# dirty hack for sqlite
engine.execute('''PRAGMA journal_mode = OFF''')
# Write the generated model code to the specified file or standard output
capture = StringIO()
#outfile = io.open(args.outfile, 'w', encoding='utf-8') if args.outfile else capture # sys.stdout
generator = CodeGenerator(metadata, args.noindexes, args.noconstraints, args.nojoined,
args.noinflect, args.noclasses)
generator.render(capture)
generated = capture.getvalue()
generated = fix_generated(generated)
if args.outfile:
outfile = io.open(args.outfile, 'w', encoding='utf-8')
outfile.write(generated)
return generated
args = get_args()
app = Flask('DB App')
CORS(app, origins= ["*"])
app.config.update( SQLALCHEMY_DATABASE_URI = args.url,
DEBUG = True)
SAFRS(app)
app.url_map.strict_slashes = False
SAFRSBase.db_commit = False
builtins.db = SQLAlchemy(app) # set db as a global variable to be used in employees.py
models = codegen(args)
#
# Write the models to file, we could try to exec() but this makes our code more complicated
# Also, we can modify models.py in case things go awry
#
if args.models:
model_dir = os.path.basename(args.models)
sys.path.insert(0,model_dir)
else:
with open(os.path.join(MODEL_DIR, 'models.py'),'w+') as models_f:
models_f.write(models)
#atexit.register(lambda : shutil.rmtree(MODEL_DIR))
import models
def start_api(HOST = '0.0.0.0', PORT = 80):
with app.app_context():
api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ], description = '' )
for name, model in inspect.getmembers(models):
bases = getattr(model, '__bases__', [] )
if SAFRSBase in bases:
# Create an API endpoint
# Add search method so we can perform lookups from the frontend
model.search = search
api.expose_object(model)
# Set the JSON encoder used for object to json marshalling
#app.json_encoder = SAFRSJSONEncoder
# Register the API at /api
#swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json')
#app.register_blueprint(swaggerui_blueprint, url_prefix='/api')
@app.route('/')
def goto_api():
return redirect('/api')
if __name__ == '__main__':
HOST = args.host
PORT = args.port
start_api(HOST,PORT)
print('API URL: http://{}:{}/api , model dir: {}'.format(HOST,PORT,MODEL_DIR))
app.run(host=HOST, port=PORT)
```
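For reference, a hedged invocation sketch (the database URL and file name below are illustrative, not part of the repository):
```python
# Example run against a local sqlite file:
#   python expose_existing.py sqlite:///chinook.db --host 127.0.0.1 --port 5000
#
# The generated models.py is written to MODEL_DIR and the JSON:API plus its
# swagger spec is then served under http://127.0.0.1:5000/api
```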
#### File: tests/relationship/test_relationship.py
```python
import jsonapi_requests
api = jsonapi_requests.orm.OrmApi.config({
'API_ROOT': 'http://127.0.0.1:5000/',
'AUTH': ('basic_auth_login', 'basic_auth_password'),
'VALIDATE_SSL': False,
'TIMEOUT': 1,
})
class User(jsonapi_requests.orm.ApiModel):
'''
__tablename__ = 'Users'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default='')
email = db.Column(db.String, default='')
books = db.relationship('Book', back_populates="user", lazy='dynamic')
'''
class Meta:
type = 'Users'
api = api
name = jsonapi_requests.orm.AttributeField('name')
email = jsonapi_requests.orm.AttributeField('email')
books = jsonapi_requests.orm.RelationField('books')
class Book(jsonapi_requests.orm.ApiModel):
'''
__tablename__ = 'Books'
id = db.Column(db.String, primary_key=True)
name = db.Column(db.String, default='')
user_id = db.Column(db.String, db.ForeignKey('Users.id'))
user = db.relationship('User', back_populates='books')
'''
class Meta:
type = 'Books'
api = api
name = jsonapi_requests.orm.AttributeField('name')
user_id = jsonapi_requests.orm.AttributeField('user_id')
user = jsonapi_requests.orm.RelationField('user')
def test_get():
endpoint = api.endpoint('Users')
response = endpoint.get(params={'include' : 'books'})
for user in response.data:
print(user.id, user.attributes['name'])
user = User.from_id(user.id)
endpoint = api.endpoint('Books')
response = endpoint.get(params={'include' : 'user'})
for book in response.data:
print(book.id, book.attributes['name'], book.attributes['user_id'])
print(book.relationships)
book = Book.from_id(book.id,params={'include' : 'user'})
print(user.books)
print(book.user)
assert book.attributes['user_id'] == user.id
if __name__ == '__main__':
test_get()
``` |