{
"source": "jkha-unist/for_test",
"score": 2
}
#### File: src/mqc/ct.py
```python
from __future__ import division
from build.el_propagator_ct import el_run
from mqc.mqc import MQC
from misc import eps, au_to_K, au_to_A, call_name, typewriter, gaussian1d
import os, shutil, textwrap
import numpy as np
import pickle
class CT(MQC):
""" Class for coupled-trajectory mixed quantum-classical (CTMQC) dynamics
:param object,list molecules: List for molecule objects
:param object thermostat: Thermostat object
:param integer,list istates: List for initial state
:param double dt: Time interval
:param integer nsteps: Total number of nuclear propagation steps
:param integer nesteps: Total number of electronic propagation steps
:param string elec_object: Electronic equation of motion
:param string propagator: Electronic propagator
:param boolean l_print_dm: Logical to print BO population and coherence
:param boolean l_adj_nac: Adjust nonadiabatic coupling to align the phases
:param double rho_threshold: Electronic density threshold for decoherence term calculation
:param double sigma_threshold: Sigma threshold for quantum momentum calculation
:param double dist_cutoff: Distance cutoff for quantum momentum calculation
:param double dist_parameter: Distance parameter to determine quantum momentum center
:param double sigma: Sigma to determine quantum momentum center
:param init_coefs: Initial BO coefficient
:type init_coefs: double, list, list or complex, list, list
:param integer out_freq: Frequency of printing output
:param integer verbosity: Verbosity of output
"""
def __init__(self, molecules, thermostat=None, istates=None, dt=0.5, nsteps=1000, nesteps=20, \
elec_object="coefficient", propagator="rk4", l_print_dm=True, l_adj_nac=True, \
rho_threshold=0.01, sigma_threshold=0.25, dist_cutoff=0.5, dist_parameter=10., sigma=0.3, \
init_coefs=None, unit_dt="fs", out_freq=1, verbosity=0):
# Save name of MQC dynamics
self.md_type = self.__class__.__name__
# Initialize input values
self.mols = molecules
self.ntrajs = len(self.mols)
self.digit = len(str(self.ntrajs))
self.nst = self.mols[0].nst
self.nat_qm = self.mols[0].nat_qm
self.ndim = self.mols[0].ndim
if (istates is None):
raise ValueError (f"( {self.md_type}.{call_name()} ) istates is required! {istates}")
if (isinstance(istates, list)):
if (len(istates) != self.ntrajs):
raise ValueError (f"( {self.md_type}.{call_name()} ) The length of istates must equal the total number of trajectories! {istates}")
else:
if (max(istates) >= self.nst):
raise ValueError (f"( {self.md_type}.{call_name()} ) Index for initial state must be smaller than number of states! {max(istates)}")
else:
raise ValueError (f"( {self.md_type}.{call_name()} ) The type of istates should be list! {istates}")
if (init_coefs is None):
init_coefs = [None] * self.ntrajs
else:
if (len(init_coefs) != self.ntrajs):
raise ValueError (f"( {self.md_type}.{call_name()} ) The length of init_coefs must equal the total number of trajectories! {len(init_coefs)}")
# Initialize input values and coefficient for first trajectory
super().__init__(self.mols[0], thermostat, istates[0], dt, nsteps, nesteps, \
elec_object, propagator, l_print_dm, l_adj_nac, init_coefs[0], unit_dt, out_freq, verbosity)
if (self.elec_object != "coefficient"):
raise ValueError (f"( {self.md_type}.{call_name()} ) Only coefficient propagation is valid! {self.elec_object}")
# Initialize coefficient for other trajectories
for itraj in range(1, self.ntrajs):
self.mols[itraj].get_coefficient(init_coefs[itraj], istates[itraj])
# Initialize variables for CTMQC
self.phase = np.zeros((self.ntrajs, self.nst, self.nat_qm, self.ndim))
self.nst_pair = int(self.nst * (self.nst - 1) / 2)
self.qmom = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim))
self.K_lk = np.zeros((self.ntrajs, self.nst, self.nst))
# Initialize variables to calculate quantum momentum
self.count_ntrajs = np.zeros((self.ntrajs, self.nat_qm))
self.sigma_lk = np.ones((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim))
self.slope_i = np.zeros((self.ntrajs, self.nat_qm, self.ndim))
self.center_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim))
# Determine parameters to calculate decoherence effect
self.small = 1.0E-08
self.upper_th = 1. - rho_threshold
self.lower_th = rho_threshold
self.sigma_threshold = sigma_threshold
self.dist_cutoff = dist_cutoff
self.dist_parameter = dist_parameter
self.sigma = sigma
self.dotpopd = np.zeros(self.nst)
def run(self, qm, mm=None, output_dir="./", l_save_qm_log=False, l_save_mm_log=False, l_save_scr=True, restart=None):
""" Run MQC dynamics according to CTMQC dynamics
:param object qm: QM object containing on-the-fly calculation infomation
:param object mm: MM object containing MM calculation infomation
:param string output_dir: Name of directory where outputs to be saved.
:param boolean l_save_qm_log: Logical for saving QM calculation log
:param boolean l_save_mm_log: Logical for saving MM calculation log
:param boolean l_save_scr: Logical for saving scratch directory
:param string restart: Option for controlling dynamics restarting
"""
# Initialize PyUNIxMD
base_dirs, unixmd_dirs, qm_log_dirs, mm_log_dirs =\
self.run_init(qm, mm, output_dir, l_save_qm_log, l_save_mm_log, l_save_scr, restart)
bo_list = [ist for ist in range(self.nst)]
qm.calc_coupling = True
self.print_init(qm, mm, restart)
if (restart is None):
# Calculate initial input geometry for all trajectories at t = 0.0 s
self.istep = -1
for itraj in range(self.ntrajs):
self.mol = self.mols[itraj]
self.mol.reset_bo(qm.calc_coupling)
qm.get_data(self.mol, base_dirs[itraj], bo_list, self.dt, self.istep, calc_force_only=False)
# TODO: QM/MM
self.mol.get_nacme()
self.update_energy()
self.get_phase(itraj)
self.calculate_qmom(self.istep)
for itraj in range(self.ntrajs):
self.write_md_output(itraj, unixmd_dirs[itraj], self.istep)
self.print_traj(self.istep, itraj)
self.print_step(self.istep)
#TODO: restart
else:
raise ValueError (f"( {self.md_type}.{call_name()} ) restart is not valid in CTMQC! {restart}")
self.istep += 1
# Main MD loop
for istep in range(self.istep, self.nsteps):
for itraj in range(self.ntrajs):
self.mol = self.mols[itraj]
self.calculate_force(itraj)
self.cl_update_position()
self.mol.backup_bo()
self.mol.reset_bo(qm.calc_coupling)
qm.get_data(self.mol, base_dirs[itraj], bo_list, self.dt, istep, calc_force_only=False)
#TODO: QM/MM
if (not self.mol.l_nacme and self.l_adj_nac):
self.mol.adjust_nac()
self.calculate_force(itraj)
self.cl_update_velocity()
self.mol.get_nacme()
el_run(self, itraj)
#TODO: thermostat
#if (self.thermo != None):
# self.thermo.run(self)
self.update_energy()
self.get_phase(itraj)
#TODO: restart
#self.fstep = istep
#restart_file = os.path.join(base_dir, "RESTART.bin")
#with open(restart_file, 'wb') as f:
# pickle.dump({'qm':qm, 'md':self}, f)
self.calculate_qmom(istep)
for itraj in range(self.ntrajs):
if ((istep + 1) % self.out_freq == 0):
self.write_md_output(itraj, unixmd_dirs[itraj], istep)
self.print_traj(istep, itraj)
if (istep == self.nsteps - 1):
self.write_final_xyz(unixmd_dirs[itraj], istep)
self.print_step(istep)
# Delete scratch directory
if (not l_save_scr):
for itraj in range(self.ntrajs):
tmp_dir = os.path.join(unixmd_dirs[itraj], "scr_qm")
if (os.path.exists(tmp_dir)):
shutil.rmtree(tmp_dir)
def calculate_force(self, itrajectory):
""" Routine to calculate force
:param integer itrajectory: Index for trajectories
"""
self.rforce = np.zeros((self.nat_qm, self.ndim))
# Derivatives of energy
for ist, istate in enumerate(self.mols[itrajectory].states):
self.rforce += istate.force * self.mol.rho.real[ist, ist]
# Non-adiabatic forces
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
self.rforce += 2. * self.mol.nac[ist, jst] * self.mol.rho.real[ist, jst] \
* (self.mol.states[ist].energy - self.mol.states[jst].energy)
# CT forces
ctforce = np.zeros((self.nat_qm, self.ndim))
for ist in range(self.nst):
for jst in range(self.nst):
ctforce += 0.5 * self.K_lk[itrajectory, ist, jst] * \
(self.phase[itrajectory, jst] - self.phase[itrajectory, ist]) * \
self.mol.rho.real[ist, ist] * self.mol.rho.real[jst, jst]
# Finally, force is Ehrenfest force + CT force
self.rforce += ctforce
def update_energy(self):
""" Routine to update the energy of molecules in CTMQC dynamics
"""
# Update kinetic energy
self.mol.update_kinetic()
self.mol.epot = 0.
for ist, istate in enumerate(self.mol.states):
self.mol.epot += self.mol.rho.real[ist, ist] * istate.energy
self.mol.etot = self.mol.epot + self.mol.ekin
def get_phase(self, itrajectory):
""" Routine to calculate phase
:param integer itrajectory: Index for trajectories
"""
for ist in range(self.nst):
rho = self.mol.rho[ist, ist].real
if (rho > self.upper_th or rho < self.lower_th):
self.phase[itrajectory, ist] = np.zeros((self.nat_qm, self.ndim))
else:
self.phase[itrajectory, ist] += self.mol.states[ist].force * self.dt
def calculate_qmom(self, istep):
""" Routine to calculate quantum momentum
:param integer istep: Current MD step
"""
# _lk denotes state-pair dependency.
# i and j are trajectory indices.
# -------------------------------------------------------------------
# 1. Calculate variances for each trajectory
# TODO: method to calculate sigma
self.sigma_lk = np.ones((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim)) # TODO: state-pair
for itraj in range(self.ntrajs):
# Variable to determine how many trajectories are within the cutoff.
self.count_ntrajs[itraj] = np.zeros((self.nat_qm))
R2_tmp = np.zeros((self.nat_qm, self.ndim)) # Temporary variable for R**2
R_tmp = np.zeros((self.nat_qm, self.ndim)) # Temporary variable for R
for jtraj in range(self.ntrajs):
pos_diff = self.mols[jtraj].pos - self.mols[itraj].pos # Dimension = (self.nat_qm, self.ndim)
pos_diff2 = np.sum(pos_diff * pos_diff, axis=1) # Dimension = (self.nat_qm)
for iat in range(self.nat_qm):
distance = np.sqrt(pos_diff2[iat]) # Distance between i-th atom in itraj and jtraj
if (distance <= self.dist_cutoff):
R_tmp[iat] += self.mols[jtraj].pos[iat] # Dimension = (self.nat_qm, self.ndim)
R2_tmp[iat] += self.mols[jtraj].pos[iat] * self.mols[jtraj].pos[iat] # Dimension = (self.nat_qm, self.ndim)
self.count_ntrajs[itraj, iat] += 1
for iat in range(self.nat_qm):
avg_R = R_tmp[iat] / self.count_ntrajs[itraj, iat]
avg_R2 = R2_tmp[iat] / self.count_ntrajs[itraj, iat]
for idim in range(self.ndim):
self.sigma_lk[itraj, 0, iat, idim] = np.sqrt((avg_R2[idim] - avg_R[idim] ** 2)) \
/ np.sqrt(np.sqrt(self.count_ntrajs[itraj, iat])) # The 1 / count_ntrajs ** 0.25 factor is an ad hoc modulation of sigma.
if (self.sigma_lk[itraj, 0, iat, idim] <= self.sigma_threshold):
self.sigma_lk[itraj, 0, iat, idim] = self.dist_cutoff
# 2. Calculate slope
# (2-1) Calculate w_ij
# g_i means nuclear density at the position of i-th classical trajectory.
# prod_g_i is to multiply gaussians with respect to atoms and spaces.
g_i = np.zeros((self.ntrajs))
prod_g_i = np.ones((self.ntrajs, self.ntrajs))
for itraj in range(self.ntrajs):
for jtraj in range(self.ntrajs):
for iat in range(self.nat_qm):
for idim in range(self.ndim):
# gaussian1d(x, pre-factor, sigma, mean)
# gaussian1d(R^{itraj}, 1.0, sigma^{jtraj}, R^{jtraj})
prod_g_i[itraj, jtraj] *= gaussian1d(self.mols[itraj].pos[iat, idim], 1., \
self.sigma_lk[jtraj, 0, iat, idim], self.mols[jtraj].pos[iat, idim])
g_i[itraj] += prod_g_i[itraj, jtraj]
# w_ij is defined as W_IJ in SI of J. Phys. Chem. Lett., 2017, 8, 3048-3055.
w_ij = np.zeros((self.ntrajs, self.ntrajs, self.nat_qm, self.ndim))
for itraj in range(self.ntrajs):
for jtraj in range(self.ntrajs):
for iat in range(self.nat_qm):
for idim in range(self.ndim):
w_ij[itraj, jtraj, iat, idim] = prod_g_i[itraj, jtraj] /\
(2. * self.sigma_lk[jtraj, 0, iat, idim] ** 2 * g_i[itraj])
# (2-2) Calculate slope_i
# the slope is calculated as a sum over j of w_ij
self.slope_i = np.zeros((self.ntrajs, self.nat_qm, self.ndim))
for itraj in range(self.ntrajs):
for jtraj in range(self.ntrajs):
self.slope_i[itraj] -= w_ij[itraj, jtraj]
# 3. Calculate the center of quantum momentum
rho = np.zeros((self.ntrajs, self.nst))
for itraj in range(self.ntrajs):
for ist in range(self.nst):
rho[itraj, ist] = self.mols[itraj].rho[ist, ist].real
# (3-1) Compute denominator
deno_lk = np.zeros((self.nst_pair, self.nat_qm, self.ndim)) # denominator
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
for iat in range(self.nat_qm):
for idim in range(self.ndim):
deno_lk[index_lk, iat, idim] += rho[itraj, ist] * rho[itraj, jst] * \
(self.phase[itraj, ist, iat, idim] - self.phase[itraj, jst, iat, idim]) * self.slope_i[itraj, iat, idim]
# (3-2) Compute numerator
ratio_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim)) # numerator / denominator
numer_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim)) # numerator
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
for iat in range(self.nat_qm):
for idim in range(self.ndim):
numer_lk[itraj, index_lk, iat, idim] = rho[itraj, ist] * rho[itraj, jst] * self.mols[itraj].pos[iat, idim] * \
(self.phase[itraj, ist, iat, idim] - self.phase[itraj, jst, iat, idim]) * self.slope_i[itraj, iat, idim]
if (abs(deno_lk[index_lk, iat, idim]) <= self.small):
ratio_lk[itraj, index_lk, iat, idim] = 0.
else:
ratio_lk[itraj, index_lk, iat, idim] = numer_lk[itraj, index_lk, iat, idim] / \
deno_lk[index_lk, iat, idim]
# Center of quantum momentum is calculated by Eq.(S28) of J. Phys. Chem. Lett., 2017, 8, 3048-3055.
center_old_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim))
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
for iat in range(self.nat_qm):
for idim in range(self.ndim):
for jtraj in range(self.ntrajs):
center_old_lk[itraj, index_lk, iat, idim] += ratio_lk[jtraj, index_lk, iat, idim]
if ((abs(self.slope_i[itraj, iat, idim]) <= self.small) or (center_old_lk[itraj, index_lk, iat, idim] == 0.)):
center_old_lk[itraj, index_lk, iat, idim] = self.mols[itraj].pos[iat, idim]
# Center of quantum momentum is calculated by Eq.(S21) of J. Phys. Chem. Lett., 2017, 8, 3048-3055.
center_new_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim))
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
for iat in range(self.nat_qm):
for idim in range(self.ndim):
if (abs(self.slope_i[itraj, iat, idim]) <= self.small):
center_new_lk[itraj, index_lk, iat, idim] = self.mols[itraj].pos[iat, idim]
else:
for jtraj in range(self.ntrajs):
center_new_lk[itraj, index_lk, iat, idim] += self.mols[jtraj].pos[iat, idim] * prod_g_i[itraj, jtraj] /\
(2. * self.sigma_lk[jtraj, 0, iat, idim] ** 2 * g_i[itraj] * (- self.slope_i[itraj, iat, idim]))
# (3-3) Determine quantum momentum center TODO: atomistic flag
self.center_lk = np.zeros((self.ntrajs, self.nst_pair, self.nat_qm, self.ndim)) # Finally, qmom_center
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
for iat in range(self.nat_qm):
for idim in range(self.ndim):
# test how far calculated center of quantum momentum is from current atomic position.
# tmp_var is deviation between position of classical trajectory and quantum momentum center.
tmp_var = center_old_lk[itraj, index_lk, iat, idim] - self.mols[itraj].pos[iat, idim]
if (abs(tmp_var) > self.dist_parameter * self.sigma):
tmp_var = center_new_lk[itraj, index_lk, iat, idim] - self.mols[itraj].pos[iat, idim]
if (abs(tmp_var) > self.dist_parameter * self.sigma):
self.center_lk[itraj, index_lk, iat, idim] = self.mols[itraj].pos[iat, idim]
else:
self.center_lk[itraj, index_lk, iat, idim] = center_new_lk[itraj, index_lk, iat, idim]
else:
self.center_lk[itraj, index_lk, iat, idim] = center_old_lk[itraj, index_lk, iat, idim]
# 4. Compute quantum momentum
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
self.qmom[itraj, index_lk] = self.slope_i[itraj] * (self.mols[itraj].pos - self.center_lk[itraj, index_lk])
# 5. Calculate 2 * Qmom * phase / mass
self.K_lk = np.zeros((self.ntrajs, self.nst, self.nst))
for itraj in range(self.ntrajs):
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
self.K_lk[itraj, ist, jst] += 2. * np.sum(1. / self.mol.mass[0:self.nat_qm] * \
np.sum(self.qmom[itraj, index_lk] * self.phase[itraj, ist], axis = 1))
self.K_lk[itraj, jst, ist] += 2. * np.sum(1. / self.mol.mass[0:self.nat_qm] * \
np.sum(self.qmom[itraj, index_lk] * self.phase[itraj, jst], axis = 1))
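# Summary of step 5: for trajectory I and state pair (l, k), the loop above accumulates
#     K_lk^(I) = 2 * sum_{atoms} (1 / M_atom) * sum_{dims} qmom_lk^(I) * phase_l^(I),
# i.e. twice the mass-weighted projection of the quantum momentum onto the accumulated
# phase; K_lk is then used in the CT force assembled in calculate_force() above.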
def write_md_output(self, itrajectory, unixmd_dir, istep):
""" Write output files
:param integer itrajectory: Index for trajectories
:param string unixmd_dir: PyUNIxMD directory
:param integer istep: Current MD step
"""
# Write the common part
super().write_md_output(unixmd_dir, istep)
# Write decoherence information
self.write_deco(itrajectory, unixmd_dir, istep)
def write_deco(self, itrajectory, unixmd_dir, istep):
""" Write CT-based decoherence information
:param integer itrajectory: Index for trajectories
:param string unixmd_dir: PyUNIxMD directory
:param integer istep: Current MD step
"""
# TODO
# Write time-derivative density matrix elements in DOTPOTD
#tmp = f'{istep + 1:9d}' + "".join([f'{pop:15.8f}' for pop in self.dotpopd])
#typewriter(tmp, unixmd_dir, "DOTPOPD", "a")
# Write auxiliary trajectories
if (self.verbosity >= 2):
tmp = f'{self.nat_qm:6d}\n{"":2s}Step:{istep + 1:6d}{"":12s}sigma_x{"":5s}sigma_y{"":5s}sigma_z{"":5s}count_ntrajs' + \
"".join(["\n" + f'{self.mol.symbols[iat]:5s}' + \
"".join([f'{self.sigma_lk[itrajectory, 0, iat, idim]:15.8f}' for idim in range(self.ndim)]) + \
f'{self.count_ntrajs[itrajectory, iat]:15.8f}' for iat in range(self.nat_qm)])
typewriter(tmp, unixmd_dir, f"SIGMA", "a")
tmp = f'{self.nat_qm:6d}\n{"":2s}Step:{istep + 1:6d}{"":12s}slope' + \
"".join(["\n" + f'{self.mol.symbols[iat]:5s}' + \
"".join([f'{self.slope_i[itrajectory, iat, idim]:15.8f}' for idim in range(self.ndim)]) for iat in range(self.nat_qm)])
typewriter(tmp, unixmd_dir, f"SLOPE", "a")
# Write quantum momenta
index_lk = -1
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
index_lk += 1
tmp = f'{self.nat_qm:6d}\n{"":2s}Step:{istep + 1:6d}{"":12s}Momentum center (au)' + \
"".join(["\n" + f'{self.mol.symbols[iat]:5s}' + \
"".join([f'{self.center_lk[itrajectory, index_lk, iat, idim]:15.8f}' for idim in range(self.ndim)]) for iat in range(self.nat_qm)])
typewriter(tmp, unixmd_dir, f"CENTER_{ist}_{jst}", "a")
tmp = f'{self.nat_qm:6d}\n{"":2s}Step:{istep + 1:6d}{"":12s}Momentum (au)' + \
"".join(["\n" + f'{self.mol.symbols[iat]:5s}' + \
"".join([f'{self.qmom[itrajectory, index_lk, iat, idim]:15.8f}' for idim in range(self.ndim)]) for iat in range(self.nat_qm)])
typewriter(tmp, unixmd_dir, f"QMOM_{ist}_{jst}", "a")
for ist in range(self.nst):
for jst in range(self.nst):
if (ist != jst):
tmp = f'{istep + 1:9d}{self.K_lk[itrajectory, ist, jst]:15.8f}'
typewriter(tmp, unixmd_dir, f"K_lk_{ist}_{jst}", "a")
# Write auxiliary variables
for ist in range(self.mol.nst):
# Write auxiliary phase
tmp = f'{self.nat_qm:6d}\n{"":2s}Step:{istep + 1:6d}{"":12s}Phase (au)' + \
"".join(["\n" + f'{self.mol.symbols[iat]:5s}' + \
"".join([f'{self.phase[itrajectory, ist, iat, idim]:15.8f}' for idim in range(self.ndim)]) for iat in range(self.nat_qm)])
typewriter(tmp, unixmd_dir, f"PHASE_{ist}", "a")
def print_init(self, qm, mm, restart):
""" Routine to print the initial information of dynamics
:param object qm: QM object containing on-the-fly calculation information
:param object mm: MM object containing MM calculation information
:param string restart: Option for controlling dynamics restarting
"""
# Print initial information about molecule, qm, mm and thermostat
super().print_init(qm, mm, restart)
# Print dynamics information for start line
dynamics_step_info = textwrap.dedent(f"""\
{"-" * 118}
{"Start Dynamics":>65s}
{"-" * 118}
""")
# Print INIT for each trajectory at each step
INIT = f" #INFO_TRAJ{'STEP':>8s}{'Kinetic(H)':>15s}{'Potential(H)':>15s}{'Total(H)':>13s}{'Temperature(K)':>17s}{'norm':>8s}"
dynamics_step_info += INIT
# Print INIT for averaged quantity at each step
DEBUG1 = f" #INFO_AVG{'STEP':>9s}"
for ist in range(self.nst):
DEBUG1 += f"{'BOPOP_':>13s}{ist}"
for ist in range(self.nst):
for jst in range(ist + 1, self.nst):
DEBUG1 += f"{'BOCOH_':>13s}{ist}_{jst}"
dynamics_step_info += "\n" + DEBUG1
print (dynamics_step_info, flush=True)
def print_traj(self, istep, itrajectory):
""" Routine to print each trajectory infomation at each step about dynamics
:param integer istep: Current MD step
:param integer itrajectory: Current trajectory
"""
ctemp = self.mol.ekin * 2. / float(self.mol.ndof) * au_to_K
norm = 0.
for ist in range(self.mol.nst):
norm += self.mol.rho.real[ist, ist]
# Print INFO for each step
INFO = f" INFO_{itrajectory+1}{istep + 1:>9d}"
INFO += f"{self.mol.ekin:14.8f}{self.mol.epot:15.8f}{self.mol.etot:15.8f}"
INFO += f"{ctemp:13.6f}"
INFO += f"{norm:11.5f}"
print (INFO, flush=True)
def print_step(self, istep):
""" Routine to print each steps infomation about dynamics
:param integer istep: Current MD step
"""
rho = np.zeros((self.nst, self.nst))
for itraj in range(self.ntrajs):
for ist in range(self.nst):
for jst in range(ist, self.nst):
if (ist == jst):
rho[ist, jst] += self.mols[itraj].rho[ist, jst].real
else:
rho[ist, jst] += self.mols[itraj].rho[ist, ist].real * self.mols[itraj].rho[jst, jst].real
rho /= self.ntrajs
DEBUG1 = f" INFO_AVG{istep + 1:9d}" + "".join([f'{rho[ist, ist]:15.8f}' for ist in range(self.nst)])
DEBUG1 += "".join([f'{rho[ist, jst]:15.8f}' for ist in range(self.nst) for jst in range(ist + 1, self.nst)])
print(DEBUG1, flush=True)
```
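For orientation, here is a minimal driver sketch for the `CT` class above. The `Molecule` and QM-interface constructors are not part of this excerpt, so `make_molecule` and `make_qm_interface` are hypothetical placeholders; only the `CT(...)` and `run(...)` calls follow the signatures defined in `src/mqc/ct.py`.
```python
# Hedged driver sketch: placeholder factories stand in for objects whose
# constructors are not shown in this file.
from mqc.ct import CT

ntrajs = 4
# Hypothetical: one Molecule object per coupled trajectory (constructor not shown here).
molecules = [make_molecule(sample=i) for i in range(ntrajs)]

md = CT(molecules, istates=[1] * ntrajs, dt=0.5, nsteps=100, nesteps=20,
        elec_object="coefficient", rho_threshold=0.01, unit_dt="fs", verbosity=2)

qm = make_qm_interface()  # hypothetical: e.g. one of the qm/ interfaces in this repo
md.run(qm, output_dir="./TRAJ", l_save_qm_log=False, l_save_scr=True)
```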
#### File: qm/gaussian09/dft.py
```python
from __future__ import division
from build.cioverlap import wf_overlap
from qm.gaussian09.gaussian09 import Gaussian09
from misc import au_to_A, eV_to_au, call_name
import os, shutil, re, textwrap, subprocess
import numpy as np
class DFT(Gaussian09):
""" Class for the (TD)DFT method of Gaussian 09
:param object molecule: Molecule object
:param string functional: Exchange-correlation functional information
:param string basis_set: Basis set information
:param string memory: Allocatable memory
:param string guess: Initial guess for SCF iterations
:param string guess_file: Initial guess file
:param string root_path: Path for Gaussian 09 root directory
:param integer nthreads: Number of threads in the calculations
:param string version: Version of Gaussian 09
"""
def __init__(self, molecule, nthreads=1, memory="1gb", functional="BLYP", basis_set="STO-3G", \
guess="Harris", guess_file="./g09.chk", root_path="./", version="Revision A.02"):
# Initialize Gaussian09 common variables
super(DFT, self).__init__(basis_set, memory, nthreads, root_path, version)
# Initialize Gaussian09 DFT variables
self.functional = functional
# Set initial guess for DFT calculation
self.guess = guess.lower()
self.guess_file = os.path.abspath(guess_file)
if not (self.guess in ["harris", "read"]):
error_message = "Invalid initial guess for DFT!"
error_vars = f"guess = {self.guess}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
# Set 'l_nacme' with respect to the computational method
molecule.l_nacme = True
# Re-calculation of excited state forces is not needed for ground state dynamics
if (molecule.nst > 1):
self.re_calc = True
else:
self.re_calc = False
# MO dimension, initialized later by reading Gaussian09 log
self.nbasis = 0
self.norb = 0
self.nfc = 0
self.nocc = 0
self.nvirt = 0
# Temporaries for NACME calculation, also initialized later if not allocated
# ao_overlap - the number of AOs, the number of AOs
# mo_coef - the number of MOs, the number of AOs
# ci_coef - the number of BO states, the number of occ, the number of virt
self.pos_old = []
self.ao_overlap = []
self.mo_coef_old = []
self.mo_coef_new = []
self.ci_coef_old = []
self.ci_coef_new = []
def get_data(self, molecule, base_dir, bo_list, dt, istep, calc_force_only):
""" Extract energy, gradient from (TD)DFT method
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer,list bo_list: List of BO states for BO calculation
:param double dt: Time interval
:param integer istep: Current MD step
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
self.copy_files(molecule, istep, calc_force_only)
super().get_data(base_dir, calc_force_only)
self.get_input(molecule, istep, bo_list, calc_force_only)
self.run_QM(base_dir, istep, bo_list)
self.extract_QM(molecule, istep, bo_list, dt, calc_force_only)
self.move_dir(base_dir)
def copy_files(self, molecule, istep, calc_force_only):
""" Copy necessary scratch files in previous step
:param object molecule: Molecule object
:param integer istep: Current MD step
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Copy required files for NACME
if (molecule.nst > 1 and not calc_force_only and istep >= 0):
if (istep == 0):
shutil.copy(os.path.join(self.scr_qm_dir, "g09.rwf"), \
os.path.join(self.scr_qm_dir, "../g09.rwf.pre"))
# Copy required files to read initial guess
if (self.guess == "read" and istep >= 0):
# After T = 0.0 s
shutil.copy(os.path.join(self.scr_qm_dir, "g09.chk"), \
os.path.join(self.scr_qm_dir, "../g09.chk.pre"))
def get_input(self, molecule, istep, bo_list, calc_force_only):
""" Generate Gaussian 09 input files: g09.inp
:param object molecule: Molecule object
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Read check-point file from previous step
if (self.guess == "read"):
if (istep == -1):
if (os.path.isfile(self.guess_file)):
# Copy guess file to current directory
shutil.copy(self.guess_file, os.path.join(self.scr_qm_dir, "g09.chk"))
restart = True
else:
# TODO : Printout about reading a checkpoint file for the initial guess
# print(f"( {self.qm_method}.{call_name()} ) Make the initial guess of density only for the 1st step.\n", flush=True)
restart = False
elif (istep >= 0):
# Move previous file to current directory
os.rename("../g09.chk.pre", "./g09.chk")
restart = True
elif (self.guess == "Harris"):
restart = False
if (calc_force_only):
restart = True
# Make 'g09.inp' file
input_g09 = ""
# Ground-state calculation
input_route = textwrap.dedent(f"""\
%nproc={self.nthreads}
%mem={self.memory}
%chk=g09.chk\n""")
input_route += textwrap.dedent(f"""\
# {self.functional}/{self.basis_set} nosymm""")
if (restart):
input_route += f" guess=read"
if (bo_list[0] == 0):
input_route += f" force"
if (calc_force_only):
input_route += f" geom=allcheck"
input_route += "\n\n"
input_g09 += input_route
if (not calc_force_only):
# Title section block
input_title = f"g09 input\n\n"
input_g09 += input_title
# Molecule specification block
input_molecule = textwrap.dedent(f"""\
{int(molecule.charge)} 1
""")
for iat in range(molecule.nat_qm):
list_pos = list(molecule.pos[iat] * au_to_A)
input_molecule += \
f"{molecule.symbols[iat]}{list_pos[0]:15.8f}{list_pos[1]:15.8f}{list_pos[2]:15.8f}\n"
input_molecule += "\n"
input_g09 += input_molecule
# Excited-state calculation
if (molecule.nst > 1 and not (calc_force_only and bo_list[0] == 0)):
input_route = textwrap.dedent(f"""\
--Link1--
%nproc={self.nthreads}
%mem={self.memory}
%chk=g09.chk\n""")
if (not calc_force_only):
input_route += f"""%rwf=g09.rwf\n"""
input_route += f"""# {self.functional}/{self.basis_set} td(Root={bo_list[0]}, Nstates={molecule.nst - 1})"""\
""" geom=allcheck guess=read nosymm"""
if (bo_list[0] > 0):
input_route += " force"
input_route += "\n\n"
input_g09 += input_route
# Write "doubled molecule" input
if (self.calc_coupling and molecule.nst > 1 and not calc_force_only and istep >= 0):
if (istep == 0):
os.rename('../g09.rwf.pre', './g09.rwf.pre')
# Stop the run after link L302, which calculates the overlap
# Keep running the job regardless of interatomic distances: IOp(2/12=3)
input_route = textwrap.dedent(f"""\
--Link1--
%kjob l302
%rwf=g09_double.rwf
# {self.functional}/{self.basis_set} IOp(2/12=3) nosymm\n\n""")
input_g09 += input_route
# Title section block
input_title = f"g09 double input\n\n"
input_g09 += input_title
# Molecule specification block
input_molecule = textwrap.dedent(f"""\
{2 * int(molecule.charge)} 1
""")
for iat in range(molecule.nat_qm):
list_pos = list(self.pos_old[iat] * au_to_A)
input_molecule += \
f"{molecule.symbols[iat]}{list_pos[0]:15.8f}{list_pos[1]:15.8f}{list_pos[2]:15.8f}\n"
for iat in range(molecule.nat_qm):
list_pos = list(molecule.pos[iat] * au_to_A)
input_molecule += \
f"{molecule.symbols[iat]}{list_pos[0]:15.8f}{list_pos[1]:15.8f}{list_pos[2]:15.8f}\n"
input_molecule += "\n"
input_g09 += input_molecule
file_name = "g09.inp"
with open(file_name, "w") as f:
f.write(input_g09)
def run_QM(self, base_dir, istep, bo_list):
""" Run (TD)DFT calculation and save the output files to qm_log directory
:param string base_dir: Base directory
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
"""
# Set environment variables
if (istep == -1):
os.environ["g09root"] = self.root_path
os.environ["GAUSS_SCDIR"] = self.scr_qm_dir
path_profile = os.path.join(self.root_path, "g09/bsd/g09.profile")
command = f'env -i sh -c "source {path_profile} && env"'
for line in subprocess.getoutput(command).split("\n"):
key, value = line.split("=", 1)
os.environ[key] = value
# Set run command
qm_command = os.path.join(self.root_path, "g09/g09")
command = f"{qm_command} < g09.inp > log"
# Run Gaussian09
os.system(command)
# Copy the output file to 'qm_log' directory
tmp_dir = os.path.join(base_dir, "qm_log")
if (os.path.exists(tmp_dir)):
log_step = f"log.{istep + 1}.{bo_list[0]}"
shutil.copy("log", os.path.join(tmp_dir, log_step))
def extract_QM(self, molecule, istep, bo_list, dt, calc_force_only):
""" Read the output files to get BO information
:param object molecule: Molecule object
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
:param double dt: Time interval
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
file_name = "log"
with open(file_name, "r") as f:
log = f.read()
# Check the convergence of the calculation
if ("Convergence failure" in log):
error_message = "SCF iteration not converged, please see the output carefully!"
error_vars = f"output file = {self.scr_qm_dir}/{file_name}"
raise Exception (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
# Energy
if (not calc_force_only):
# Read ground energy
energy = re.findall('SCF Done:\s+E\(\S+\)\s+=\s+([-]\S+)\s+A.U.', log)
energy = np.array(energy[0], dtype=float)
molecule.states[0].energy = energy
if (molecule.nst > 1):
energy = re.findall('Excited\sState\s+\w+:\s+\w+-\S+\s+(\S+)\s+eV', log)
energy = np.array(energy, dtype=float)
energy *= eV_to_au
for ist in range(1, molecule.nst):
molecule.states[ist].energy = molecule.states[0].energy + energy[ist - 1]
# Force
tmp_f = "Forces\s+\(Hartrees\/Bohr\)\n.+\n.+" \
+ "\n\s+\d*\s+\d*\s+([-]*\S+)\s+([-]*\S+)\s+([-]*\S+)" * molecule.nat_qm
force = re.findall(tmp_f, log)
force = np.array(force[0], dtype=float)
force = force.reshape(molecule.nat_qm, 3, order='C')
molecule.states[bo_list[0]].force = np.copy(force)
# NACME
if (self.calc_coupling and molecule.nst > 1 and not calc_force_only):
if (istep == -1):
self.init_buffer(molecule)
else:
self.CI_overlap(molecule, istep, dt)
# Save geometry in the buffer
self.pos_old = np.copy(molecule.pos)
def init_buffer(self, molecule):
""" Initialize buffer variables to get NACME
:param object molecule: Molecule object
"""
file_name = "log"
with open(file_name, "r") as f:
log = f.read()
self.nbasis = re.findall('NBasis=\s+(\d+)\s+', log)
self.nbasis = int(self.nbasis[0])
self.nfc = re.findall('NFC=\s+(\d+)\s+', log)
self.nfc = int(self.nfc[0])
self.nocc = re.findall('NOA=\s+(\d+)\s+', log)
self.nocc = int(self.nocc[0])
self.nvirt = re.findall('NVA=\s+(\d+)\s+', log)
self.nvirt = int(self.nvirt[0])
self.norb = self.nocc + self.nvirt
self.pos_old = np.zeros((molecule.nat_qm, molecule.ndim))
self.ao_overlap = np.zeros((self.nbasis, self.nbasis))
self.mo_coef_old = np.zeros((self.norb, self.nbasis))
self.mo_coef_new = np.zeros((self.norb, self.nbasis))
self.ci_coef_old = np.zeros((molecule.nst, self.nocc, self.nvirt))
self.ci_coef_new = np.zeros((molecule.nst, self.nocc, self.nvirt))
def CI_overlap(self, molecule, istep, dt):
""" Read the necessary files and calculate NACME from tdnac.c routine
note that only reading of several files is required in this method
:param object molecule: Molecule object
:param integer istep: Current MD step
:param double dt: Time interval
"""
path_rwfdump = os.path.join(self.root_path, "g09/rwfdump")
# Read overlap
self.ao_overlap = self.read_ao_overlap(path_rwfdump, "g09_double.rwf")
# Read mo coefficients
if (istep == 0):
self.mo_coef_old = self.read_mo_coef(path_rwfdump, "g09.rwf.pre")
self.mo_coef_new = self.read_mo_coef(path_rwfdump, "g09.rwf")
# Read CI coefficients
if (istep == 0):
self.ci_coef_old[1:] = self.read_xy_coef(molecule, path_rwfdump, "g09.rwf.pre")
self.ci_coef_new[1:] = self.read_xy_coef(molecule, path_rwfdump, "g09.rwf")
# Calculate wavefunction overlap with orbital scheme
wf_overlap(self, molecule, istep, dt)
def read_ao_overlap(self, path_rwfdump, fn_rwf):
""" Read a rwf file to obtain ao_overlap data
:param string path_rwfdump: The path for rwfdump binary
:param string fn_rwf: The name of the rwf file
"""
os.system(path_rwfdump + f" {fn_rwf} ao_overlap.dat 514R")
with open('ao_overlap.dat', "r") as f:
log = f.read()
tmp = re.findall('[-]?\d+\.\d+D[+-]\d\d', log)
tmp = [float(x.replace('D', 'e')) for x in tmp]
tmp_ovr = np.zeros((self.nbasis * 2, self.nbasis * 2))
cnt = 0
for ibasis in range(self.nbasis * 2):
for jbasis in range(ibasis + 1):
tmp_ovr[ibasis, jbasis] = tmp[cnt]
cnt += 1
tmp_ovr += np.transpose(tmp_ovr) - np.diag(np.diag(tmp_ovr))
# Slicing the components between t and t+dt
return tmp_ovr[:self.nbasis, self.nbasis:]
def read_mo_coef(self, path_rwfdump, fn_rwf):
""" Read a rwf file to obtain mo_coef data
:param string path_rwfdump: The path for rwfdump binary
:param string fn_rwf: The name of the rwf file
"""
os.system(path_rwfdump + f" {fn_rwf} mo_coef.dat 524R")
with open('mo_coef.dat', "r") as f:
log = f.read()
tmp = re.findall('[-]?\d+\.\d+D[+-]\d\d', log)
tmp = np.array([x.replace('D','e') for x in tmp], dtype=float)
tmp_mo = tmp.reshape(self.nbasis, self.nbasis)
return tmp_mo[self.nfc:self.nbasis]
def read_xy_coef(self, molecule, path_rwfdump, fn_rwf):
""" Read a rwf file to obtain xy_coef data
:param object molecule: Molecule object
:param string path_rwfdump: The path for rwfdump binary
:param string fn_rwf: The name of the rwf file
"""
os.system(path_rwfdump + f" {fn_rwf} xy_coef.dat 635R")
with open(f'xy_coef.dat', "r") as f:
log = f.read()
tmp = re.findall('[-]?\d+\.\S+[+-]\d+', log)
# Drop the first 12 dummy elements
tmp = tmp[12:]
# Gaussian 09 handles four times as many roots as the input NStates value.
# number of excitation functions => nocc * nvirt
# spin degrees of freedom => 2
# X+Y, X-Y => 2
roots = (molecule.nst - 1) * 4
num_coef = 4 * (self.nocc * self.nvirt) * roots
tmp = tmp[:num_coef]
tmp = np.array([x.replace('D','e') for x in tmp], dtype=float)
xpy, xmy = tmp.reshape(2, roots, 2, -1)
x = 0.5 * (xpy + xmy)
# Drop beta part and unrequested excited states
x = x[:(molecule.nst - 1), 0, :]
return x.reshape(-1, self.nocc, self.nvirt)
```
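As a standalone illustration of the parsing trick used in `read_ao_overlap` and `read_mo_coef` above, the sketch below converts the Fortran-style 'D' exponents printed by rwfdump into a NumPy array and rebuilds a symmetric matrix from its packed lower triangle. The helper names and the toy input are illustrative, not part of the module.
```python
import re
import numpy as np

def parse_rwfdump_floats(text):
    """Convert Fortran-style 'D' exponents dumped by rwfdump into a float array."""
    tokens = re.findall(r'[-]?\d+\.\d+D[+-]\d\d', text)
    return np.array([t.replace('D', 'e') for t in tokens], dtype=float)

def unpack_lower_triangle(values, n):
    """Rebuild a symmetric (n, n) matrix from its packed lower triangle."""
    mat = np.zeros((n, n))
    cnt = 0
    for i in range(n):
        for j in range(i + 1):
            mat[i, j] = values[cnt]
            cnt += 1
    return mat + mat.T - np.diag(np.diag(mat))

# Toy check: a 2x2 packed lower triangle written in Fortran notation.
sample = "0.10000000D+01 0.25000000D+00 0.10000000D+01"
print(unpack_lower_triangle(parse_rwfdump_floats(sample), 2))
```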
#### File: qm/turbomole/dft.py
```python
from __future__ import division
from qm.turbomole.turbomole import Turbomole
from misc import call_name
import os, shutil, re, textwrap
import numpy as np
class DFT(Turbomole):
""" Class for (TD)DFT method of Turbomole
:param object molecule: Molecule object
:param string functional: Exchange-correlation functional information
:param string basis_set: Basis set information
:param integer memory: Allocatable memory in the calculations
:param integer scf_max_iter: Maximum number of SCF iterations
:param integer scf_en_tol: Energy convergence for SCF iterations
:param integer cis_max_iter: Maximum number of CIS iterations
:param integer cis_en_tol: Energy convergence for CIS iterations
:param string root_path: Path for Turbomole root directory
:param integer nthreads: Number of threads in the calculations
:param string version: Version of Turbomole
"""
def __init__(self, molecule, functional="b-lyp", basis_set="SV(P)", memory=50, \
scf_max_iter=50, scf_en_tol=6, cis_max_iter=25, cis_en_tol=6, \
root_path="./", nthreads=1, version="6.4"):
# Initialize Turbomole common variables
super(DFT, self).__init__(functional, basis_set, memory, root_path, nthreads, version)
self.scf_max_iter = scf_max_iter
self.scf_en_tol = scf_en_tol
self.cis_max_iter = cis_max_iter
self.cis_en_tol = cis_en_tol
# Set 'l_nacme' with respect to the computational method
# TDDFT cannot produce NACs between excited states,
# so the NACME would have to come from CIoverlap, but Turbomole does not provide the AO overlap.
# Hence, CIoverlap is not available yet.
molecule.l_nacme = False
# Re-calculation of excited state forces is not needed for ground state dynamics
self.re_calc = False
def get_data(self, molecule, base_dir, bo_list, dt, istep, calc_force_only):
""" Extract energy, gradient and nonadiabatic couplings from (TD)DFT method
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer,list bo_list: List of BO states for BO calculation
:param double dt: Time interval
:param integer istep: Current MD step
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
super().get_data(base_dir, calc_force_only)
self.write_xyz(molecule)
self.get_input(molecule, bo_list)
self.run_QM(molecule, base_dir, istep, bo_list)
self.extract_QM(molecule, bo_list)
self.move_dir(base_dir)
def get_input(self, molecule, bo_list):
""" Generate Turbomole input files: define.in, control, etc
:param object molecule: Molecule object
:param integer,list bo_list: List of BO states for BO calculation
"""
if (self.calc_coupling):
error_message = "Turbomole supports only BOMD!"
error_vars = f"qm_prog.qm_method = {qm.qm_prog}.{qm.qm_method}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
x2t_command = os.path.join(self.scripts_path, "x2t")
command = f"{x2t_command} geometry.xyz > coord"
os.system(command)
input_define = ""
# Job title
input_title = textwrap.dedent(f"""\
""")
input_define += input_title
# Molecule geometry
input_geom = textwrap.dedent(f"""\
a coord
*
no
""")
input_define += input_geom
# Basis set
input_bss = textwrap.dedent(f"""\
b all {self.basis_set}
*
""")
input_define += input_bss
# Occupation number and MO
input_initMO = textwrap.dedent(f"""\
eht
{int(molecule.charge)}
""")
input_define += input_initMO
# Functional
input_functional = textwrap.dedent(f"""\
dft
func
{self.functional}
on
*
""")
input_define += input_functional
# TODO: TDA or RPA (rpas -> ciss)
# Excited state calculation
input_ES = textwrap.dedent(f"""\
ex
rpas
q
a {molecule.nst - 1}
q
q
*
""")
input_define += input_ES
file_name = "define.in"
with open(file_name, "w") as f:
f.write(input_define)
define_command = os.path.join(self.qm_path, "define")
command = f"{define_command} < {file_name} >& define_log"
os.system(command)
file_name = "control"
with open(file_name, "r") as f:
control_prev = f.readlines()
control = ""
# Root state to calculate gradient
control += f"$exopt {bo_list[0]}\n"
# Memory to use
control += f"$maxcor {self.memory}\n"
iline = 0
# SCF options such as iteration and energy tolerance
while "$scfiterlimit" not in control_prev[iline]:
control += control_prev[iline]
iline += 1
control += f"$scfiterlimit {self.scf_max_iter}\n"
iline += 1
control += f"$scfconv {self.scf_en_tol}\n"
if (molecule.nst > 1):
control += f"$rpacor {self.memory}\n"
control += f"$rpaconv {self.cis_en_tol}\n"
control += f"$escfiterlimit {self.cis_max_iter}\n"
# Calculate energy gradient
while "$dft" not in control_prev[iline]:
control += control_prev[iline]
iline += 1
control += control_prev[iline]
control += " weight derivatives\n"
iline += 1
while iline != len(control_prev):
control += control_prev[iline]
iline += 1
with open(file_name, "w") as f:
f.write(control)
def run_QM(self, molecule, base_dir, istep, bo_list):
""" Run (TD)DFT calculation and save the output files to qm_log directory
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
"""
# Run dscf
scf_command = os.path.join(self.qm_path, "dscf")
command = f"{scf_command} >& dscf.out"
os.system(command)
if (bo_list[0] == 0):
grad_command = os.path.join(self.qm_path, "grad")
command = f"{grad_command} >& grad.out"
os.system(command)
if (molecule.nst > 1):
grad_command = os.path.join(self.qm_path, "escf")
command = f"{grad_command} >& escf.out"
os.system(command)
else:
egrad_command = os.path.join(self.qm_path, "egrad")
command = f"{egrad_command} >& egrad.out"
os.system(command)
# Copy the output file to 'qm_log' directory
tmp_dir = os.path.join(base_dir, "qm_log")
if (os.path.exists(tmp_dir)):
shutil.copy("dscf.out", os.path.join(tmp_dir, f"dscf.out.{istep + 1}"))
if (bo_list[0] == 0):
shutil.copy("grad.out", os.path.join(tmp_dir, f"grad.out.{istep + 1}"))
if (molecule.nst > 1):
shutil.copy("escf.out", os.path.join(tmp_dir, f"escf.out.{istep + 1}"))
else:
shutil.copy("egrad.out", os.path.join(tmp_dir, f"egrad.out.{istep + 1}"))
def extract_QM(self, molecule, bo_list):
""" Read the output files to get BO information
:param object molecule: Molecule object
:param integer,list bo_list: List of BO states for BO calculation
"""
file_name = "gradient"
with open(file_name, "r") as f:
bo_out = f.read()
bo_out = bo_out.replace('D', 'E')
# Energy of running state
find_e = "energy =\s+([-]\d+[.]\d+)"
energy = re.findall(find_e, bo_out)
energy = np.array(energy)
energy = energy.astype(float)
molecule.states[bo_list[0]].energy = energy[0]
# Force of running state
find_grad = "\s+([-]*\d*[.]\d+[E][-|+]\d+)"
grad = re.findall(find_grad, bo_out)
grad = np.array(grad)
grad = grad.astype(float)
grad = grad.reshape(molecule.nat_qm, 3, order='C')
molecule.states[bo_list[0]].force = - np.copy(grad)
# Energy of other states (except running state)
if (molecule.nst > 1):
if (bo_list[0] != 0):
file_name = "egrad.out"
else:
file_name = "escf.out"
with open(file_name, "r") as f:
bo_out = f.read()
find_e = 'Total energy:\s+([-]\d+[.]\d+)'
energy = re.findall(find_e, bo_out)
energy = np.array(energy)
energy = energy.astype(float)
for ist in range(molecule.nst):
if (ist != bo_list[0]):
molecule.states[ist].energy = energy[ist]
# NACME
# Turbomole cannot provide NACVs between excited states
pass
```
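The control-file rewrite in `DFT.get_input()` above is easy to miss inside the class, so here is the same pattern as a standalone sketch: new keywords are prepended, the SCF settings are replaced at the existing `$scfiterlimit` line, and `weight derivatives` is appended right after `$dft`. The helper name and sample control text are illustrative only, and the excited-state keywords are omitted.
```python
def patch_turbomole_control(control_lines, scf_max_iter=50, scf_en_tol=6, memory=50):
    # Prepend the gradient target and memory keywords (values are illustrative).
    patched = ["$exopt 1\n", f"$maxcor {memory}\n"]
    iline = 0
    # Copy everything up to $scfiterlimit, then replace the SCF settings.
    while "$scfiterlimit" not in control_lines[iline]:
        patched.append(control_lines[iline])
        iline += 1
    patched.append(f"$scfiterlimit {scf_max_iter}\n")
    iline += 1
    patched.append(f"$scfconv {scf_en_tol}\n")
    # Copy up to and including $dft, then enable weight derivatives.
    while "$dft" not in control_lines[iline]:
        patched.append(control_lines[iline])
        iline += 1
    patched.append(control_lines[iline])
    patched.append(" weight derivatives\n")
    iline += 1
    patched.extend(control_lines[iline:])
    return "".join(patched)

sample = ["$title\n", "$scfiterlimit 30\n", "$dft\n", "   functional b-lyp\n", "$end\n"]
print(patch_turbomole_control(sample))
```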
{
"source": "jkha-unist/rmsd",
"score": 2
}
#### File: rmsd/tests/test_kabsch_weighted.py
```python
import pathlib
import numpy as np
from constants import RESOURCE_PATH
import rmsd
def test_kabash_fit_pdb():
filename_p = pathlib.PurePath(RESOURCE_PATH, "ci2_1r+t.pdb")
filename_q = pathlib.PurePath(RESOURCE_PATH, "ci2_1.pdb")
p_atoms, p_coord = rmsd.get_coordinates_pdb(filename_p)
q_atoms, q_coord = rmsd.get_coordinates_pdb(filename_q)
new_p_coord = rmsd.kabsch_fit(p_coord, q_coord)
np.testing.assert_array_almost_equal(q_coord[0], new_p_coord[0], decimal=2)
def test_kabash_weighted_fit_pdb():
filename_1 = pathlib.PurePath(RESOURCE_PATH, "ci2_12.pdb")
filename_2 = pathlib.PurePath(RESOURCE_PATH, "ci2_2.pdb")
p_atoms, p_coord = rmsd.get_coordinates_pdb(filename_1)
q_atoms, q_coord = rmsd.get_coordinates_pdb(filename_2)
weights = np.zeros(len(p_coord))
residue13_start = 200
residue24_start = 383
weights[residue13_start:residue24_start] = 1.0
new_p_coord = rmsd.kabsch_fit(p_coord, q_coord, weights)
np.testing.assert_array_almost_equal(
q_coord[300], new_p_coord[300], decimal=2
)
```
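The tests above only exercise `rmsd.kabsch_fit`; its implementation is not shown here. As a point of reference, the following is an independent minimal sketch of a weighted Kabsch fit (weighted centroids, weighted covariance, SVD with a proper-rotation correction). The function name is mine, and the rmsd package may differ in details.
```python
import numpy as np

def kabsch_fit_weighted(P, Q, W=None):
    """Rotate/translate P onto Q using per-atom weights (reference sketch)."""
    n = len(P)
    W = np.ones(n) if W is None else np.asarray(W, dtype=float)
    w = W / W.sum()
    # Weighted centroids and centered coordinates
    cp, cq = w @ P, w @ Q
    P0, Q0 = P - cp, Q - cq
    # Weighted covariance, SVD, and proper-rotation (reflection) correction
    H = (P0 * w[:, None]).T @ Q0
    U, _, Vt = np.linalg.svd(H)
    d = np.sign(np.linalg.det(Vt.T @ U.T))
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    # Apply the fitted rotation/translation to every atom of P
    return (R @ P0.T).T + cq

# Self-check: a rotated and translated copy should map back onto the reference.
rng = np.random.default_rng(0)
Q = rng.normal(size=(5, 3))
theta = 0.3
Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0, 0.0, 1.0]])
P = Q @ Rz.T + np.array([1.0, -2.0, 0.5])
print(np.allclose(kabsch_fit_weighted(P, Q), Q, atol=1e-8))  # True
```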
{
"source": "jkha-unist/unixmd",
"score": 2
}
#### File: qm/dftbplus/dftb.py
```python
from __future__ import division
from build.cioverlap import *
from qm.dftbplus.dftbplus import DFTBplus
from qm.dftbplus.dftbpar import spin_w, spin_w_lc, onsite_uu, onsite_ud, max_l
from misc import data, eps, eV_to_au, call_name
import os, shutil, re, textwrap
import numpy as np
class DFTB(DFTBplus):
""" Class for (TD)DFTB method of DFTB+
:param object molecule: Molecule object
:param boolean l_scc: Include self-consistent charge (SCC) scheme
:param double scc_tol: Stopping criteria for the SCC iterations
:param integer scc_max_iter: Maximum number of SCC iterations
:param boolean l_onsite: Include onsite correction to SCC term
:param boolean l_range_sep: Include long-range corrected functional
:param string lc_method: Algorithms for LC-DFTB
:param boolean l_spin_pol: Include spin-polarisation scheme
:param double unpaired_elec: Number of unpaired electrons
:param string guess: Initial guess method for SCC scheme
:param string guess_file: Initial guess file for charges
:param double elec_temp: Electronic temperature in Fermi-Dirac scheme
:param string mixer: Charge mixing method used in DFTB
:param string ex_symmetry: Symmetry of excited state in TDDFTB
:param double e_window: Energy window for TDDFTB. Increases efficiency of NACME calculation.
:param integer,list k_point: Number of k-point samplings
:param boolean l_periodic: Use periodicity in the calculations
:param double,list cell_length: The lattice vectors of periodic unit cell
:param string sk_path: Path for Slater-Koster files
:param string install_path: Path for DFTB+ install directory
:param boolean mpi: Use MPI parallelization
:param string mpi_path: Path for MPI binary
:param integer nthreads: Number of threads in the calculations
:param string version: Version of DFTB+
"""
def __init__(self, molecule, l_scc=True, scc_tol=1E-6, scc_max_iter=100, l_onsite=False, \
l_range_sep=False, lc_method="MatrixBased", l_spin_pol=False, unpaired_elec=0., guess="h0", \
guess_file="./charges.bin", elec_temp=0., mixer="Broyden", ex_symmetry="singlet", e_window=0., \
k_point=[1, 1, 1], l_periodic=False, cell_length=[0., 0., 0., 0., 0., 0., 0., 0., 0.,], \
sk_path="./", install_path="./", mpi=False, mpi_path="./", nthreads=1, version="20.1"):
# Initialize DFTB+ common variables
super(DFTB, self).__init__(molecule, sk_path, install_path, nthreads, version)
# Initialize DFTB+ DFTB variables
self.l_scc = l_scc
self.scc_tol = scc_tol
self.scc_max_iter = scc_max_iter
self.l_onsite = l_onsite
self.l_range_sep = l_range_sep
self.lc_method = lc_method.lower()
self.l_spin_pol = l_spin_pol
self.unpaired_elec = unpaired_elec
# Set initial guess for SCC term
self.guess = guess.lower()
self.guess_file = guess_file
if not (self.guess in ["h0", "read"]):
error_message = "Invalid initial guess for DFTB!"
error_vars = f"guess = {self.guess}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
self.elec_temp = elec_temp
self.mixer = mixer.lower()
self.ex_symmetry = ex_symmetry.lower()
self.e_window = e_window
self.k_point = k_point
self.l_periodic = l_periodic
self.a_axis = np.copy(cell_length[0:3])
self.b_axis = np.copy(cell_length[3:6])
self.c_axis = np.copy(cell_length[6:9])
# Check excitation symmetry in TDDFTB
# TODO : Currently, allows only singlet excited states with TDDFTB
# if not (self.ex_symmetry in ["singlet", "triplet"]):
if (not self.ex_symmetry == "singlet"):
error_message = "Invalid symmetry of excited states for TDDFTB given!"
error_vars = f"ex_symmetry = {self.ex_symmetry}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
self.mpi = mpi
self.mpi_path = mpi_path
# Set 'l_nacme' and 're_calc' with respect to the computational method
# TDDFTB does not produce NACs, so we have to get the NACME from CIoverlap.
# TDDFTB cannot compute the gradients of several states simultaneously.
molecule.l_nacme = True
self.re_calc = True
# Calculate number of basis for current system
# Set new variable to decide the position of basis functions in terms of atoms
# DFTB method considers only valence electrons, so core electrons should be removed
core_elec = 0.
self.nbasis = 0
self.check_atom = [0]
for iat in range(molecule.nat_qm):
# Check number of basis functions with respect to maximum angular momentum
max_ang = max_l[molecule.symbols[iat]]
if (max_ang == 's'):
self.nbasis += 1
elif (max_ang == 'p'):
self.nbasis += 4
elif (max_ang == 'd'):
self.nbasis += 9
else:
error_message = "Number of basis for f orbital not implemented, see '$PYUNIXMDHOME/src/qm/dftbplus/dftb.py'!"
error_vars = f"current atom = {molecule.symbols[iat]}, max_ang = {max_ang}"
raise NotImplementedError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
self.check_atom.append(self.nbasis)
# Check number of core electrons with respect to atomic number
sym_index = list(data.keys()).index(molecule.symbols[iat])
if (sym_index > 0 and sym_index <= 2):
core_elec += 0.
elif (sym_index > 2 and sym_index <= 10):
core_elec += 2.
elif (sym_index > 10 and sym_index <= 18):
core_elec += 10.
elif (sym_index > 18 and sym_index <= 36):
core_elec += 18.
elif (sym_index > 36 and sym_index <= 54):
core_elec += 36.
else:
error_message = "Core electrons for current element not implemented, see '$PYUNIXMDHOME/src/qm/dftbplus/dftb.py'!"
error_vars = f"current atom = {molecule.symbols[iat]}, sym_index = {sym_index}"
raise NotImplementedError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
# Set new variable to decide the position of atoms in terms of basis functions
self.check_basis = []
for ibasis in range(self.nbasis):
for iat in range(molecule.nat_qm):
ind_a = self.check_atom[iat] + 1
ind_b = self.check_atom[iat + 1]
if (ibasis + 1 >= ind_a and ibasis + 1 <= ind_b):
self.check_basis.append(iat + 1)
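# Worked example (illustrative): for water ordered as [O, H, H], O has max_l 'p'
# (4 basis functions) and each H has max_l 's' (1 each), so nbasis = 6,
# check_atom = [0, 4, 5, 6] and check_basis = [1, 1, 1, 1, 2, 3], i.e. basis
# functions 1-4 belong to atom 1 and functions 5 and 6 to atoms 2 and 3.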
# Initialize NACME variables
# There are no core orbitals in TDDFTB (fixed occupations)
# nocc is the number of occupied orbitals and nvirt is the number of virtual orbitals
self.norb = self.nbasis
self.nocc = int(int(molecule.nelec - core_elec) / 2)
self.nvirt = self.norb - self.nocc
# Replace norb by arrays containing the limits of the for loops.
# For energy window calculations loops will not go from (0 to nocc/nvirt) or (0 to norb)
# but from (nocc_min to nocc/0 to nvirt_max) or (nocc_min to norb).
self.orb_ini = np.zeros(1, dtype=np.int32)
self.orb_final = np.zeros(1, dtype=np.int32)
self.orb_final[0] = self.norb
if (self.e_window > eps):
# Initialize with swapped minimal/maximal values so that reading SPX.DAT can set the actual minimal/maximal orbital indices.
self.orb_ini[0] = self.norb
self.orb_final[0] = 0
self.ao_overlap = np.zeros((self.nbasis, self.nbasis))
self.mo_coef_old = np.zeros((self.norb, self.nbasis))
self.mo_coef_new = np.zeros((self.norb, self.nbasis))
self.ci_coef_old = np.zeros((molecule.nst, self.nocc, self.nvirt))
self.ci_coef_new = np.zeros((molecule.nst, self.nocc, self.nvirt))
def get_data(self, molecule, base_dir, bo_list, dt, istep, calc_force_only):
""" Extract energy, gradient and nonadiabatic couplings from (TD)DFTB method
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer,list bo_list: List of BO states for BO calculation
:param double dt: Time interval
:param integer istep: Current MD step
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
self.copy_files(molecule, istep, calc_force_only)
super().get_data(base_dir, calc_force_only)
self.write_xyz(molecule)
self.get_input(molecule, istep, bo_list, calc_force_only)
self.run_QM(molecule, base_dir, istep, bo_list, calc_force_only)
self.extract_QM(molecule, base_dir, istep, bo_list, dt, calc_force_only)
self.move_dir(base_dir)
def copy_files(self, molecule, istep, calc_force_only):
""" Copy necessary scratch files in previous step
:param object molecule: Molecule object
:param integer istep: Current MD step
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Copy required files for NACME
if (self.calc_coupling and not calc_force_only and istep >= 0 and molecule.nst > 1):
# After T = 0.0 s
shutil.copy(os.path.join(self.scr_qm_dir, "geometry.xyz"), \
os.path.join(self.scr_qm_dir, "../geometry.xyz.pre"))
if (istep == 0):
shutil.copy(os.path.join(self.scr_qm_dir, "eigenvec.bin"), \
os.path.join(self.scr_qm_dir, "../eigenvec.bin.pre"))
shutil.copy(os.path.join(self.scr_qm_dir, "SPX.DAT"), \
os.path.join(self.scr_qm_dir, "../SPX.DAT.pre"))
shutil.copy(os.path.join(self.scr_qm_dir, "XplusY.DAT"), \
os.path.join(self.scr_qm_dir, "../XplusY.DAT.pre"))
# Copy required files to read initial guess
if (self.guess == "read" and istep >= 0):
# After T = 0.0 s
shutil.copy(os.path.join(self.scr_qm_dir, "charges.bin"), \
os.path.join(self.scr_qm_dir, "../charges.bin.pre"))
def get_input(self, molecule, istep, bo_list, calc_force_only):
""" Generate DFTB+ input files: geometry.gen, dftb_in.hsd
:param object molecule: Molecule object
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Make 'geometry.gen' file
os.system("xyz2gen geometry.xyz")
if (self.l_periodic):
# Substitute C with S in the first line
file_be = open('geometry.gen', 'r')
file_af = open('tmp.gen', 'w')
first_row = True
for row in file_be:
if (first_row):
row = f'{molecule.nat_qm} S\n'
first_row = False
file_af.write(row)
# Add gamma-point and cell lattice information
geom_periodic = textwrap.dedent(f"""\
{0.0:15.8f} {0.0:15.8f} {0.0:15.8f}
{self.a_axis[0]:15.8f} {self.a_axis[1]:15.8f} {self.a_axis[2]:15.8f}
{self.b_axis[0]:15.8f} {self.b_axis[1]:15.8f} {self.b_axis[2]:15.8f}
{self.c_axis[0]:15.8f} {self.c_axis[1]:15.8f} {self.c_axis[2]:15.8f}
""")
file_af.write(geom_periodic)
file_be.close()
file_af.close()
os.rename('tmp.gen', 'geometry.gen')
# Make 'double.gen' file for CIoverlap in TDDFTB
# In this case, we do not need to consider periodicity
if (self.calc_coupling and not calc_force_only and istep >= 0 and molecule.nst > 1):
# Move previous files to current directory
os.rename('../geometry.xyz.pre', './geometry.xyz.pre')
if (istep == 0):
os.rename('../eigenvec.bin.pre', './eigenvec.bin.pre')
os.rename('../SPX.DAT.pre', './SPX.DAT.pre')
os.rename('../XplusY.DAT.pre', './XplusY.DAT.pre')
# Open 'geometry.xyz.pre'
file_af = open('double.xyz', 'w')
file_be = open('geometry.xyz.pre', 'r')
first_row = True
for row in file_be:
if (first_row):
row = f'{molecule.nat_qm * 2}\n'
first_row = False
file_af.write(row)
file_be.close()
# Open 'geometry.xyz'
file_be = open('geometry.xyz', 'r')
iline = 1
for row in file_be:
if (iline > 2):
file_af.write(row)
iline += 1
file_be.close()
file_af.close()
os.system("xyz2gen double.xyz")
# Make 'dftb_in.hsd' file
input_dftb = ""
# Geometry Block
input_geom = textwrap.dedent(f"""\
Geometry = GenFormat{{
<<< 'geometry.gen'
}}
""")
input_dftb += input_geom
# Hamiltonian Block
input_ham_init = textwrap.dedent(f"""\
Hamiltonian = DFTB{{
""")
input_dftb += input_ham_init
# SCC-DFTB option
if (self.l_scc):
input_ham_scc = textwrap.indent(textwrap.dedent(f"""\
SCC = Yes
SCCTolerance = {self.scc_tol}
MaxSCCIterations = {self.scc_max_iter}
Mixer = {self.mixer}{{}}
"""), " ")
input_dftb += input_ham_scc
# Onsite-corrected DFTB (OC-DFTB) option
if (self.l_onsite):
onsite_const_uu = ("\n" + " " * 18).join([f" {itype}uu = {{ {onsite_uu[f'{itype}']} }}" for itype in self.atom_type])
onsite_const_ud = ("\n" + " " * 18).join([f" {itype}ud = {{ {onsite_ud[f'{itype}']} }}" for itype in self.atom_type])
input_ham_oc = textwrap.indent(textwrap.dedent(f"""\
OnsiteCorrection = {{
{onsite_const_uu}
{onsite_const_ud}
}}
"""), " ")
input_dftb += input_ham_oc
# Long-range corrected DFTB (LC-DFTB) option
if (self.l_range_sep):
input_ham_lc = textwrap.indent(textwrap.dedent(f"""\
RangeSeparated = LC{{
Screening = {self.lc_method}{{}}
}}
"""), " ")
input_dftb += input_ham_lc
# Spin-polarized DFTB option
if (self.l_spin_pol and molecule.nst == 1):
input_ham_spin = textwrap.dedent(f"""\
SpinPolarisation = Colinear{{
UnpairedElectrons = {self.unpaired_elec}
}}
""")
input_dftb += input_ham_spin
# Read atomic spin constants used in spin-polarized DFTB or TDDFTB
# TODO : Currently, allows only singlet excited states with TDDFTB
# if (self.l_spin_pol or self.ex_symmetry == "triplet"):
if (self.l_spin_pol and molecule.nst == 1):
if (self.l_range_sep):
spin_constant = ("\n" + " " * 18).join([f" {itype} = {{ {spin_w_lc[f'{itype}']} }}" for itype in self.atom_type])
else:
spin_constant = ("\n" + " " * 18).join([f" {itype} = {{ {spin_w[f'{itype}']} }}" for itype in self.atom_type])
input_ham_spin_w = textwrap.indent(textwrap.dedent(f"""\
SpinConstants = {{
ShellResolvedSpin = Yes
{spin_constant}
}}
"""), " ")
input_dftb += input_ham_spin_w
# Read 'charges.bin' from previous step
if (self.guess == "read"):
if (istep == -1):
if (os.path.isfile(self.guess_file)):
# Copy guess file to current directory
shutil.copy(self.guess_file, os.path.join(self.scr_qm_dir, "charges.bin"))
restart = "Yes"
else:
restart = "No"
elif (istep >= 0):
# Move previous file to current directory
os.rename("../charges.bin.pre", "./charges.bin")
restart = "Yes"
elif (self.guess == "h0"):
restart = "No"
# Read 'charges.bin' for surface hopping dynamics when hop occurs
if (calc_force_only):
restart = "Yes"
input_ham_restart = textwrap.indent(textwrap.dedent(f"""\
ReadInitialCharges = {restart}
"""), " ")
input_dftb += input_ham_restart
# TODO: for QM/MM, point_charge??
if (self.l_periodic):
num_k_point = np.sum(self.k_point)
if (num_k_point == 3):
# gamma-point sampling
input_ham_periodic = textwrap.indent(textwrap.dedent(f"""\
KPointsAndWeights = {{
0.0 0.0 0.0 1.0
}}
"""), " ")
else:
# K-point sampling
shift_vector = [0.5 if (ik % 2 == 0) else 0 for ik in self.k_point]
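# e.g. self.k_point = [4, 4, 1] yields shift_vector = [0.5, 0.5, 0],
# i.e. axes with an even number of k-points are shifted by half a grid spacing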
input_ham_periodic = textwrap.indent(textwrap.dedent(f"""\
KPointsAndWeights = SupercellFolding{{
{self.k_point[0]} 0.0 0.0
0.0 {self.k_point[1]} 0.0
0.0 0.0 {self.k_point[2]}
{shift_vector[0]} {shift_vector[1]} {shift_vector[2]}
}}
"""), " ")
input_dftb += input_ham_periodic
angular_momentum = ("\n" + " " * 10).join([f" {itype} = '{max_l[f'{itype}']}'" for itype in self.atom_type])
input_ham_basic = textwrap.dedent(f"""\
Charge = {molecule.charge}
Filling = Fermi{{
Temperature[K] = {self.elec_temp}
}}
MaxAngularMomentum = {{
{angular_momentum}
}}
SlaterKosterFiles = Type2FileNames{{
Prefix = '{self.sk_path}'
Separator = '-'
Suffix = '.skf'
LowerCaseTypeName = No
}}
}}
""")
input_dftb += input_ham_basic
# Analysis Block
input_analysis = textwrap.dedent(f"""\
Analysis = {{
CalculateForces = Yes
WriteBandOut = Yes
WriteEigenvectors = Yes
MullikenAnalysis = Yes
}}
""")
input_dftb += input_analysis
# Options Block
input_options = textwrap.dedent(f"""\
Options = {{
WriteDetailedXml = Yes
WriteDetailedOut = Yes
TimingVerbosity = -1
}}
""")
input_dftb += input_options
# ExcitedState Block
if (molecule.nst > 1):
# Calculate excited state force for target state
if (bo_list[0] > 0):
ex_force = "Yes"
rst = bo_list[0]
else:
ex_force = "No"
rst = bo_list[0] + 1
# Set number of excitations in TDDFTB
# This part can be modified by users
if (molecule.nat_qm <= 5):
num_ex = molecule.nst + 2
elif (molecule.nat_qm > 5 and molecule.nat_qm <= 15):
num_ex = 2 * molecule.nst + 2
else:
num_ex = 3 * molecule.nst + 2
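# Worked example of the heuristic above: with molecule.nst = 3, a system with
# nat_qm <= 5 requests num_ex = 5, one with 5 < nat_qm <= 15 requests 8,
# and a larger one requests 11 excitations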
# Write XplusY data?
if (self.calc_coupling):
xpy = "Yes"
else:
xpy = "No"
input_excited = textwrap.dedent(f"""\
ExcitedState = Casida{{
NrOfExcitations = {num_ex}
StateOfInterest = {rst}
Symmetry = {self.ex_symmetry}
WriteTransitions = Yes
WriteSPTransitions = {xpy}
WriteMulliken = Yes
WriteXplusY = {xpy}
EnergyWindow [eV] = {self.e_window}
ExcitedStateForces = {ex_force}
}}
""")
input_dftb += input_excited
# ParserOptions Block
if (self.version == "19.1"):
parser_version = 7
elif (self.version == "20.1"):
parser_version = 8
input_parseroptions = textwrap.dedent(f"""\
ParserOptions = {{
ParserVersion = {parser_version}
}}
""")
input_dftb += input_parseroptions
# Parallel Block
if (self.mpi):
if (self.l_spin_pol and self.nthreads > 1):
groups = 2
else:
groups = 1
input_parallel = textwrap.dedent(f"""\
Parallel = {{
Groups = {groups}
UseOmpThreads = No
Blacs = BlockSize {{ 32 }}
}}
""")
input_dftb += input_parallel
# Write 'dftb_in.hsd.geom' file
file_name = f"dftb_in.hsd.geom.{bo_list[0]}"
with open(file_name, "w") as f:
f.write(input_dftb)
# Write 'dftb_in.hsd.double' file
if (self.calc_coupling and not calc_force_only and istep >= 0 and molecule.nst > 1):
# New input for dftb
input_dftb = ""
# Geometry Block
input_geom = textwrap.dedent(f"""\
Geometry = GenFormat{{
<<< 'double.gen'
}}
""")
input_dftb += input_geom
input_dftb += input_ham_init
input_dftb += input_ham_basic
# Options Block
input_options = textwrap.dedent(f"""\
Options = {{
WriteDetailedXml = Yes
WriteDetailedOut = Yes
WriteHS = Yes
TimingVerbosity = -1
}}
""")
input_dftb += input_options
file_name = "dftb_in.hsd.double"
with open(file_name, "w") as f:
f.write(input_dftb)
def run_QM(self, molecule, base_dir, istep, bo_list, calc_force_only):
""" Run (TD)DFTB calculation and save the output files to qm_log directory
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Set run command
qm_command = os.path.join(self.qm_path, "dftb+")
if (self.mpi):
# MPI setting
os.environ["OMP_NUM_THREADS"] = "1"
mpi_command = os.path.join(self.mpi_path, "mpirun")
command = f"{mpi_command} -np {self.nthreads} {qm_command} > log"
else:
# OpenMP setting
os.environ["OMP_NUM_THREADS"] = f"{self.nthreads}"
command = f"{qm_command} > log"
# Run DFTB+ for calculation of overlap matrix
if (self.calc_coupling and not calc_force_only and istep >= 0 and molecule.nst > 1):
shutil.copy("dftb_in.hsd.double", "dftb_in.hsd")
os.system(command)
# Copy dftb_in.hsd for target state
file_name = f"dftb_in.hsd.geom.{bo_list[0]}"
shutil.copy(file_name, "dftb_in.hsd")
# Run DFTB+ method for molecular dynamics
os.system(command)
# Copy detailed.out for target state
file_name = f"detailed.out.{bo_list[0]}"
shutil.copy("detailed.out", file_name)
# Copy the output file to 'qm_log' directory
tmp_dir = os.path.join(base_dir, "qm_log")
if (os.path.exists(tmp_dir)):
detailed_out_step = f"detailed.out.{istep + 1}.{bo_list[0]}"
shutil.copy("detailed.out", os.path.join(tmp_dir, detailed_out_step))
log_step = f"log.{istep + 1}.{bo_list[0]}"
shutil.copy("log", os.path.join(tmp_dir, log_step))
def extract_QM(self, molecule, base_dir, istep, bo_list, dt, calc_force_only):
""" Read the output files to get BO information
:param object molecule: Molecule object
:param string base_dir: Base directory
:param integer istep: Current MD step
:param integer,list bo_list: List of BO states for BO calculation
:param double dt: Time interval
:param boolean calc_force_only: Logical to decide whether calculate force only
"""
# Read 'detailed.out' file
# TODO: the qmmm information is written in this file
file_name = f"detailed.out.{bo_list[0]}"
with open(file_name, "r") as f:
detailed_out = f.read()
# Read 'EXC.DAT' file
if (molecule.nst > 1):
file_name = "EXC.DAT"
with open(file_name, "r") as f:
exc_out = f.read()
# Energy
if (not calc_force_only):
energy = re.findall('Total energy:\s+([-]\S+) H', detailed_out)
energy = np.array(energy[0], dtype=np.float64)
molecule.states[0].energy = energy
if (molecule.nst > 1):
tmp_e = f'[=]+\n' + ('\s+([-]*\S+)\s+\S+\s+\d+\s+->\s+\d+\s+\S+\s+\S+\s+[ST]') * molecule.nst
energy = re.findall(tmp_e, exc_out)
energy = np.array(energy[0], dtype=np.float64)
energy *= eV_to_au
for ist in range(1, molecule.nst):
molecule.states[ist].energy = molecule.states[0].energy + energy[ist - 1]
# Force
tmp_f = 'Total Forces' + '\n\s+\d*\s+([-]*\S+)\s+([-]*\S+)\s+([-]*\S+)' * molecule.nat_qm
force = re.findall(tmp_f, detailed_out)
force = np.array(force[0], dtype=np.float64)
force = force.reshape(molecule.nat_qm, 3, order='C')
molecule.states[bo_list[0]].force = np.copy(force)
# NACME
if (self.calc_coupling and not calc_force_only):
if (istep >= 0):
self.CI_overlap(molecule, istep, dt)
def CI_overlap(self, molecule, istep, dt):
""" Read the necessary files and calculate NACME from tdnac.c routine,
note that only reading of several files is required in this method
:param object molecule: Molecule object
:param integer istep: Current MD step
:param double dt: Time interval
"""
# Read upper right block of 'oversqr.dat' file (< t | t+dt >)
file_name_in = "oversqr.dat"
self.ao_overlap = np.zeros((self.nbasis, self.nbasis))
with open(file_name_in, "r") as f_in:
lines = f_in.readlines()
row = 0
iline = 0
for line in lines:
# Skip first five lines and read upper block
if (iline in range(5, 5 + self.nbasis)):
col = 0
count = False
field = line.split()
for element in field:
# Read right block
if (count):
ind_a = self.check_basis[row]
ind_b = self.check_basis[col]
if (ind_a == ind_b):
# Choose onsite (same-atom) block
# Sometimes NaN or excessively large values appear in the onsite block due to the Slater-Koster files
# These elements are therefore set to 1 or 0 regardless of their original values
if (row == col):
# Diagonal element in onsite block
new_val = 1.
else:
# Off-diagonal element in onsite block
new_val = 0.
else:
# Choose offsite (different-atom) block
new_val = float(element)
# Set overlap matrix element
self.ao_overlap[row, col] = new_val
col += 1
# Read right block
if (col > self.nbasis - 1):
col -= self.nbasis
count = True
row += 1
iline += 1
# np.savetxt("test-over", self.ao_overlap, fmt=f"%6.3f")
# Read 'eigenvec.bin.pre' file at time t
if (istep == 0):
file_name_in = "eigenvec.bin.pre"
self.mo_coef_old = np.zeros((self.norb, self.nbasis))
with open(file_name_in, "rb") as f_in:
dummy = np.fromfile(f_in, dtype=np.int32, count=1)
for iorb in range(self.norb):
dummy = np.fromfile(f_in, dtype=np.int32, count=1)
data = np.fromfile(f_in, dtype=np.float64, count=self.nbasis)
self.mo_coef_old[iorb] = data
# np.savetxt("test-mo1", self.mo_coef_old, fmt=f"%12.6f")
# Read 'eigenvec.bin' file at time t + dt
file_name_in = "eigenvec.bin"
self.mo_coef_new = np.zeros((self.norb, self.nbasis))
with open(file_name_in, "rb") as f_in:
dummy = np.fromfile(f_in, dtype=np.int32, count=1)
for iorb in range(self.norb):
dummy = np.fromfile(f_in, dtype=np.int32, count=1)
data = np.fromfile(f_in, dtype=np.float64, count=self.nbasis)
self.mo_coef_new[iorb] = data
# np.savetxt("test-mo2", self.mo_coef_new, fmt=f"%12.6f")
# The CI coefficients are arranged in order of single-particle excitations
# Read 'SPX.DAT.pre' file at time t
if (istep == 0):
file_name_in = "SPX.DAT.pre"
with open(file_name_in, "r") as f_in:
lines = f_in.readlines()
# Dimension for CI coefficients (number of excitations)
ndim_old = int(lines[-2].strip().split()[0])
get_wij_ind_old = np.zeros((ndim_old, 2), dtype=np.int32)
iline = 0
for line in lines:
# Skip first five lines
if (iline in range(5, 5 + ndim_old)):
# Column information: 1st = index, 4th = occ(i), 6th = virt(a)
field = line.split()
# Determine new limits for for-loops
if (int(field[5]) > self.orb_final[0]):
self.orb_final[0] = int(field[5])
if (int(field[3]) < self.orb_ini[0] + 1):
self.orb_ini[0] = int(field[3]) - 1
get_wij_ind_old[int(field[0]) - 1] = [int(field[3]), int(field[5])]
iline += 1
# Read 'SPX.DAT' file at time t + dt
file_name_in = "SPX.DAT"
with open(file_name_in, "r") as f_in:
lines = f_in.readlines()
# Dimension for CI coefficients (number of excitations)
ndim = int(lines[-2].strip().split()[0])
get_wij_ind_new = np.zeros((ndim, 2), dtype=np.int32)
iline = 0
for line in lines:
# Skip first five lines
if (iline in range(5, 5 + ndim)):
# Column information: 1st = index, 4th = occ(i), 6th = virt(a)
field = line.split()
if (int(field[5]) > self.orb_final[0]):
self.orb_final[0] = int(field[5])
if (int(field[3]) < self.orb_ini[0] + 1):
self.orb_ini[0] = int(field[3]) - 1
get_wij_ind_new[int(field[0]) - 1] = [int(field[3]), int(field[5])]
iline += 1
# Read 'XplusY.DAT.pre' file at time t
if (istep == 0):
file_name_in = "XplusY.DAT.pre"
self.ci_coef_old = np.zeros((molecule.nst, self.nocc, self.nvirt))
with open(file_name_in, "r") as f_in:
lines = f_in.readlines()
iline = 0
for line in lines:
if (iline == 0):
field = line.split()
assert (int(field[0]) == ndim_old)
assert (int(field[1]) >= molecule.nst - 1)
# nxply is the number of lines for each excited state block in 'XplusY.DAT'
nxply = int(ndim_old / 6) + 1
if (ndim_old % 6 != 0):
nxply += 1
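# Worked example: ndim_old = 10 single-particle excitations written 6 per line
# give 2 data lines plus 1 header line, so nxply = 3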
else:
field = line.split()
if (iline % nxply == 1):
ind = 0
ist = int(field[0]) - 1
# In general, TDDFTB calculates more excited states than molecule.nst,
# so we do not need to read all of the data in 'XplusY.DAT'
if (ist == molecule.nst - 1):
break
else:
# Currently, the CI coefficient elements for the S0 state are zero (unused values)
for element in field:
ind_occ = get_wij_ind_old[ind, 0] - 1
ind_virt = get_wij_ind_old[ind, 1] - self.nocc - 1
self.ci_coef_old[ist + 1, ind_occ, ind_virt] = float(element)
ind += 1
iline += 1
# np.savetxt("test-ci1", self.ci_coef_old[1], fmt=f"%12.6f")
# Read 'XplusY.DAT' file at time t + dt
file_name_in = "XplusY.DAT"
self.ci_coef_new = np.zeros((molecule.nst, self.nocc, self.nvirt))
with open(file_name_in, "r") as f_in:
lines = f_in.readlines()
iline = 0
for line in lines:
if (iline == 0):
field = line.split()
assert (int(field[0]) == ndim)
assert (int(field[1]) >= molecule.nst - 1)
# nxply is the number of lines for each excited state block in 'XplusY.DAT'
nxply = int(ndim / 6) + 1
if (ndim % 6 != 0):
nxply += 1
else:
field = line.split()
if (iline % nxply == 1):
ind = 0
ist = int(field[0]) - 1
# In general, TDDFTB calculates more excited states than molecule.nst,
# so we do not need to read all of the data in 'XplusY.DAT'
if (ist == molecule.nst - 1):
break
else:
# Currently, the CI coefficient elements for the S0 state are zero (unused values)
for element in field:
ind_occ = get_wij_ind_new[ind, 0] - 1
ind_virt = get_wij_ind_new[ind, 1] - self.nocc - 1
self.ci_coef_new[ist + 1, ind_occ, ind_virt] = float(element)
ind += 1
iline += 1
# np.savetxt("test-ci2", self.ci_coef_new[1], fmt=f"%12.6f")
# Calculate wavefunction overlap with orbital scheme
# Reference: J. Phys. Chem. Lett. 2015, 6, 4200-4203
wf_overlap(self, molecule, istep, dt)
``` |
{
"source": "jkhebel/InsightProject",
"score": 2
} |
#### File: jkhebel/InsightProject/MorFE.py
```python
import click
import logging
import yaml
import time
from tqdm import tqdm
import numpy as np
import torch
import torchvision
from skimage.metrics import mean_squared_error as mse
import matplotlib
matplotlib.use("Agg")
from dataset import HCSData
from models import VAE_fm
@click.group()
@click.option('--debug/--no-debug', default=False)
@click.option("--dataset", type=click.Path(exists=True), required=True)
@click.pass_context
def cli(ctx, debug, dataset):
with open("./configs/default_params.yml", 'r') as f:
ctx = yaml.load(f, Loader=yaml.FullLoader)
ctx.ensure_object(dict)
ctx['dataset'] = dataset
logging.basicConfig(level=logging.INFO)
click.echo(f"Debug mode is {'on' if debug else 'off'}")
if debug:
logging.getLogger().setLevel(logging.DEBUG)
@cli.command()
@click.pass_context
def train(ctx):
click.echo('Training')
@cli.command()
@click.pass_context
def train_vae(ctx):
pass
@cli.command()
@click.pass_context
def classify(ctx):
pass
@cli.command()
@click.pass_context
def extract_features(ctx):
# Recover run parameters from the click context; the key names below are
# assumed to match configs/default_params.yml and may need adjusting
params = ctx.obj
dataset = params['dataset']
debug = params['debug']
epochs = params['epochs']
batch_size = params['batch_size']
max_batches = params['max_batches']
split = params['split']
n_base_features = params['n_base_features']
n_latent_features = params['n_latent_features']
n_layers = params['n_layers']
logging.debug(f"Dataset: {dataset}")
logging.debug(f"Debug: {debug}")
logging.debug(f"Epochs: {epochs}")
logging.debug(f"Batch Size: {batch_size}")
logging.debug(f"Maximum batches per epoch: {max_batches}")
logging.debug(f"Test-train split: {split*100}%")
logging.debug(f"Base features: {n_base_features}")
logging.debug(f"Latent features: {n_latent_features}")
logging.debug(f"VAE Layers: {n_layers}")
# TODO: define experiment name, make exp dir under predictions dir
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data = HCSData.from_csv(dataset) # Load dataset
test_loader = torch.utils.data.DataLoader( # Generate a testing loader
data, batch_size=batch_size, shuffle=False)
net = VAE_fm(lf=n_latent_features, base=n_base_features) # TODO: load from .pt
logging.debug(net)
if torch.cuda.device_count() > 1: # If multiple gpu's
net = torch.nn.DataParallel(net) # Parallelize
net.to(device)  # Move model to device
try:
for epoch in range(epochs): # Iterate through epochs
with torch.no_grad():
for bn, (X, _) in tqdm(enumerate(test_loader), total=max_batches):
x = X.to(device)
o, u, logvar = net(x)
X = x.cpu().detach().numpy()
O = o.cpu().detach().numpy()
err = mse(X, O)
tqdm.write(f"Batch {bn}: reconstruction MSE = {err:.6f}")
# TODO: save feature maps (u, o) and predictions to exp dir
except (KeyboardInterrupt, SystemExit):
print("Session interrupted.")
if __name__ == '__main__':
cli(obj={})
``` |
{
"source": "jkhenning/autokeras",
"score": 2
} |
#### File: autokeras/hypermodels/wrapper_test.py
```python
import kerastuner
import tensorflow as tf
import autokeras as ak
from autokeras import adapters
from autokeras import graph as graph_module
from autokeras.hypermodels import wrapper
from tests import utils
def test_image_block():
block = wrapper.ImageBlock(normalize=None, augment=None)
hp = kerastuner.HyperParameters()
block = graph_module.deserialize(graph_module.serialize(block))
block.build(hp, ak.ImageInput(shape=(32, 32, 3)).build())
assert utils.name_in_hps('block_type', hp)
assert utils.name_in_hps('normalize', hp)
assert utils.name_in_hps('augment', hp)
def test_text_block():
block = wrapper.TextBlock()
hp = kerastuner.HyperParameters()
block = graph_module.deserialize(graph_module.serialize(block))
block.build(hp, ak.TextInput(shape=(1,)).build())
assert utils.name_in_hps('vectorizer', hp)
def test_structured_data_block():
block = wrapper.StructuredDataBlock()
block.column_names = ['0', '1']
block.column_types = {
'0': adapters.CATEGORICAL,
'1': adapters.CATEGORICAL,
}
hp = kerastuner.HyperParameters()
block = graph_module.deserialize(graph_module.serialize(block))
block.column_names = ['0', '1']
block.column_types = {
'0': adapters.CATEGORICAL,
'1': adapters.CATEGORICAL,
}
output = block.build(hp, ak.StructuredDataInput(shape=(2,)).build())
assert isinstance(output, tf.Tensor)
def test_timeseries_block():
block = wrapper.TimeseriesBlock()
hp = kerastuner.HyperParameters()
block.column_names = ['0', '1']
block.column_types = {
'0': adapters.NUMERICAL,
'1': adapters.NUMERICAL,
}
block = graph_module.deserialize(graph_module.serialize(block))
block.column_names = ['0', '1']
block.column_types = {
'0': adapters.NUMERICAL,
'1': adapters.NUMERICAL,
}
output = block.build(hp, ak.TimeseriesInput(shape=(32,), lookback=2).build())
assert isinstance(output, tf.Tensor)
``` |
{
"source": "jkhenning/ignite",
"score": 3
} |
#### File: examples/mnist/mnist_with_tensorboard.py
```python
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from torch import nn
import torch.nn.functional as F
from torch.optim import SGD
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise RuntimeError(
"This module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True
)
val_loader = DataLoader(
MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False
)
return train_loader, val_loader
def run(train_batch_size, val_batch_size, epochs, lr, momentum, log_interval, log_dir):
train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)
model = Net()
writer = SummaryWriter(log_dir=log_dir)
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
model.to(device) # Move model before creating optimizer
optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)
criterion = nn.NLLLoss()
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
val_metrics = {"accuracy": Accuracy(), "nll": Loss(criterion)}
evaluator = create_supervised_evaluator(model, metrics=val_metrics, device=device)
@trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
def log_training_loss(engine):
print(
f"Epoch[{engine.state.epoch}] Iteration[{engine.state.iteration}/{len(train_loader)}] "
f"Loss: {engine.state.output:.2f}"
)
writer.add_scalar("training/loss", engine.state.output, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Training Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("training/avg_accuracy", avg_accuracy, engine.state.epoch)
@trainer.on(Events.EPOCH_COMPLETED)
def log_validation_results(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics["accuracy"]
avg_nll = metrics["nll"]
print(
f"Validation Results - Epoch: {engine.state.epoch} Avg accuracy: {avg_accuracy:.2f} Avg loss: {avg_nll:.2f}"
)
writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
writer.add_scalar("valdation/avg_accuracy", avg_accuracy, engine.state.epoch)
# kick everything off
trainer.run(train_loader, max_epochs=epochs)
writer.close()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument(
"--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)"
)
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)")
parser.add_argument(
"--log_interval", type=int, default=10, help="how many batches to wait before logging training status"
)
parser.add_argument(
"--log_dir", type=str, default="tensorboard_logs", help="log directory for Tensorboard log output"
)
args = parser.parse_args()
run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum, args.log_interval, args.log_dir)
```
#### File: code/dataflow/transforms.py
```python
from typing import Type, Callable
import torch
def denormalize(t, mean, std, max_pixel_value=255):
assert isinstance(t, torch.Tensor), f"{type(t)}"
assert t.ndim == 3
d = t.device
mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)
std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)
tensor = std * t + mean
tensor *= max_pixel_value
return tensor
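# Hedged usage sketch (the ImageNet-style mean/std below are illustrative, not
# values required by this repository):
#   t = torch.rand(3, 224, 224)
#   img = denormalize(t, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
#   # img now holds std * t + mean rescaled by max_pixel_value (255 by default)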
```
#### File: ignite/base/mixins.py
```python
from collections import OrderedDict
from collections.abc import Mapping
class Serializable:
_state_dict_all_req_keys = () # type: tuple
_state_dict_one_of_opt_keys = () # type: tuple
def state_dict(self) -> OrderedDict:
pass
def load_state_dict(self, state_dict: Mapping) -> None:
if not isinstance(state_dict, Mapping):
raise TypeError(f"Argument state_dict should be a dictionary, but given {type(state_dict)}")
for k in self._state_dict_all_req_keys:
if k not in state_dict:
raise ValueError(
f"Required state attribute '{k}' is absent in provided state_dict '{state_dict.keys()}'"
)
opts = [k in state_dict for k in self._state_dict_one_of_opt_keys]
if len(opts) > 0 and ((not any(opts)) or (all(opts))):
raise ValueError(f"state_dict should contain only one of '{self._state_dict_one_of_opt_keys}' keys")
``` |
{
"source": "JKHHai/galapagos",
"score": 4
} |
#### File: middleware/python/abstractDict.py
```python
import warnings
class abstractDict():
'''
This class defines a dictionary with the additional enforcement of the
existence of keys as stated in the mandatory_array and optional keys that
may be included in optional_array.
'''
def __init__(self, mandatory_array, optional_array, **kwargs):
'''
Initializes the abstract dictionary
Args:
mandatory_array (list): list of keys that must exist in this dict
optional_array (list): keys that may be optionally in this dict
Raises:
ValueError: Raised if unknown key is specified
'''
self.data = {}
for mandatory_elem in mandatory_array:
self.data[mandatory_elem] = None
for optional_elem in optional_array:
self.data[optional_elem] = None
for key, value in kwargs.items():
if key in self.data:
self.data[key] = value
else:
raise ValueError('Init with ' + key + ' failed. Key does not exist')
self.check_elements(mandatory_array, optional_array)
def check_elements(self, mandatory_array, optional_array):
'''
Checks the initialized dictionary to enforce that all mandatory keys exist.
It issues a warning for optional keys that may be missing.
Args:
mandatory_array (list): keys that must exist in this dict
optional_array (list): keys that may optionally exist in this dict
Raises:
ValueError: Raised if a mandatory key is missing
'''
for mandatory_elem in mandatory_array:
if not(mandatory_elem in self.data):
raise ValueError('Mandatory ' + mandatory_elem + ' must exist')
for optional_elem in optional_array:
if not(optional_elem in self.data):
warnings.warn('Optional elem ' + optional_elem + ' does not exist')
def __setitem__(self, key, item):
self.data[key] = item
def __getitem__(self, key):
if key in self.data:
return self.data[key]
else:
raise ValueError('Key \"' + key + '\" not found')
def __contains__(self, key):
return key in self.data
def __str__(self):
return str(self.data)
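# Hedged usage sketch for this class (the key names below are illustrative,
# not taken from a real Galapagos mapping file):
if __name__ == "__main__":
d = abstractDict(['name', 'num'], ['clk'], name='kern0', num=0)
d['clk'] = 'aclk' # optional keys start as None and may be filled in later
print(d['name'], d['clk']) # -> kern0 aclk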
```
#### File: middleware/python/app_bridge.py
```python
import warnings
from abstractDict import abstractDict
class appBridge(abstractDict):
def __init__(self, **kwargs):
self.cycle_count = 0
mandatory_array = ('to_net', 'from_net', 'to_app', 'from_app','name','num','clk','aresetn')
optional_array = ('vendor', 'lib', 'version', 's_axis', 'm_axis', 's_axi', 'm_axi', 'wire_master', 'wire_slave', 'const', 'properties')
super().__init__(mandatory_array, optional_array, **kwargs)
array_of_arrays = ['clk', 'aresetn', 'm_axi', 's_axi', 'm_axis', 's_axis', 'wire_slave', 'wire_master', 'const', 'properties']
for elem in array_of_arrays:
if type(self.data[elem]) != type([]) and self.data[elem] != None:
self.data[elem] = [self.data[elem]]
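# e.g. passing clk='aclk' is normalized to clk=['aclk'] above so that
# downstream code can iterate over clock, reset, and interface lists uniformly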
```
#### File: middleware/python/galapagosNet.py
```python
import sys
import os
import math
import inspect
from sonar.testbench import Testbench, Module, TestVector, Thread
from sonar.interfaces import AXIS
from sonar.generators import Ethernet
class GalapagosNet:
def __init__(self, parameters):
if 'mac_table' not in parameters:
raise ValueError('Mac table must exist')
else:
if type(parameters['mac_table']) != type({}):
raise ValueError('Mac Table must be dictionary')
else:
self.macTable = parameters['mac_table']
if not('rank' in parameters):
raise ValueError('Rank not in mac table')
else:
self.macAddr = self._getMacAddr(parameters['rank'])
self.rank = parameters['rank']
if self.macAddr == None:
raise ValueError('Rank not in mac table')
if not('comm' in parameters):
self.comm = 'ethernet'
else:
self.comm = parameters['comm']
if not('mode' in parameters):
self.mode = 'sim'
elif parameters['mode'] == 'impl':
self.mode = 'impl'
else:
self.mode = 'sim'
if self.mode == 'sim':
self._makeSimModel()
#else init servers for tcp if using tcp
# galapagos ports
#--------------------------------------
# clocks
#--------------------------------------
# clk (stream clock) 156.25 MHz
# mem_sys_clk_p (mem_diff clock p) 333 MHz
#--------------------------------------
# resets
#--------------------------------------
# sys_resetn
#--------------------------------------
# streams
#--------------------------------------
# input stream
#--------------------------------------
# [7:0] stream_in_keep
# stream_in_last
# [63:0] stream_in_data
# stream_in_valid
# stream_in_ready
#--------------------------------------
# output stream
#--------------------------------------
# [7:0] stream_out_keep
# stream_out_last
# [63:0] stream_out_data
# stream_out_valid
# stream_out_ready,
#--------------------------------------
# output mem_ready -> when memory is calibrated
def _makeSimModel(self):
self.tb = Testbench.default('top_sim')
self.dut = Module.default("DUT")
self.tb.add_module(self.dut)
self.dut.add_clock_port("clk", "6.25ns")
self.dut.add_clock_port("mem_sys_clk_p", "3ns")
self.dut.add_reset_port("sys_resetn")
self.axis_in = AXIS("stream_in", "slave", "clk")
self.axis_in.port.init_channels('default', 64)
self.axis_out = AXIS("stream_out", "master", "clk")
self.axis_out.port.init_channels('default', 64)
self.dut.add_interface(self.axis_in)
self.dut.add_interface(self.axis_out)
self.dut.add_port("mem_ready", size=1, direction="output")
self.reset_thread = Thread()
self.reset_thread.wait_negedge('clk')
self.reset_thread.init_signals()
self.reset_thread.add_delay('25ns') #T*4
#reset the system
self.reset_thread.set_signal('sys_resetn', 1)
#wait for memory calibration
self.reset_thread.wait_level('mem_ready == $value', value=1)
self.tv = TestVector()
self.tv.add_thread(self.reset_thread)
def _make_tv(self):
thread = self.tv.add_thread()
thread.add_delay('100ns')
def _close_sim(self):
self.tb.add_test_vector(self.tv)
cwd = os.getcwd()
self.tb.generateTB(cwd + "/build/", "sv")
def start(self):
if self.mode == 'sim':
self._make_tv()
def stop(self):
if self.mode == 'sim':
self._close_sim()
def _getMacAddr(self, rank):
macAddr = None
for mac_entry in self.macTable:
if self.macTable[mac_entry] == rank:
macAddr = mac_entry
break
return macAddr
def waitForHeader(self, dest):
if self.mode == 'sim':
thread = self.tv.add_thread()
if self.comm == 'ethernet':
macAddrDst = self._getMacAddr(dest)
ethernet = Ethernet(macAddrDst, self.macAddr, "0x7400")
ethernet.prefix = dest
ethernet.wait_for_header(thread, self.axis_in, endian='little')
def binToStream(self, binData, dest):
if self.mode == 'sim':
thread = self.tv.add_thread()
if self.comm == 'ethernet':
macAddrDst = self._getMacAddr(dest)
ethernet = Ethernet(macAddrDst, self.macAddr, "0x7400")
ethernet.prefix = dest
ethernet.bin_to_stream(thread, self.axis_in, binData)
#else sim tcp/ip
#else call cpu library
#test for this module
if __name__=="__main__":
#writing random garbage data to test.txt
with open("test_axis.bin", "wb") as binary_file:
num_bytes_written = binary_file.write(b'\xDE\xAD\xBE\xEF\xFA\xCE\xFA\xCE')
num_bytes_written = binary_file.write(b'\x11\x22\x33\x44\x55\x66\x77\x88')
num_bytes_written = binary_file.write(b'\x00\xaa\xbb\xcc\xdd\xee\xff\x12')
num_bytes_written = binary_file.write(b'\x34\x56\x78')
#reading back random garbage data from test.txt
with open("test_axis.bin", "rb") as binary_file:
data = binary_file.read()
#now have data in byte array
dataArray = bytearray()
dataArray.extend(data)
rank0 = GalapagosNet({'comm': 'ethernet',
"mac_table": {"0x112233445566":"0x0001", "0xaabbccddeeff":"0x0000"},
"rank": "0x0000"
})
rank0.start()
rank0.binToStream(dataArray, "0x0001")
rank0.waitForHeader('0x0001')
rank0.stop()
```
#### File: middleware/python/tclFileGenerator.py
```python
import copy
import sys
import subprocess
import os
from tclMe import tclMeFile
import string
"""
Most of these functions are called (directly or indirectly) by makeTclFiles.
Each one takes care of one self-contained part of the TCL file generation.
"""
#interfaces constant
#creates the standard interfaces, same for all fpgas
def userApplicationRegionControlInst(tcl_user_app):
"""
Connects the AXI control interface from the shell (through an AXI interconnect)
to the various kernels in this FPGA (provided they declared control interfaces
in the logical file).
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
"""
#initialize axi_control_interface interconnect slave side (1 slave)
num_ctrl_interfaces = len(getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global'))
# extra interfaces for the memories containing addresses in this mode
if tcl_user_app.fpga['comm'] == 'raw':
num_ctrl_interfaces = num_ctrl_interfaces + 2
#make dummy bram for control interface if no control interfaces
if(num_ctrl_interfaces == 0):
tcl_user_app.instBlock(
{'name':'axi_vip',
'inst':'applicationRegion/axi_vip_ctrl',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.PROTOCOL {AXI4LITE}', 'CONFIG.INTERFACE_MODE {SLAVE}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':None,
'type':'intf_port',
'port_name':'S_AXI_CONTROL'
},
{'name':'applicationRegion/axi_vip_ctrl',
'type':'intf',
'port_name':'S_AXI'
}
)
else:
#tcl_user_app.instBlock(
# {'name':'smartconnect',
# 'inst':'applicationRegion/axi_interconnect_ctrl',
# 'clks':['aclk'],
# 'resetns':['aresetn'],
# 'properties':['CONFIG.NUM_SI {1}',
# 'CONFIG.NUM_MI {' + str(num_ctrl_interfaces) + '}']
# }
# )
#
inc_clks = ['ACLK', 'S00_ACLK']
inc_resetns = ['ARESETN', 'S00_ARESETN']
for inc_index in range(0, num_ctrl_interfaces):
inc_index_str = "%02d"%inc_index
inc_clks.append('M' + inc_index_str + '_ACLK')
inc_resetns.append('M' + inc_index_str + '_ARESETN')
tcl_user_app.instBlock(
{'name':'axi_interconnect',
'inst':'applicationRegion/axi_interconnect_ctrl',
'clks':inc_clks,
'resetns':inc_resetns,
'properties':['CONFIG.NUM_SI {1}',
'CONFIG.NUM_MI {' + str(num_ctrl_interfaces) + '}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':None,
'type':'intf_port',
'port_name':'S_AXI_CONTROL'
},
{'name':'applicationRegion/axi_interconnect_ctrl',
'type':'intf',
'port_name':'S00_AXI'
}
)
def getInterfaces(fpga, intf, flag = None, scope = None):
"""
Helper function to get a list of interfaces of a particular type from all
the kernels in this particular node.
Args:
fpga: node object for this particular FPGA
intf (string): the type of interface to look for. For example, "s_axi"
flag: If specified, asks getInterfaces to only match certain interfaces.
Right now it can be set to "scope" or "debug", or left blank for no
special behaviour
scope: If flag is set to "scope", this variable is the scope to look for
Can be "local" or "global", or left blank for no special behaviour
Returns:
An array of Python dicts, where each one is a subtree from the original
mapping file. Note that this also adds a 'kernel_inst' member to the
interface dict which contains a pointer to its parent kernel dict (this
is used in userApplicationRegionKernelConnectSwitches, among others)
"""
interfaces = []
# For each <kernel>...
for kern in fpga['kernel']:
#if global we can look for master or slave
# ^(Not sure what that comment means)
#if intf=='s_axis' and flag=='scope' and scope =='global':
# print('kernel is ' + str(kern.data))
# If this kernel has at least one <intf> tag, where intf is the string
# passed into this function...
if kern[intf] != None:
for kern_intf in kern[intf]:
# If we don't need a specific scope, match everything
if (scope == None):
kern_intf['kernel_inst'] = kern
interfaces.append(copy.deepcopy(kern_intf))
# Otherwise, only match the right scope
elif(flag == 'scope' and kern_intf['scope'] == scope):
kern_intf['kernel_inst'] = kern
interfaces.append(copy.deepcopy(kern_intf))
# (Not really sure what this is for, so I'm ignoring it)
elif(flag == 'debug' and 'debug' in kern_intf):
kern_intf['kernel_inst'] = kern
interfaces.append(copy.deepcopy(kern_intf))
#if intf=='s_axis' and flag=='scope' and scope =='global':
# print("interfaces returned are " + str(interfaces))
return interfaces
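# Hedged sketch of one returned entry for intf='s_axis' (field values invented,
# but the keys match how callers below use them):
#   {'name': 'stream_in', 'scope': 'global', 'kernel_inst': <parent kernel dict>, ...}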
def strCompare(s1, s2):
s1 = s1.replace(" ", "")
s1 = s1.replace("\t", "")
s1 = s1.replace("\n", "")
s2 = s2.replace(" ", "")
s2 = s2.replace("\t", "")
s2 = s2.replace("\n", "")
return s1 == s2
def getSlaveInterfaces(fpga, intf, master):
"""
Gets all the particular type (s_axi, s_axis, or wire_slave) of slave interfaces for a given FPGA
Args:
fpga: The node object for this FPGA
intf (string): The type of interface to look for
master: The information associated with a particular master interface.
This will be a Python dict built by parsing the XML/JSON file,
but it is augmented with some extra stuff by the cluster __init__
function. Specifically, it will be a pointer from the kernels[]
member var in the cluster object (and even more specifically, the
fpga parameter to this function is actually a pointer to a member
of the nodes[] array of the cluster object, which itself contains
pointers to members of the kernels[] array).
"""
interfaces = []
# First get all the interfaces that could connect to this master
slave_array = getInterfaces(fpga, intf, 'scope', 'local')
for slave in slave_array:
#print ("slave num " + slave['master']['num'])
if ( (int(slave['master']['num']) == int(master['kernel_inst']['num'])) and strCompare(slave['master']['port'], master['name'])):
interfaces.append(copy.deepcopy(slave))
return interfaces
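# Hedged sketch of the matching rule above (numbers and port names invented):
# a slave whose entry contains {'master': {'num': '2', 'port': 'm_axi_ctrl'}}
# is returned only for the master interface named 'm_axi_ctrl' on kernel num 2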
def userApplicationRegionMemInstLocal(tcl_user_app):
"""
For locally connected memory-mapped slaves. This instantiates a local AXI interconnect
between the master and the slaves, all within the given FPGA.
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
"""
m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'local')
for m_axi in m_axi_array:
s_axi_array = getSlaveInterfaces(tcl_user_app.fpga, 's_axi', m_axi)
inc_clks = ['ACLK', 'S00_ACLK']
inc_resetns = ['ARESETN', 'S00_ARESETN']
for inc_index in range(0, len(s_axi_array)):
inc_index_str = "%02d"%inc_index
inc_clks.append('M' + inc_index_str + '_ACLK')
inc_resetns.append('M' + inc_index_str + '_ARESETN')
tcl_user_app.instBlock(
{
'name':'axi_interconnect',
'inst': m_axi['kernel_inst']['inst'] + '_' + m_axi['name'] + '_inc_inst',
'clks': inc_clks,
'resetns': inc_resetns,
'properties':['CONFIG.NUM_SI {1}', 'CONFIG.NUM_MI {' + str(len(s_axi_array)) + '}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name': m_axi['kernel_inst']['inst'],
'type':'intf',
'port_name': m_axi['name']
},
{'name': m_axi['kernel_inst']['inst'] + '_' + m_axi['name'] + '_inc_inst' ,
'type':'intf',
'port_name': 'S00_AXI'
}
)
for s_axi_idx, s_axi in enumerate(s_axi_array):
s_axi_idx_str = "%02d"%s_axi_idx
tcl_user_app.makeConnection(
'intf',
{
'name': m_axi['kernel_inst']['inst'] + '_' + m_axi['name'] + '_inc_inst',
'type':'intf',
'port_name':'M' + s_axi_idx_str + '_AXI'
},
{'name':s_axi['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axi['name']
}
)
def userApplicationRegionMemInstGlobal(tcl_user_app, shared):
"""
Connects the kernels' AXI master port to the shell's off-chip memory
controller. Also instantiates an AXI interconnect when at least one kernel
master uses the off-chip memory (otherwise an AXI VIP is tied off instead).
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
shared: if True, a second interconnect master port is created and wired
out to the S_AXI_MEM_1 port (a second off-chip memory interface)
"""
num_mem_interfaces = len(getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'global'))
inc_clks = ['ACLK', 'M00_ACLK']
inc_resetns = ['ARESETN', 'M00_ARESETN']
if (num_mem_interfaces > 0):
if shared:
properties = ['CONFIG.NUM_MI {2}']
inc_clks.append('M01_ACLK')
inc_resetns.append('M01_ARESETN')
else:
properties = ['CONFIG.NUM_MI {1}']
#MAKES SMARTCONNECT
#DOESN'T PLAY WELL WITH ENCRYPTED CORES, REPLACING WITH INTERCONNECT
properties.append('CONFIG.NUM_SI {' + str(num_mem_interfaces) + '}')
# adds an interface for the second memory interface to be added
if shared:
properties.append('CONFIG.NUM_MI {2}')
#
# tcl_user_app.instBlock(
# {
# 'name':'smartconnect',
# 'inst':'applicationRegion/axi_interconnect_mem',
# 'clks':['aclk'],
# 'resetns':['aresetn'],
# 'properties':properties
# }
# )
#AXI INTERCONNECT
for inc_index in range(0, num_mem_interfaces):
inc_index_str = "%02d"%inc_index
inc_clks.append('S' + inc_index_str + '_ACLK')
inc_resetns.append('S' + inc_index_str + '_ARESETN')
# print('axi interconnect mem properties ' + str(properties))
enable_AXI_mem_interconnect = True
if 'custom' in tcl_user_app.fpga:
if tcl_user_app.fpga['custom'] == 'GAScore':
enable_AXI_mem_interconnect = False
if enable_AXI_mem_interconnect:
tcl_user_app.instBlock(
{
'name':'axi_interconnect',
'inst':'applicationRegion/axi_interconnect_mem',
'clks':inc_clks,
'resetns':inc_resetns,
'properties':properties
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/axi_interconnect_mem',
'type':'intf',
'port_name':'M00_AXI'
},
{'name':None,
'type':'intf_port',
'port_name':'S_AXI_MEM_0'
}
)
if shared:
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/axi_interconnect_mem',
'type':'intf',
'port_name':'M01_AXI'
},
{'name':None,
'type':'intf_port',
'port_name':'S_AXI_MEM_1'
}
)
else:
#no mem interface use VIP instead
tcl_user_app.instBlock(
{
'name':'axi_vip',
'inst':'applicationRegion/axi_vip_mem_0',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.INTERFACE_MODE {MASTER}', 'CONFIG.DATA_WIDTH {512}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/axi_vip_mem_0',
'type':'intf',
'port_name':'M_AXI'
},
{'name':None,
'type':'intf_port',
'port_name':'S_AXI_MEM_0'
}
)
if shared:
tcl_user_app.instBlock(
{
'name':'axi_vip',
'inst':'applicationRegion/axi_vip_mem_1',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.INTERFACE_MODE {MASTER}', 'CONFIG.DATA_WIDTH {512}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/axi_vip_mem_1',
'type':'intf',
'port_name':'M_AXI'
},
{
'name':None,
'type':'intf_port',
'port_name':'S_AXI_MEM_1'
}
)
def userApplicationRegionKernelsInst(tcl_user_app):
"""
Loops through the list of kernels on one particular FPGA and generates the
appropriate TCL commands to instantiate them in a block diagram.
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
"""
#instantiate kernels
for kern_idx, kern in enumerate(tcl_user_app.fpga['kernel']):
instName = kern['name'] + "_inst_" + str(kern['num'])
#instantiate kernel
tcl_user_app.fpga['kernel'][kern_idx]['inst'] = 'applicationRegion/' + instName
tcl_user_app.instBlock(
{
'vendor':kern['vendor'],
'lib': kern['lib'],
'name': kern['name'],
'inst':'applicationRegion/' + instName,
'clks': kern['clk'],
'resetns': kern['aresetn']
}
)
#instantiate and connect constant for id
if (kern['id_port'] != None):
tcl_user_app.instBlock(
{
'name':'xlconstant',
'inst': 'applicationRegion/id_' + str(kern['num']),
'properties':['CONFIG.CONST_WIDTH {32}',
'CONFIG.CONST_VAL {'+ str(kern['num'])+'}']
}
)
tcl_user_app.makeConnection(
'net',
{
'name':'applicationRegion/id_' + str(kern['num']),
'type':'pin',
'port_name':'dout'
},
{
'name':'applicationRegion/' + instName,
'type':'pin',
'port_name':kern['id_port']
}
)
if kern['const'] != None:
for const in kern['const']:
tcl_user_app.instBlock(
{
'name':'xlconstant',
'inst': 'applicationRegion/' + instName + '_' + const['name'],
'properties':['CONFIG.CONST_WIDTH {' + const['width'] + '}',
' CONFIG.CONST_VAL {'+ const['val'] + '}']
}
)
tcl_user_app.makeConnection(
'net',
{
'name':'applicationRegion/' + instName + '_' + const['name'] ,
'type':'pin',
'port_name':'dout'
},
{
'name':'applicationRegion/' + instName,
'type':'pin',
'port_name':const['name']
}
)
def userApplicationRegionSwitchesInst(tcl_user_app, sim):
"""
I think this is for making the Galapagos router (i.e. the one that sits in
the application region and takes care of routing packets to the network
switch or to another kernel in the same FPGA). This only instantiates IPs
and does not make any connections (except to network table and IP/MAC consts)
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
sim: if nonzero, a custom HLS arbiter is instantiated for simulation in
place of the AXI-Stream input switch (see the sim checks below)
"""
# I think this is the BRAM which stores the routing table
if tcl_user_app.fpga['comm'] != 'none':
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.instBlock(
{
'name':'blk_mem_gen',
'inst':'applicationRegion/blk_mem_switch_rom',
}
)
# The next 250 lines of code are a big if-elif-else statement which generate
# the correct Galapagos router depending on whether the communication type is
# "tcp", "eth", or "raw"
if tcl_user_app.fpga['comm'] == 'tcp':
tcl_user_app.instBlock(
{'vendor':'xilinx.com',
'lib':'hls',
'name':'width32router',
'inst':'applicationRegion/custom_switch_inst',
'clks':['aclk'],
'resetns':['aresetn']
}
)
# Properties for routing table BRAM
properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
'CONFIG.Enable_32bit_Address {true}',
'CONFIG.Use_Byte_Write_Enable {false}',
'CONFIG.Byte_Size {8}',
'CONFIG.Write_Depth_A {256}',
'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
'CONFIG.Use_RSTA_Pin {true}',
'CONFIG.Port_A_Write_Rate {0}',
'CONFIG.use_bram_block {BRAM_Controller}',
'CONFIG.EN_SAFETY_CKT {true}',
'CONFIG.Load_Init_File {true}',
'CONFIG.Coe_File $top_path/projects/$default_dir/ip.coe'
]
tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
# I think this connects the board's local IP to the router (but I don't
# know why this is needed)
tcl_user_app.makeConnection(
'net',
{
'name':'network/ip_constant_block_inst',
'type':'pin',
'port_name':'ip'
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'pin',
'port_name':'network_addr_V'
}
)
# Connect routing table BRAM to Galapagos router
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'network_table_V_PORTA'
},
{
'name':'applicationRegion/blk_mem_switch_rom',
'type':'intf',
'port_name':'BRAM_PORTA'
}
)
# Refer to comments in the case for TCP (above)
elif tcl_user_app.fpga['comm'] == 'eth':
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.instBlock(
{'vendor':'xilinx.com',
'lib':'hls',
'name':'width48router',
'inst':'applicationRegion/custom_switch_inst',
'clks':['aclk'],
'resetns':['aresetn']
}
)
properties =['CONFIG.Memory_Type {Single_Port_ROM}',
'CONFIG.Enable_32bit_Address {true}',
'CONFIG.Use_Byte_Write_Enable {false}',
'CONFIG.Byte_Size {8}',
'CONFIG.Write_Width_A {64}',
'CONFIG.Write_Depth_A {256}',
'CONFIG.Read_Width_A {64}',
'CONFIG.Write_Width_B {64}',
'CONFIG.Read_Width_B {64}',
'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
'CONFIG.Use_RSTA_Pin {true}',
'CONFIG.Port_A_Write_Rate {0}',
'CONFIG.use_bram_block {BRAM_Controller}',
'CONFIG.EN_SAFETY_CKT {true}',
'CONFIG.Load_init_file {true}',
'CONFIG.Coe_File $top_path/projects/$default_dir/mac.coe'
]
tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
tcl_user_app.makeConnection(
'net',
{
'name':'network/ip_constant_block_inst',
'type':'pin',
'port_name':'mac_big'
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'pin',
'port_name':'network_addr_V'
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'network_table_V_PORTA'
},
{
'name':'applicationRegion/blk_mem_switch_rom',
'type':'intf',
'port_name':'BRAM_PORTA'
}
)
elif tcl_user_app.fpga['comm'] == 'raw':
# configures one memory to hold the IP addresses
properties = ['CONFIG.Memory_Type {Single_Port_ROM}',
'CONFIG.Enable_32bit_Address {true}',
'CONFIG.Use_Byte_Write_Enable {false}',
'CONFIG.Byte_Size {8}',
'CONFIG.Write_Depth_A {256}',
'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
'CONFIG.Use_RSTA_Pin {true}',
'CONFIG.Port_A_Write_Rate {0}',
'CONFIG.use_bram_block {BRAM_Controller}',
'CONFIG.EN_SAFETY_CKT {true}',
'CONFIG.Load_Init_File {true}',
'CONFIG.Coe_File $top_path/projects/$default_dir/ip.coe'
]
tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom', properties)
tcl_user_app.instBlock(
{
'name':'axi_bram_ctrl',
'inst':'applicationRegion/ctrl_blk_mem_switch_rom',
'clks':['s_axi_aclk'],
'resetns':['s_axi_aresetn']
}
)
tcl_user_app.setProperties('applicationRegion/ctrl_blk_mem_switch_rom', ["CONFIG.SINGLE_PORT_BRAM {1}"])
# configures another memory to hold the MAC addresses
tcl_user_app.instBlock(
{
'name':'blk_mem_gen',
'inst':'applicationRegion/blk_mem_switch_rom_mac',
}
)
tcl_user_app.instBlock(
{
'name':'axi_bram_ctrl',
'inst':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
'clks':['s_axi_aclk'],
'resetns':['s_axi_aresetn']
}
)
tcl_user_app.setProperties('applicationRegion/ctrl_blk_mem_switch_rom_mac', ["CONFIG.SINGLE_PORT_BRAM {1}", "CONFIG.DATA_WIDTH {64}"])
properties =['CONFIG.Memory_Type {Single_Port_ROM}',
'CONFIG.Enable_32bit_Address {true}',
'CONFIG.Use_Byte_Write_Enable {false}',
'CONFIG.Byte_Size {8}',
'CONFIG.Write_Width_A {64}',
'CONFIG.Write_Depth_A {256}',
'CONFIG.Read_Width_A {64}',
'CONFIG.Write_Width_B {64}',
'CONFIG.Read_Width_B {64}',
'CONFIG.Register_PortA_Output_of_Memory_Primitives {false}',
'CONFIG.Use_RSTA_Pin {true}',
'CONFIG.Port_A_Write_Rate {0}',
'CONFIG.use_bram_block {BRAM_Controller}',
'CONFIG.EN_SAFETY_CKT {true}',
'CONFIG.Load_init_file {true}',
'CONFIG.Coe_File $top_path/projects/$default_dir/mac.coe'
]
tcl_user_app.setProperties('applicationRegion/blk_mem_switch_rom_mac', properties)
# connect these two memories to the global interconnect
app_interfaces = len(getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global'))
idx_str = "%02d"%(app_interfaces)
tcl_user_app.makeConnection(
'intf',
{'name':'applicationRegion/axi_interconnect_ctrl',
'type':'intf',
'port_name':'M' + idx_str + '_AXI'
},
{
'name':'applicationRegion/ctrl_blk_mem_switch_rom',
'type':'intf',
'port_name':'S_AXI'
}
)
idx_str = "%02d"%(app_interfaces + 1)
tcl_user_app.makeConnection(
'intf',
{'name':'applicationRegion/axi_interconnect_ctrl',
'type':'intf',
'port_name':'M' + idx_str + '_AXI'
},
{
'name':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
'type':'intf',
'port_name':'S_AXI'
}
)
# connect the BRAMs to their controllers
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/ctrl_blk_mem_switch_rom',
'type':'intf',
'port_name':'BRAM_PORTA'
},
{
'name':'applicationRegion/blk_mem_switch_rom',
'type':'intf',
'port_name':'BRAM_PORTA'
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/ctrl_blk_mem_switch_rom_mac',
'type':'intf',
'port_name':'BRAM_PORTA'
},
{
'name':'applicationRegion/blk_mem_switch_rom_mac',
'type':'intf',
'port_name':'BRAM_PORTA'
}
)
elif tcl_user_app.fpga['comm'] == 'none':
pass
else:
print("Unknown communication type: " + tcl_user_app.fpga['comm'])
exit(1)
# Ask how many (global) s_axis connections are in the user app region.
num_slave_s_axis_global = len(getInterfaces(tcl_user_app.fpga, 's_axis', 'scope' , 'global'))
if num_slave_s_axis_global == 0:
##TO DO: CHANGE TO VIP FOR 0 SLAVES
print("TO DO: CHANGE TO VIP FOR 0 SLAVES in userApplicationRegionSwitchesInst")
quit(0)
else:
#for simulation purposes use custom arbiter instead of axis_switch
if(sim == 0):
# we don't want an input switch IFF 1 slave and mode is raw
# if it is raw, we need just a single slave interface
if num_slave_s_axis_global > 1 and tcl_user_app.fpga['comm'] in ['raw', 'none']:
tcl_user_app.instBlock(
{
'name':'axis_switch',
'inst':'applicationRegion/input_switch',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.NUM_SI {1}',
'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
'CONFIG.ARB_ON_TLAST {1}',
'CONFIG.HAS_TLAST {1}'
]
}
)
elif tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.instBlock(
{
'name':'axis_switch',
'inst':'applicationRegion/input_switch',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.NUM_SI {2}',
'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
'CONFIG.HAS_TLAST {1}',
'CONFIG.ARB_ON_TLAST {1}'
]
}
)
else:
if num_slave_s_axis_global > 1:
tcl_user_app.instBlock(
{
'name':'axis_switch',
'inst':'applicationRegion/input_switch',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.NUM_SI {1}',
'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
'CONFIG.HAS_TLAST {1}',
'CONFIG.ARB_ON_TLAST {1}'
]
}
)
else:
tcl_user_app.instBlock(
{
'name':'arbiter',
'lib':'hls',
'vendor':'xilinx.com',
'inst':'applicationRegion/arbiter',
'clks':['ap_clk'],
'resetns':['ap_rst_n'],
}
)
switch_port_index = 0
properties = ['CONFIG.ARB_ON_MAX_XFERS {0}']
for kern in tcl_user_app.fpga['kernel']:
if kern['s_axis'] != None:
for s_axis in kern['s_axis']:
if s_axis['scope'] == 'global':
#print("adding kernel to switch " + kern['inst'])
kernel_index_str = "0x{:08x}".format(int(kern['num']))
switch_port_index_str = "%02d"%switch_port_index
properties.append('CONFIG.M' + switch_port_index_str + '_AXIS_BASETDEST {' + kernel_index_str + '}')
properties.append('CONFIG.M' + switch_port_index_str + '_AXIS_HIGHTDEST {' + kernel_index_str + '}')
switch_port_index = switch_port_index + 1
# this condition is prerequisite to have an input_switch
if num_slave_s_axis_global > 1 or tcl_user_app.fpga['comm'] != 'raw':
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.setProperties('applicationRegion/input_switch', properties)
# Ask how many (global) m_axis connections are in the user app region.
num_slave_m_axis_global = len(getInterfaces(tcl_user_app.fpga, 'm_axis', 'scope', 'global'))
if num_slave_m_axis_global == 0:
# TODO: CHANGE TO VIP FOR 0 SLAVES
tcl_user_app.instBlock(
{
'name':'axis_switch',
'inst':'applicationRegion/input_switch',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.NUM_SI {2}',
'CONFIG.NUM_MI {' + str(num_slave_s_axis_global) + '}',
'CONFIG.ARB_ON_TLAST {1}']
}
)
#instantiate switch only if more than one output
elif num_slave_m_axis_global > 1:
tcl_user_app.instBlock(
{
'name':'axis_switch',
'inst':'applicationRegion/output_switch',
'clks':['aclk'],
'resetns':['aresetn'],
'properties':['CONFIG.NUM_SI {' + str(num_slave_m_axis_global) + '}',
'CONFIG.NUM_MI {1}',
'CONFIG.ARB_ON_TLAST {1}',
'CONFIG.M00_AXIS_HIGHTDEST {0xffffffff}']
}
)
def userApplicationRegionKernelConnectSwitches(outDir, tcl_user_app, sim):
"""
Now that the kernels, Galapagos router, and memory controllers are instantiated,
it's time to connect them all together.
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
sim: if nonzero, kernel input streams are fed from the simulation arbiter
instead of the AXI-Stream input switch
"""
#iterate through all kernels on FPGA connecting them to the input and output switches and their control and memory interfaces
ctrl_interface_index = 0
mem_interface_index = 0
# Get list of all (global) s_axis. That is, all the kernel input streams
# By the way, the getInterfaces function has the side effect of adding refs
# to the interface's dict representation which links to its parent kernel
# dict (under the 'kernel_inst' key).
s_axis_array = getInterfaces(tcl_user_app.fpga, 's_axis', 'scope', 'global')
# Now connect the Galapagos router through the input switch into all of
# the s_axis interfaces
if len(s_axis_array) > 1:
if(sim == 1):
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S00_AXIS'
}
)
# For each s_axis connection
for idx, s_axis in enumerate(s_axis_array):
instName = s_axis['kernel_inst']['inst']
idx_str = "%02d"%idx
# Connect it to the correct port on the AXI switch (NOT directly into
# the Galapagos router; there is an AXI stream switch IP between
# the router and the kernel(s) )
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'M' + idx_str + '_AXIS'
},
{
'name': instName,
'type':'intf',
'port_name':s_axis['name']
}
)
# custom_switch_inst only exists without raw
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
# Connect the AXI input switch to the Galapagos router
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S01_AXIS'
}
)
elif len(s_axis_array) == 1:
if (sim == 1):
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name': s_axis_array[0]['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axis_array[0]['name']
}
)
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'S01_AXIS'
}
)
else:
# there's no input switch in this case
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{'name': s_axis_array[0]['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axis_array[0]['name']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_switch_V'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S01_AXIS'
}
)
m_axis_array = getInterfaces(tcl_user_app.fpga, 'm_axis', 'scope', 'global')
# Now connect all m_axis interfaces through the output switch into the
# Galapagos router
#no output switch, direct connect if only one
if len(m_axis_array) == 1:
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
instName = m_axis_array[0]['kernel_inst']['inst']
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name': m_axis_array[0]['name']
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_in_V'
}
)
elif len(m_axis_array) > 1:
for idx, m_axis in enumerate(m_axis_array):
instName = m_axis['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{
'name': instName ,
'type':'intf',
'port_name': m_axis['name']
},
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'S'+ idx_str + '_AXIS'
}
)
if tcl_user_app.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_user_app.fpga or tcl_user_app.fpga['custom'] != 'GAScore':
tcl_user_app.makeConnection(
'intf',
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_in_V'
}
)
# Now handle the control interfaces
s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
for idx, s_axi in enumerate(s_axi_array):
instName = s_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{'name':'applicationRegion/axi_interconnect_ctrl',
'type':'intf',
'port_name':'M' + idx_str + '_AXI'
},
{'name': instName,
'type':'intf',
'port_name':s_axi['name']
}
)
# And finally the off-chip memory interface
enable_AXI_mem_interconnect = True
if 'custom' in tcl_user_app.fpga:
if tcl_user_app.fpga['custom'] == 'GAScore':
enable_AXI_mem_interconnect = False
m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'global')
if enable_AXI_mem_interconnect:
for idx, m_axi in enumerate(m_axi_array):
instName = m_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
tcl_user_app.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name':m_axi['name']
},
{
'name':'applicationRegion/axi_interconnect_mem',
'type':'intf',
'port_name':'S' +idx_str + '_AXI'
}
)
else:
tcl_custom = tclMeFile( outDir + '/' + str(tcl_user_app.fpga['num']) + '_custom', tcl_user_app.fpga)
memory_lines = []
prev_instName = ""
curr_row = -1
curr_col = 0
for idx, m_axi in enumerate(m_axi_array):
instName = m_axi['kernel_inst']['inst']
idx_str = "%02d"%idx
if instName != prev_instName:
curr_row += 1
tcl_custom.tprint('set CUSTOM_arr(' + str(curr_row) + ',0) ' + instName)
prev_instName = instName
curr_col = 1
else:
curr_col += 1
tcl_custom.tprint('set CUSTOM_arr(' + str(curr_row) + ',' + str(curr_col) + ') ' + m_axi['name'])
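        # Illustrative sketch of the TCL this loop is assumed to emit for a kernel with
        # two m_axi ports followed by a second kernel with one (all names made up):
        #   set CUSTOM_arr(0,0) applicationRegion/kernel_0
        #   set CUSTOM_arr(0,1) m_axi_gmem0
        #   set CUSTOM_arr(0,2) m_axi_gmem1
        #   set CUSTOM_arr(1,0) applicationRegion/kernel_1
        #   set CUSTOM_arr(1,1) m_axi_gmem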
def add_debug_interfaces(outDir, fpga):
    m_axi_interfaces = getInterfaces(fpga, 'm_axi', 'debug')
    s_axi_interfaces = getInterfaces(fpga, 's_axi', 'debug')
    s_axis_interfaces = getInterfaces(fpga, 's_axis', 'debug')
    m_axis_interfaces = getInterfaces(fpga, 'm_axis', 'debug')
    wire_master_interfaces = getInterfaces(fpga, 'wire_master', 'debug')
    wire_slave_interfaces = getInterfaces(fpga, 'wire_slave', 'debug')
#instantiate ila
if (len(m_axi_interfaces) + len(s_axi_interfaces) + len(s_axis_interfaces) + len(m_axis_interfaces) + len(wire_master_interfaces) + len(wire_slave_interfaces)) > 1:
        tcl_debug_app = tclMeFile( outDir + '/' + str(fpga['num']) + '_debug', fpga)
tcl_debug_app.instBlock(
{
'name':'system_ila',
'inst':'system_ila_inst',
'clks':['clk'],
'resetns':['resetn']
}
)
#set properties
properties = []
#by default interface is AXI, only need to set interface for axis and wires
len_native = len(wire_slave_interfaces) + len(wire_master_interfaces)
len_interface = len(s_axis_interfaces) + len(m_axis_interfaces) + len(s_axi_interfaces) + len(m_axi_interfaces)
if len_native > 0 and len_interface > 0:
properties.append('CONFIG.C_MON_TYPE {MIXED}')
elif len_native > 0 and len_interface == 0:
properties.append('CONFIG.C_MON_TYPE {NATIVE}')
starting_idx = len(s_axi_interfaces) + len(m_axi_interfaces)
for axis_idx in range(starting_idx, starting_idx + len(s_axis_interfaces) + len(m_axis_interfaces)):
            properties.append('CONFIG.C_SLOT_' + str(axis_idx) + '_INTF_TYPE {xilinx.com:interface:axis_rtl:1.0}')
        tcl_debug_app.setProperties('system_ila_inst', properties)
for axi_idx, axi_interface in enumerate(s_axi_interfaces):
tcl_debug_app.makeConnection(
'intf',
{
'name':'system_ila_inst',
'type':'intf',
'port_name':'SLOT_' + str(axi_idx) + '_AXI'
},
{
'name': axi_interface['kernel_inst']['inst'],
'type':'intf',
'port_name': axi_interface['name']
}
)
slot_offset = len(s_axi_interfaces)
for axi_idx, axi_interface in enumerate(m_axi_interfaces):
tcl_debug_app.makeConnection(
'intf',
{
'name':'system_ila_inst',
'type':'intf',
'port_name':'SLOT_' + str(axi_idx + slot_offset) + '_AXI'
},
{
'name': axi_interface['kernel_inst']['inst'],
'type':'intf',
'port_name': axi_interface['name']
}
)
slot_offset = slot_offset + len(m_axi_interfaces)
for axis_idx, axis_interface in enumerate(m_axis_interfaces):
tcl_debug_app.makeConnection(
'intf',
{
'name':'system_ila_inst',
'type':'intf',
'port_name':'SLOT_' + str(axis_idx + slot_offset) + '_AXIS'
},
{
'name': axis_interface['kernel_inst']['inst'],
'type':'intf',
'port_name': axis_interface['name']
}
)
slot_offset = slot_offset + len(m_axis_interfaces)
for axis_idx, axis_interface in enumerate(s_axis_interfaces):
tcl_debug_app.makeConnection(
'intf',
{
'name':'system_ila_inst',
'type':'intf',
'port_name':'SLOT_' + str(axis_idx + slot_offset) + '_AXIS'
},
{
'name': axis_interface['kernel_inst']['inst'],
'type':'intf',
'port_name': axis_interface['name']
}
)
for wire_idx, wire_interface in enumerate(wire_master_interfaces):
            tcl_debug_app.makeConnection(
'net',
{
'name':'system_ila_inst',
'type':'pin',
'port_name':'probe' + str(wire_idx)
},
{
'name': wire_interface['kernel_inst']['inst'],
'type':'pin',
'port_name': wire_interface['name']
}
)
wire_offset = len(wire_master_interfaces)
for wire_idx, wire_interface in enumerate(wire_slave_interfaces):
            tcl_debug_app.makeConnection(
'net',
{
'name':'system_ila_inst',
'type':'pin',
'port_name':'probe' + str(wire_idx + wire_offset)
},
{
'name': wire_interface['kernel_inst']['inst'],
'type':'pin',
'port_name': wire_interface['name']
}
)
tcl_debug_app.close()
def getKernel(fpga, num):
for kern in fpga['kernel']:
        if int(kern['num']) == num:
return kern
return None
def getSlaveAddressInfo(s_axi):
slave_inst = s_axi['kernel_inst']['inst']
slave_inst = slave_inst.split('/')[1]
if (s_axi['kernel_inst']['lib'] == 'hls'):
slave_port = 'Data_' + s_axi['name']
else:
slave_port = s_axi['name']
if 'base' in s_axi:
slave_base = s_axi['base']
else:
slave_base = 'Reg'
properties = {}
if 'offset' in s_axi:
properties.update({'offset': s_axi['offset']})
if 'range' in s_axi:
properties.update({'range': s_axi['range']})
return slave_inst, slave_port, slave_base, properties
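# Worked example for getSlaveAddressInfo (the s_axi dict shape is an assumption
# inferred from the accesses above, values are illustrative):
#   s_axi = {'name': 's_axi_control', 'base': 'Reg0', 'offset': '0x1000', 'range': '4K',
#            'kernel_inst': {'inst': 'applicationRegion/kernel_0', 'lib': 'hls'}}
#   getSlaveAddressInfo(s_axi)
#   -> ('kernel_0', 'Data_s_axi_control', 'Reg0', {'offset': '0x1000', 'range': '4K'})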
def userApplicationRegionAssignAddresses(tcl_user_app, shared):
"""
connect mem interconnect and assign addresses, all kernels need to be 32 bit addressable
connect ctrl interconnect and assign addresses
Args:
tcl_user_app: a tclMe object (which contains references to the FPGA's
node object and a handle to the output file)
        shared: when True, a second off-chip memory port (S_AXI_MEM_1) is mapped
                into the same shared address space
"""
if 'custom' in tcl_user_app.fpga and tcl_user_app.fpga['custom'] == 'GAScore':
s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
master = 'S_AXI_CONTROL'
for global_s_axi in s_axi_array:
slave_inst = global_s_axi['kernel_inst']['inst']
slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(global_s_axi)
tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
if 'offset' in properties:
prop = {'offset': properties['offset']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
if 'range' in properties:
prop = {'range': properties['range']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
return
#global m_axi
m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'global')
tcl_user_app.assign_address(None, 'S_AXI_MEM_0', 'Reg')
if shared:
tcl_user_app.assign_address(None, 'S_AXI_MEM_1', 'Reg')
for global_m_axi in m_axi_array:
instName = global_m_axi['kernel_inst']['inst']
if(global_m_axi['kernel_inst']['lib'] == 'hls'):
master = instName + '/Data_' + global_m_axi['name']
else:
master = instName + '/' + global_m_axi['name']
properties = {'offset': '0x00000000', 'range': '4G'}
tcl_user_app.set_address_properties(None, 'S_AXI_MEM_0', 'Reg', master, offset='0x00000000')
if shared:
tcl_user_app.set_address_properties(None, 'S_AXI_MEM_1', 'Reg', master, offset='0x00000000')
for global_m_axi in m_axi_array:
instName = global_m_axi['kernel_inst']['inst']
if(global_m_axi['kernel_inst']['lib'] == 'hls'):
master = instName + '/Data_' + global_m_axi['name']
else:
master = instName + '/' + global_m_axi['name']
properties = {'range': '4G'}
tcl_user_app.set_address_properties(None, 'S_AXI_MEM_0', 'Reg', master, **properties)
if shared:
tcl_user_app.set_address_properties(None, 'S_AXI_MEM_1', 'Reg', master, **properties)
#global s_axi
s_axi_array = getInterfaces(tcl_user_app.fpga, 's_axi', 'scope', 'global')
master = 'S_AXI_CONTROL'
# set up the address space for the memories that were added in raw mode
if tcl_user_app.fpga['comm'] == 'raw':
slave_inst = "applicationRegion/ctrl_blk_mem_switch_rom"
slave_port = "S_AXI"
slave_base = "Mem0"
tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
slave_inst = "ctrl_blk_mem_switch_rom"
# range is done first because if offset is done first, depending on the range, it can be misaligned
prop = {'range': '4K'}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
prop = {'offset': "0x0000"}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
slave_inst = "applicationRegion/ctrl_blk_mem_switch_rom_mac"
tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
slave_inst = "ctrl_blk_mem_switch_rom_mac"
prop = {'range': '4K'}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
prop = {'offset': "0x1000"}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
for global_s_axi in s_axi_array:
slave_inst = global_s_axi['kernel_inst']['inst']
slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(global_s_axi)
tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
if 'offset' in properties:
prop = {'offset': properties['offset']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
if 'range' in properties:
prop = {'range': properties['range']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, master, **prop)
#local m_axi and s_axi
m_axi_array = getInterfaces(tcl_user_app.fpga, 'm_axi', 'scope', 'local')
for local_m_axi in m_axi_array:
if (local_m_axi['kernel_inst']['lib'] == 'hls'):
master_port = 'Data_' + local_m_axi['name']
else:
master_port = local_m_axi['name']
s_axi_array = getSlaveInterfaces(tcl_user_app.fpga, 's_axi', local_m_axi)
for local_s_axi in s_axi_array:
slave_inst, slave_port, slave_base, properties = getSlaveAddressInfo(local_s_axi)
tcl_user_app.assign_address(slave_inst, slave_port, slave_base)
if 'offset' in properties:
prop = {'offset': properties['offset']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, local_m_axi['kernel_inst']['inst'] + '/' + master_port, **prop)
if 'range' in properties:
prop = {'range': properties['range']}
tcl_user_app.set_address_properties(slave_inst, slave_port, slave_base, local_m_axi['kernel_inst']['inst'] + '/' + master_port, **prop)
def userApplicationLocalConnections(tcl_user_app):
"""
Takes care of generating the TCL commands for wiring up the <scope>local</scope>
connections between kernels, as defined in the logical file.
THIS NEEDS AN OVERHAUL!
Args:
tcl_user_app: Basically a handle to a file on disk, but in a fancy tclMe
object with a bunch of helper functions to make writing to
it a little easier.
Raises:
ValueError: If the local connection specification doesn't make sense
"""
#connect local axis and wires
m_axis_array = getInterfaces(tcl_user_app.fpga, 'm_axis', 'scope', 'local')
for local_m_axis in m_axis_array:
s_axis_array = getSlaveInterfaces(tcl_user_app.fpga, 's_axis', local_m_axis)
#insert broadcaster
if (len(s_axis_array) > 1):
tcl_user_app.instBlock(
{
'name':'axis_broadcaster',
'inst': local_m_axis['kernel_inst']['inst'] + '_' +local_m_axis['name'] + '_broadcaster',
'clks':['aclk'],
'resetns':['aresetn'],
                'properties':['CONFIG.NUM_MI {'+ str(len(s_axis_array)) +'}']
}
)
tcl_user_app.makeConnection(
'intf',
{
'name': local_m_axis['kernel_inst']['inst'] + '_' +local_m_axis['name'] + '_broadcaster',
'type':'intf',
'port_name':'S_AXIS'
},
{
'name': local_m_axis['kernel_inst']['inst'],
'type':'intf',
'port_name': local_m_axis['name']
}
)
for s_axis_idx, s_axis in enumerate(s_axis_array):
s_axis_idx_str = "%02d"%s_axis_idx
tcl_user_app.makeConnection(
'intf',
{
'name': local_m_axis['kernel_inst']['inst'] + '_' +local_m_axis['name'] + '_broadcaster',
'type':'intf',
'port_name':'M' + s_axis_idx_str + '_AXIS'
},
{
'name': s_axis['kernel_inst']['inst'],
'type':'intf',
'port_name': s_axis['name']
}
)
elif (len(s_axis_array) == 1):
tcl_user_app.makeConnection(
'intf',
{
'name': local_m_axis['kernel_inst']['inst'],
'type':'intf',
'port_name': local_m_axis['name']
},
{
                'name': s_axis_array[0]['kernel_inst']['inst'],
                'type':'intf',
                'port_name': s_axis_array[0]['name']
}
)
else:
raise ValueError("Local Master needs at least one local slave")
wire_master_array = getInterfaces(tcl_user_app.fpga, 'wire_master', 'scope' ,'local')
for wire_master in wire_master_array:
wire_slave_array = getSlaveInterfaces(tcl_user_app.fpga, 'wire_slave', wire_master)
for wire_slave in wire_slave_array:
tcl_user_app.makeConnection(
'net',
{
'name': wire_master['kernel_inst']['inst'],
'type':'pin',
'port_name': wire_master['name']
},
{
'name': wire_slave['kernel_inst']['inst'],
'type':'pin',
'port_name': wire_slave['name']
}
)
def userApplicationRegion(outDir, fpga, sim):
"""
Takes care of calling a bunch of functions for assembling the user application
region part of the block diagram. To be specific, this function takes care
of generating a single TCL file whose only purpose is to draw up the user's
    IPs and connections into a sub-hierarchy named "applicationRegion". The shell
    itself is generated in a separate TCL file by another function.
Args:
outDir (string): The output location for this TCL file
fpga: the node object for this FPGA
        sim: set to 1 to build the simulation variant of the design (passed through
             to the switch and connection generators)
"""
tcl_user_app = tclMeFile( outDir + '/' + str(fpga['num']) + '_app', fpga)
#tcl_user_app = open( outDir + '/' + str(fpga['num']) + '_app.tcl', 'w')
tcl_user_app.createHierarchy('applicationRegion')
userApplicationRegionKernelsInst(tcl_user_app)
userApplicationRegionControlInst(tcl_user_app)
#if communication medium is ethernet then combine offchip memory into one shared address space
userApplicationRegionMemInstGlobal(tcl_user_app, tcl_user_app.fpga['comm'] != 'tcp')
userApplicationRegionMemInstLocal(tcl_user_app)
userApplicationRegionSwitchesInst(tcl_user_app, sim)
userApplicationRegionKernelConnectSwitches(outDir, tcl_user_app, sim)
userApplicationRegionAssignAddresses(tcl_user_app, tcl_user_app.fpga['comm'] !='tcp' and tcl_user_app.fpga.address_space == 64)
userApplicationLocalConnections(tcl_user_app)
tcl_user_app.close()
#return num_debug_interfaces
def netBridgeConstants(tcl_net):
"""
Generate ip_constant_blocks related to the network bridge. For example, this
would make a block for the MAC address and the IP address
Args:
tcl_net: A tclMe object for the TCL file for generating the network stuff
"""
# these constants are unneeded in raw mode
if tcl_net.fpga['comm'] != "raw":
ip_addr = tcl_net.fpga['ip'].split(".")
#tcl_net.write('create_bd_cell -type ip -vlnv user.org:user:ip_constant_block:1.0 network/ip_constant_block_inst\n')
# Not sure where this vendor and lib came from
tcl_net.instBlock(
{
'vendor':'user.org',
'lib':'user',
'name':'ip_constant_block',
'inst':'network/ip_constant_block_inst'
}
)
# I guess this is a custom module that Naif made? The regular IP for
# constants doesn't have these properties
properties = ['CONFIG.C_IP_B0 {'+ ip_addr[3] + '}',
'CONFIG.C_IP_B1 {'+ ip_addr[2] + '}',
'CONFIG.C_IP_B2 {'+ ip_addr[1] + '}',
'CONFIG.C_IP_B3 {'+ ip_addr[0] + '}']
properties = properties + ['CONFIG.C_GATEWAY_B0 {100}',
'CONFIG.C_GATEWAY_B1 {' + ip_addr[2] +'}',
'CONFIG.C_GATEWAY_B2 {' + ip_addr[1] +'}',
'CONFIG.C_GATEWAY_B3 {' + ip_addr[0] +'}']
properties = properties + ['CONFIG.C_SUBNET_B0 {0}',
'CONFIG.C_SUBNET_B1 {255}',
'CONFIG.C_SUBNET_B2 {255}',
'CONFIG.C_SUBNET_B3 {255}']
# MAC address
properties = properties + ['CONFIG.C_MAC {0x' + tcl_net.fpga['mac'].replace(":","") + '}']
tcl_net.setProperties('network/ip_constant_block_inst', properties)
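        # Worked example (illustrative addresses only): with fpga['ip'] = '10.1.2.3'
        # and fpga['mac'] = 'aa:bb:cc:dd:ee:ff', the properties above become
        #   C_IP_B3..B0      = 10, 1, 2, 3       (octets stored least-significant first)
        #   C_GATEWAY_B3..B0 = 10, 1, 2, 100     (gateway fixed at .100 on the same subnet)
        #   C_SUBNET_B3..B0  = 255, 255, 255, 0  (a /24 netmask)
        #   C_MAC            = 0xaabbccddeeff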
# Instantiate the proper netBridge depending on the comm type
# By the way, these scripts also take care of hooking up the constants
# This should really be in the netBridge function instead of in this one...
galapagos_path = str(os.environ.get('GALAPAGOS_PATH'))
if tcl_net.fpga['comm'] == 'tcp':
tcl_net.addSource(galapagos_path + '/middleware/tclScripts/pr_tcp_bridge.tcl')
elif tcl_net.fpga['comm'] == 'eth':
tcl_net.addSource(galapagos_path + '/middleware/tclScripts/pr_eth_bridge.tcl')
elif tcl_net.fpga['comm'] == 'raw':
tcl_net.addSource(galapagos_path + '/middleware/tclScripts/pr_raw_bridge.tcl')
def netBridge(outDir, fpga):
"""
    Handles making a TCL file for generating this FPGA's network bridge.
All IPs are made in a hierarchy called "network"
Args:
outDir (string): The output location for this TCL file
fpga: the node object for this FPGA
"""
tcl_net = tclMeFile( outDir + '/' + str(fpga['num']) + '_net', fpga)
tcl_net.createHierarchy('network')
netBridgeConstants(tcl_net)
tcl_net.close()
def bridgeConnections(outDir, fpga, sim):
"""
At this point, the IP blocks for the network bridge and user app region are
in place, and the user app region is completely connected. This takes care
    of wiring up the network bridge (and, when an app_bridge is configured, it also
    inserts and connects that application bridge).
Args:
outDir (string): The output location for this TCL file
fpga: the node object for this FPGA
        sim: set to 1 to build the simulation variant (the network bridge is then
             wired to applicationRegion/arbiter)
"""
tcl_bridge_connections = tclMeFile( outDir + '/' + str(fpga['num']) + '_bridge_connections', fpga)
if 'custom' in tcl_bridge_connections.fpga:
tcl_custom = tclMeFile( outDir + '/' + str(fpga['num']) + '_custom', fpga)
tcl_bridge_connections.instBlock(
{'name':'galapagos_bridge',
'inst':'network/galapagos_bridge_inst',
'vendor':'xilinx.com',
'lib':'hls',
'clks':['aclk'],
'resetns':['aresetn']
}
)
tcl_bridge_connections.instBlock(
{
'name':'blk_mem_gen',
'inst':'network/galapagos_bridge_buffer'
}
)
properties = ['CONFIG.Memory_Type {True_Dual_Port_RAM}',
'CONFIG.Enable_B {Use_ENB_Pin}',
'CONFIG.Use_RSTB_Pin {true}',
'CONFIG.Port_B_Clock {100}',
'CONFIG.Port_B_Write_Rate {50}',
'CONFIG.Port_B_Enable_Rate {100}']
tcl_bridge_connections.setProperties('network/galapagos_bridge_buffer', properties)
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'buffer_storage_A_V_PORTA'
},
{
'name':'network/galapagos_bridge_buffer',
'type':'intf',
'port_name':'BRAM_PORTA'
}
)
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'buffer_storage_B_V_PORTA'
},
{
'name':'network/galapagos_bridge_buffer',
'type':'intf',
'port_name':'BRAM_PORTB'
}
)
#no bridge directly connect
if tcl_bridge_connections.fpga['app_bridge'] == None:
# custom_switch_inst only exists without raw
if tcl_bridge_connections.fpga['comm'] not in ['raw', 'none']:
if 'custom' not in tcl_bridge_connections.fpga or tcl_bridge_connections.fpga['custom'] != 'GAScore':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_network_V'
},
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'s_axis_g2N'
}
)
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'m_axis_g2N'
},
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_from_app}'
}
)
else:
tcl_custom.tprint('set CUSTOM_net_out network/network_bridge_inst/${netBridge_from_app}')
s_axis_array = getInterfaces(tcl_bridge_connections.fpga, 's_axis', 'scope', 'global')
if len(s_axis_array) > 1:
tcl_custom.tprint('set CUSTOM_kernel_in applicationRegion/input_switch/S00_AXIS')
tcl_custom.tprint('set CUSTOM_kernels_stream_in ' + str(len(s_axis_array)))
else:
instName = s_axis_array[0]['kernel_inst']['inst']
tcl_custom.tprint('set CUSTOM_kernel_in ' + instName + '/' + s_axis_array[0]['name'])
tcl_custom.tprint('set CUSTOM_kernels_stream_in 1')
m_axis_array = getInterfaces(tcl_bridge_connections.fpga, 'm_axis', 'scope', 'global')
if len(m_axis_array) > 1:
tcl_custom.tprint('set CUSTOM_kernel_out applicationRegion/output_switch/M00_AXIS')
tcl_custom.tprint('set CUSTOM_kernels_stream_out ' + str(len(m_axis_array)))
else:
instName = m_axis_array[0]['kernel_inst']['inst']
tcl_custom.tprint('set CUSTOM_kernel_out ' + instName + '/' + m_axis_array[0]['name'])
tcl_custom.tprint('set CUSTOM_kernels_stream_out 1')
else:
# depending on the number of slaves, either connect the network to a switch or the slave
s_axis_array = getInterfaces(tcl_bridge_connections.fpga, 's_axis', 'scope', 'global')
if len(s_axis_array) > 1:
if tcl_bridge_connections.fpga['comm'] != 'none':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_to_app}'
},
{'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S00_AXIS'
}
)
else:
tcl_custom.tprint('set CUSTOM_kernel_in applicationRegion/input_switch/S00_AXIS')
tcl_custom.tprint('set CUSTOM_kernels_stream_in ' + str(len(s_axis_array)))
else:
instName = s_axis_array[0]['kernel_inst']['inst']
if tcl_bridge_connections.fpga['comm'] != 'none':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_to_app}'
},
{'name': instName,
'type':'intf',
'port_name': s_axis_array[0]['name']
}
)
else:
tcl_custom.tprint('set CUSTOM_kernel_in ' + instName + '/' + s_axis_array[0]['name'])
tcl_custom.tprint('set CUSTOM_kernels_stream_in 1')
m_axis_array = getInterfaces(tcl_bridge_connections.fpga, 'm_axis', 'scope', 'global')
if len(m_axis_array) > 1:
if tcl_bridge_connections.fpga['comm'] != 'none':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_from_app}'
}
)
else:
tcl_custom.tprint('set CUSTOM_kernel_out applicationRegion/output_switch/M00_AXIS')
tcl_custom.tprint('set CUSTOM_kernels_stream_out ' + str(len(m_axis_array)))
else:
instName = m_axis_array[0]['kernel_inst']['inst']
if tcl_bridge_connections.fpga['comm'] != 'none':
tcl_bridge_connections.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name': m_axis_array[0]['name']
},
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_from_app}'
}
)
else:
tcl_custom.tprint('set CUSTOM_kernel_out ' + instName + '/' + m_axis_array[0]['name'])
tcl_custom.tprint('set CUSTOM_kernels_stream_out 1')
if tcl_bridge_connections.fpga['comm'] not in ['raw', 'none']:
if (sim == 0):
if 'custom' not in tcl_bridge_connections.fpga or tcl_bridge_connections.fpga['custom'] != 'GAScore':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_to_app}'
},
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'s_axis_n2G'
}
)
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/galapagos_bridge_inst',
'type':'intf',
'port_name':'m_axis_n2G'
},
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S00_AXIS'
}
)
else:
tcl_custom.tprint('set CUSTOM_net_in network/network_bridge_inst/${netBridge_to_app}')
else: #sim == 1
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_to_app}'
},
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'S00_AXIS'
}
)
else:
tcl_bridge_connections.instBlock(
{
'name': tcl_bridge_connections.fpga['app_bridge']['name'],
'inst':'application_bridge_inst',
'lib': tcl_bridge_connections.fpga['app_bridge']['lib'],
'vendor': tcl_bridge_connections.fpga['app_bridge']['vendor'],
'clks':tcl_bridge_connections.fpga['app_bridge']['clk'],
'resetns':tcl_bridge_connections.fpga['app_bridge']['aresetn']
}
)
if (sim == 1):
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['to_app']
},
{
'name':'applicationRegion/arbiter',
'type':'intf',
'port_name':'S00_AXIS'
}
)
else: #sim == 0
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['to_app']
},
{
'name':'applicationRegion/input_switch',
'type':'intf',
'port_name':'S00_AXIS'
}
)
if tcl_bridge_connections.fpga['comm'] not in ['raw', 'none']:
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'applicationRegion/custom_switch_inst',
'type':'intf',
'port_name':'stream_out_network_V'
},
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['from_app']
}
)
else:
m_axis_array = getInterfaces(tcl_bridge_connections.fpga, 'm_axis', 'scope', 'global')
if len(m_axis_array) > 1:
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'applicationRegion/output_switch',
'type':'intf',
'port_name':'M00_AXIS'
},
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['from_app']
}
)
else:
instName = m_axis_array[0]['kernel_inst']['inst']
tcl_bridge_connections.makeConnection(
'intf',
{
'name': instName,
'type':'intf',
'port_name': m_axis_array[0]['name']
},
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['from_app']
}
)
if tcl_bridge_connections.fpga['comm'] != 'none':
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['to_net']
},
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_from_app}'
}
)
tcl_bridge_connections.makeConnection(
'intf',
{
'name':'network/network_bridge_inst',
'type':'intf',
'port_name':'${netBridge_to_app}'
},
{
'name':'application_bridge_inst',
'type':'intf',
'port_name':tcl_bridge_connections.fpga['app_bridge']['from_net']
}
)
if tcl_bridge_connections.fpga['comm'] == 'none':
tcl_custom.close()
tcl_bridge_connections.close()
def makeTCLFiles(fpga, projectName, output_path, sim):
"""
Top-level function call for TCL file generation functions.
Args:
fpga: a node object which has already been determined to be hw type
projectName (string): The project name
output_path (string): The folder path where the output TCL files will go
        sim: set to nonzero to build the design instrumented for simulation
"""
outDir = output_path + '/' + projectName + '/' + str(fpga['num'])
#make bridge to network
if fpga['comm'] != 'none':
netBridge(outDir, fpga)
userApplicationRegion(outDir, fpga, sim)
bridgeConnections(outDir, fpga, sim)
#if(num_debug_interfaces > 0):
# add_debug_interfaces(outDir, num_debug_interfaces, fpga)
galapagos_path = str(os.environ.get('GALAPAGOS_PATH'))
tclMain = tclMeFile( outDir + '/' + str(fpga['num']), fpga)
tclMain.tprint(
"if { ! [info exists top_dir] } {\n\
set top_path ${::env(GALAPAGOS_PATH)}\n\
}\n\
if { ! [info exists default_dir] } {\n\
set default_dir " + projectName + "\n\
}\n\
"
)
tclMain.addSource(galapagos_path + '/shells/tclScripts/pr_standard_interfaces.tcl')
if fpga['comm'] != 'none':
tclMain.addSource(outDir + '/' + str(fpga['num']) + '_net.tcl')
tclMain.addSource(outDir + '/' + str(fpga['num']) + '_app.tcl')
tclMain.addSource(outDir + '/' + str(fpga['num']) + '_bridge_connections.tcl')
#if(num_debug_interfaces > 0):
# tclMain.addSource(outDir + '/' + str(fpga['num']) + '_debug.tcl')
if 'custom' in fpga:
tclMain.addSource(outDir + '/' + str(fpga['num']) + '_custom.tcl')
tclMain.addSource(galapagos_path + '/middleware/tclScripts/custom/' + fpga['custom'] + '.tcl')
tclMain.tprint('validate_bd_design')
tclMain.close()
``` |
{
"source": "jkhlr/immobot",
"score": 2
} |
#### File: immobot/bot/bot.py
```python
import logging
import threading
import time
from django.conf import settings
from telegram import Update
from telegram.ext import Updater, CommandHandler, CallbackContext
# Enable logging
from database.models import Subscriber, Job
NOTIFICATION_PAUSE_SECONDS = 1
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context. Error handlers also receive the raised TelegramError object in error.
def watch(update: Update, context: CallbackContext) -> None:
chat_id = update.message.chat.id
if len(update.message.text.split()) != 2:
update.message.reply_text(f'Usage: /watch URL')
return
url = update.message.text.split()[1]
job, _ = Job.objects.get_or_create(start_url=url)
try:
subscriber = Subscriber.objects.get(chat_id=chat_id)
subscriber.job = job
except Subscriber.DoesNotExist:
subscriber = Subscriber(chat_id=chat_id, job=job)
subscriber.save()
update.message.reply_text(f'Subscribed to job {job.id}')
def stop(update: Update, context: CallbackContext) -> None:
chat_id = update.message.chat.id
try:
subscriber = Subscriber.objects.get(chat_id=chat_id)
job_id = subscriber.job.id
subscriber.delete()
update.message.reply_text(
f'Subscription to job {job_id} stopped'
)
except Subscriber.DoesNotExist:
update.message.reply_text('No subscription to stop')
def run_bot():
updater = Updater(settings.TELEGRAM_API_TOKEN, use_context=True)
dispatcher = updater.dispatcher
dispatcher.add_handler(CommandHandler('watch', watch))
dispatcher.add_handler(CommandHandler('stop', stop))
result_updater = ResultUpdater(updater)
result_updater.start()
updater.start_polling()
updater.idle()
result_updater.stop()
class ResultUpdater(threading.Thread):
def __init__(self, updater, pause_seconds=NOTIFICATION_PAUSE_SECONDS):
super().__init__()
self._updater = updater
self._pause_seconds = pause_seconds
self._running = False
def run(self):
self._running = True
while self._running:
time.sleep(self._pause_seconds)
self.send_result_updates()
def send_result_updates(self):
for subscriber in Subscriber.objects.all():
for result in subscriber.unseen_results.all():
self._updater.bot.send_message(
chat_id=subscriber.chat_id,
text=f"<a href=\"{result.url}\">{result.title}</a>",
parse_mode='HTML'
)
subscriber.seen_results.add(result)
def stop(self):
if self._running:
self._running = False
self.join()
```
#### File: immobot/database/models.py
```python
from django.conf import settings
from django.db import models
from django.utils import timezone
class Result(models.Model):
title = models.TextField()
url = models.URLField(unique=True)
class Job(models.Model):
class Status(models.TextChoices):
SCRAPING = 'SCRAPING', 'Scraping'
WAITING = 'WAITING', 'Waiting'
scrape_interval_seconds = 60
start_url = models.TextField(unique=True)
status = models.CharField(
max_length=10,
choices=Status.choices,
default=Status.WAITING
)
last_scraped = models.DateTimeField(
null=True,
blank=True
)
results = models.ManyToManyField(Result)
def is_due(self):
if self.last_scraped is None:
return True
seconds_since_last_scrape = (timezone.now() - self.last_scraped).seconds
return seconds_since_last_scrape > settings.SCRAPE_INTERVAL_SECONDS
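    # Worked example (illustrative numbers): with settings.SCRAPE_INTERVAL_SECONDS = 60,
    # a job that has never been scraped is due, one scraped 90 seconds ago is due,
    # and one scraped 30 seconds ago is not.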
class Subscriber(models.Model):
chat_id = models.CharField(max_length=100, unique=True)
job = models.ForeignKey(
Job,
on_delete=models.CASCADE,
related_name='subscribers'
)
seen_results = models.ManyToManyField(Result)
@property
def unseen_results(self):
seen_result_ids = [result.id for result in self.seen_results.all()]
return self.job.results.exclude(id__in=seen_result_ids)
``` |
{
"source": "jkhnn/python-docs-samples",
"score": 3
} |
#### File: cloud-client/crop_hints/crop_hints.py
```python
import argparse
import io
from google.cloud import vision
from google.cloud.vision import types
from PIL import Image, ImageDraw
# [END vision_crop_hints_tutorial_imports]
def get_crop_hint(path):
# [START vision_crop_hints_tutorial_get_crop_hints]
"""Detect crop hints on a single image and return the first result."""
client = vision.ImageAnnotatorClient()
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
crop_hints_params = types.CropHintsParams(aspect_ratios=[1.77])
image_context = types.ImageContext(crop_hints_params=crop_hints_params)
response = client.crop_hints(image=image, image_context=image_context)
hints = response.crop_hints_annotation.crop_hints
# Get bounds for the first crop hint using an aspect ratio of 1.77.
vertices = hints[0].bounding_poly.vertices
# [END vision_crop_hints_tutorial_get_crop_hints]
return vertices
def draw_hint(image_file):
"""Draw a border around the image using the hints in the vector list."""
# [START vision_crop_hints_tutorial_draw_crop_hints]
vects = get_crop_hint(image_file)
im = Image.open(image_file)
draw = ImageDraw.Draw(im)
draw.polygon([
vects[0].x, vects[0].y,
vects[1].x, vects[1].y,
vects[2].x, vects[2].y,
vects[3].x, vects[3].y], None, 'red')
im.save('output-hint.jpg', 'JPEG')
# [END vision_crop_hints_tutorial_draw_crop_hints]
def crop_to_hint(image_file):
"""Crop the image using the hints in the vector list."""
# [START vision_crop_hints_tutorial_crop_to_hints]
vects = get_crop_hint(image_file)
im = Image.open(image_file)
im2 = im.crop([vects[0].x, vects[0].y,
vects[2].x - 1, vects[2].y - 1])
im2.save('output-crop.jpg', 'JPEG')
# [END vision_crop_hints_tutorial_crop_to_hints]
if __name__ == '__main__':
# [START vision_crop_hints_tutorial_run_application]
parser = argparse.ArgumentParser()
parser.add_argument('image_file', help='The image you\'d like to crop.')
parser.add_argument('mode', help='Set to "crop" or "draw".')
args = parser.parse_args()
if args.mode == 'crop':
crop_to_hint(args.image_file)
elif args.mode == 'draw':
draw_hint(args.image_file)
# [END vision_crop_hints_tutorial_run_application]
# [END vision_crop_hints_tutorial]
``` |
{
"source": "jkhouja/experimenter",
"score": 4
} |
#### File: experimenter/utils/text.py
```python
import collections
import logging
import re
from typing import List, Union
ARABIC_DIACRITICS = r"['ِ''ُ''ٓ''ٰ''ْ''ٌ''ٍ''ً''ّ''َ'`\"]"
UNDER_SCORE = r"_"
class clean_text:
    """Removes regex-matching text from input_text"""
@classmethod
def __init__(cls, regex=ARABIC_DIACRITICS, **kwargs):
cls.remove = re.compile(regex)
@classmethod
def __call__(cls, input_text: str = None, list_input=False):
"""Removes regex matching text from input_text
        Calling this method on Arabic text with the default regex removes
diacritics from the string.
Args:
input_text: The text to be cleaned.
Returns:
The text after removing parts that matches self.remove.
"""
if input_text is None:
return None
if list_input:
return [re.sub(cls.remove, "", t.strip()) for t in input_text]
return re.sub(cls.remove, "", input_text.strip())
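# Usage sketch (UNDER_SCORE is used here so the result is easy to verify; the default
# ARABIC_DIACRITICS regex works the same way on diacritised Arabic text):
#   strip_underscores = clean_text(regex=UNDER_SCORE)
#   strip_underscores('hello_world')                      # -> 'helloworld'
#   strip_underscores(['a_b ', 'c_d'], list_input=True)   # -> ['ab', 'cd']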
class Tokenizer:
"""Tokenizes a string or a list of strings"""
def __init__(self, sep=" ", **kwargs):
self.sep = sep
def detokenize(self, tokens: List[str], list_input=False):
"""Given list of tokens (or a list of list of tokens),
        generate a string joined by the separator"""
if list_input:
res = []
for inp in tokens:
res.append(self.sep.join(inp))
return res
return self.sep.join(tokens)
def _tokenize_one(self, input_text):
"""Simple tokenizer by a separator.
Given a string and a separator, returns a list of strings
separated by the separator
Args:
input_text: The text to be split
sep: The separator, if None or '' are passed,
the string will be separated by characters
Returns:
List of splitted text
"""
sep = self.sep
out = []
if sep == "" or sep is None:
# Tokenize to characters
for char in input_text:
out.append(char)
else:
for part in input_text.split(sep):
out.append(part)
return out
def _tokenize_list(self, input_text):
"""Simple tokenizer by a separator.
Given a string and a separator, returns a list of strings
separated by the separator
Args:
input_text: List of text to be split
sep: The separator, if None or '' are passed,
the string will be separated by characters
Returns:
List of List of splitted text
"""
sep = self.sep
res = []
for inp in input_text:
out = []
if sep == "" or sep is None:
# Tokenize to characters
for char in inp:
out.append(char)
else:
for part in inp.split(sep):
out.append(part)
res.append(out)
return res
def __call__(self, input_text, list_input=False):
if list_input:
return self._tokenize_list(input_text)
return self._tokenize_one(input_text)
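# Usage sketch of Tokenizer round-tripping (values chosen for illustration only):
#   tok = Tokenizer(sep=' ')
#   tok('a b c')                      # -> ['a', 'b', 'c']
#   tok.detokenize(['a', 'b', 'c'])   # -> 'a b c'
#   Tokenizer(sep='')('ab')           # -> ['a', 'b']  (character tokenization)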
# Convert String (sequence of characters) to indices
class Encoder:
def __init__(
self,
vocab: dict = None,
update_vocab: bool = True,
no_special_chars: bool = False,
max_vocab_size=None,
min_vocab_count=None,
):
# Initialize new vocab if none is provided
# System wide special characters
padding = "<PAD>"
unk = "<UNK>"
self.padding = padding
self.unk = unk
self.max_vocab_size = max_vocab_size
self.min_vocab_count = min_vocab_count
self.no_special_chars = no_special_chars
self.update_vocab = update_vocab
self.wc = collections.Counter()
self.trim_pad = True
self.smoothing = (
4 # Random smoothing number for 0 count classes (like PAD / UNK)
)
if not vocab:
self.vocab = self.get_empty_vocab()
else:
self.vocab = vocab
if not no_special_chars:
# Make sure the vocab adheres to special token indices
assert self.vocab[padding] == 0
assert self.vocab[unk] == 1
def get_empty_vocab(self):
vocab = {}
if not self.no_special_chars:
vocab[self.padding] = len(vocab)
vocab[self.unk] = len(vocab)
return vocab
    def freeze(self):
        """Locks down the encoder so that new data does not update the vocab"""
self.update_vocab = False
def unfreeze(self):
"""Allows the vocab to be updated based on new data during encoding"""
self.update_vocab = True
def get_vocab(self):
return self.vocab
def get_vocab_counts(self, as_list=False):
logging.debug(self.vocab)
if as_list:
res = [0] * len(self.vocab)
for w, c in self.wc.items():
logging.debug(w)
logging.debug(self.vocab[w])
res[self.vocab[w]] = c
return res
return [(self.vocab.get(w, self.unk), c) for w, c in self.wc.items()]
def get_vocab_weights(self, as_list=False, min_w=False):
counts = self.get_vocab_counts(as_list)
if as_list:
dom_class = min(counts) if min_w else max(counts)
return [dom_class / max(c, self.smoothing) for c in counts]
dom_class = (
min([c for w, c in counts]) if min_w else max([c for w, c in counts])
)
return [(w, dom_class / max(c, self.smoothing)) for w, c in counts]
def filter_vocab(self):
if self.max_vocab_size is not None:
min_c = self.wc.most_common(self.max_vocab_size)[-1][1]
min_count = min(
min_c, self.min_vocab_count
) # might not include beg / padding etc. need to make sure they are there
filtered_wc = {}
unk = 0
for w, c in self.wc.items():
if c <= min_count:
unk += c
else:
filtered_wc[w] = c
if self.unk in filtered_wc:
filtered_wc[self.unk] += unk
else:
filtered_wc[self.unk] = unk
self.wc = collections.Counter(filtered_wc)
filtered_vocab = self.get_empty_vocab()
for w, c in self.wc.items():
# Make sure it's not PAD or UNK
# or anything we already include in initialization
if w not in filtered_vocab:
filtered_vocab[w] = len(filtered_vocab)
self.vocab = filtered_vocab
def get_padding_indx(self):
return self.vocab[self.padding]
def _encode_one(
self,
input_data: str,
vocab: dict = None,
update_vocab: bool = None,
no_special_chars: bool = None,
) -> Union[List[List[int]], dict]:
if vocab is None:
vocab = self.vocab
if update_vocab is None:
update_vocab = self.update_vocab
if no_special_chars is None:
no_special_chars = self.no_special_chars
# iterate through data, convert to indices
# and build vocab (if not provided) as we go
# results = []
num_unk = 0
vocab_keys = set(vocab.keys())
# for inp in input_data:
wid = []
if update_vocab:
self.wc.update(input_data)
for char in input_data:
if char not in vocab_keys:
# Add to vocab if allowed
if update_vocab:
indx = len(vocab)
vocab[char] = indx
self.vocab[char] = indx
wid.append(indx)
vocab_keys.add(char)
else:
# Replace with unk and count as OOV
wid.append(vocab[self.unk])
num_unk += 1
else:
                # If in vocab, retrieve index
wid.append(vocab[char])
# results.append(wid)
if not update_vocab:
# Show statistics
# logging.debug("Number of OOV: {}".format(num_unk))
pass
return wid
def _encode_list(
self,
input_data: List[str],
vocab: dict = None,
update_vocab: bool = None,
no_special_chars: bool = None,
) -> Union[List[List[int]], dict]:
if vocab is None:
vocab = self.vocab
if update_vocab is None:
update_vocab = self.update_vocab
if no_special_chars is None:
no_special_chars = self.no_special_chars
# iterate through data, convert to indices and build vocab
# (if not provided) as we go
# results = []
num_unk = 0
vocab_keys = set(vocab.keys())
# for inp in input_data:
res = []
for inp_line in input_data:
wid = []
if update_vocab:
self.wc.update(inp_line)
for char in inp_line:
if char not in vocab_keys:
# Add to vocab if allowed
if update_vocab:
indx = len(vocab)
vocab[char] = indx
self.vocab[char] = indx
wid.append(indx)
vocab_keys.add(char)
else:
# Replace with unk and count as OOV
wid.append(vocab[self.unk])
num_unk += 1
else:
                    # If in vocab, retrieve index
wid.append(vocab[char])
res.append(wid)
# results.append(wid)
if not update_vocab:
# Show statistics
logging.debug("Number of OOV: {}".format(num_unk))
return res
def __call__(self, input_data, input_list=True, **kwargs):
if input_list:
return self._encode_list(input_data, **kwargs)
return self._encode_one(input_data, **kwargs)
def get_inverse(self):
# Get inverse vocab (index -> character).
inverse_vocab = {}
for char in self.vocab.keys():
inverse_vocab[self.vocab[char]] = char
return inverse_vocab
def decode(self, inp: List[int], list_input=False):
if list_input:
return self._decode_list(inp)
return self._decode_one(inp)
def _decode_list(self, inp: List[List[int]]):
inverse_vocab = self.get_inverse()
pad = self.vocab.get(self.padding, None)
if self.trim_pad:
res = []
for example in inp:
try:
res.append([inverse_vocab.get(i) for i in example if i != pad])
except KeyError:
# padding is not in vocab as in classification cases
# pass
raise
return res
res = []
res.append([inverse_vocab.get(i) for i in inp])
return res
def _decode_one(self, inp: List[int]) -> str:
"""Returns symbols from indices"""
inverse_vocab = self.get_inverse()
try:
pad = self.vocab[self.padding]
return [inverse_vocab.get(i) for i in inp if i != pad]
except KeyError:
# padding is not in vocab
pass
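if __name__ == "__main__":
    # Usage sketch (values are illustrative): grow a character vocab, freeze it,
    # then map unseen symbols to <UNK>.
    enc = Encoder()
    ids = enc("abc", input_list=False)      # -> [2, 3, 4]; vocab: {'<PAD>': 0, '<UNK>': 1, 'a': 2, 'b': 3, 'c': 4}
    enc.freeze()                            # new symbols no longer extend the vocab
    unseen = enc("abd", input_list=False)   # -> [2, 3, 1]  ('d' falls back to <UNK>)
    print(ids, unseen, enc.decode(unseen))  # decode -> ['a', 'b', '<UNK>']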
``` |
{
"source": "jkhu29/Deblurring-by-Realistic-Blurring",
"score": 3
} |
#### File: jkhu29/Deblurring-by-Realistic-Blurring/utils.py
```python
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
def weights_init(model):
"""init from article"""
for m in model.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, 0.1)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 0.1)
nn.init.constant_(m.bias, 0)
def calc_gram(x):
(n, c, h, w) = x.size()
f = x.view(n, c, w * h)
f_trans = f.transpose(1, 2)
gram = f.bmm(f_trans) / (c * h * w)
return gram
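# Shape note for calc_gram (illustrative sizes): a feature map x of shape (n, c, h, w)
# is flattened to f of shape (n, c, h*w), so f.bmm(f_trans) yields a per-sample Gram
# matrix of shape (n, c, c), e.g. (4, 64, 32, 32) -> (4, 64, 64).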
def gaussian(window_size, sigma):
gauss = torch.Tensor([math.exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding=window_size//2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding=window_size//2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding=window_size//2, groups=channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
def calc_ssim(img1, img2, window_size=11):
"""calculate SSIM"""
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average=True)
def calc_psnr(img1, img2):
    """calculate PSNR on cuda and cpu: img1 and img2 have range [0, 255]"""
mse = torch.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * torch.log10(255.0 / torch.sqrt(mse))
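# Worked example for calc_psnr (illustrative values): for images in [0, 255], an MSE
# of 100 gives 20 * log10(255 / 10) ≈ 28.13 dB; identical images return inf.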
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def convert_rgb_to_y(img):
if type(img) == np.ndarray:
return 16. + (64.738 * img[:, :, 0] + 129.057 * img[:, :, 1] + 25.064 * img[:, :, 2]) / 256.
elif type(img) == torch.Tensor:
if len(img.shape) == 4:
img = img.squeeze(0)
return 16. + (64.738 * img[0, :, :] + 129.057 * img[1, :, :] + 25.064 * img[2, :, :]) / 256.
else:
raise Exception('Unknown Type', type(img))
def convert_rgb_to_ycbcr(img):
if type(img) == np.ndarray:
y = 16. + (64.738 * img[:, :, 0] + 129.057 * img[:, :, 1] + 25.064 * img[:, :, 2]) / 256.
cb = 128. + (-37.945 * img[:, :, 0] - 74.494 * img[:, :, 1] + 112.439 * img[:, :, 2]) / 256.
cr = 128. + (112.439 * img[:, :, 0] - 94.154 * img[:, :, 1] - 18.285 * img[:, :, 2]) / 256.
return np.array([y, cb, cr]).transpose([1, 2, 0])
elif type(img) == torch.Tensor:
if len(img.shape) == 4:
img = img.squeeze(0)
y = 16. + (64.738 * img[0, :, :] + 129.057 * img[1, :, :] + 25.064 * img[2, :, :]) / 256.
cb = 128. + (-37.945 * img[0, :, :] - 74.494 * img[1, :, :] + 112.439 * img[2, :, :]) / 256.
cr = 128. + (112.439 * img[0, :, :] - 94.154 * img[1, :, :] - 18.285 * img[2, :, :]) / 256.
return torch.cat([y, cb, cr], 0).permute(1, 2, 0)
else:
raise Exception('Unknown Type', type(img))
def convert_ycbcr_to_rgb(img):
if type(img) == np.ndarray:
r = 298.082 * img[:, :, 0] / 256. + 408.583 * img[:, :, 2] / 256. - 222.921
g = 298.082 * img[:, :, 0] / 256. - 100.291 * img[:, :, 1] / 256. - 208.120 * img[:, :, 2] / 256. + 135.576
b = 298.082 * img[:, :, 0] / 256. + 516.412 * img[:, :, 1] / 256. - 276.836
return np.array([r, g, b]).transpose([1, 2, 0])
elif type(img) == torch.Tensor:
if len(img.shape) == 4:
img = img.squeeze(0)
r = 298.082 * img[0, :, :] / 256. + 408.583 * img[2, :, :] / 256. - 222.921
g = 298.082 * img[0, :, :] / 256. - 100.291 * img[1, :, :] / 256. - 208.120 * img[2, :, :] / 256. + 135.576
b = 298.082 * img[0, :, :] / 256. + 516.412 * img[1, :, :] / 256. - 276.836
return torch.cat([r, g, b], 0).permute(1, 2, 0)
else:
raise Exception('Unknown Type', type(img))
def rgb2lum(arr):
small = np.where(arr <= 0.04045)
big = np.where(arr > 0.04045)
arr[small] /= 12.92
arr[big] = ((arr[big] + 0.055) / 1.055) ** 2.4
return arr
def lum(image):
"""
turn BGR to Lum
:param image: image in sRGB area, range 255
:return: image in Lum
"""
assert image.shape[0] == 3, "make sure the layout is (c, h, w), BGR"
_, h, w = image.shape
image = image.astype(np.float)
v_b = image[0, ...] / 255
v_g = image[1, ...] / 255
v_r = image[2, ...] / 255
l_image = 0.2126 * rgb2lum(v_r) + 0.7152 * rgb2lum(v_g) + 0.0722 * rgb2lum(v_b)
return l_image
def upsampling(img, x, y):
func = nn.Upsample(size=[x, y], mode='bilinear', align_corners=True)
return func(img)
def generate_noise(size, channels=1, type='gaussian', scale=2, noise=None):
if type == 'gaussian':
noise = torch.randn(channels, size[0], round(size[1]/scale), round(size[2]/scale))
noise = upsampling(noise, size[1], size[2])
if type =='gaussian_mixture':
noise1 = torch.randn(channels, size[0], size[1], size[2]) + 5
noise2 = torch.randn(channels, size[0], size[1], size[2])
noise = noise1 + noise2
if type == 'uniform':
noise = torch.randn(channels, size[0], size[1], size[2])
return noise * 10.
def concat_noise(img, *args):
noise = generate_noise(*args)
if isinstance(img, torch.Tensor):
noise = noise.to(img.device)
else:
img = torch.from_numpy(img.transpose(2, 0, 1)).unsqueeze(0)
mixed_img = torch.cat((img, noise), 1)
return mixed_img
class ImageEvaluation(object):
def __init__(self, img, mode):
super(ImageEvaluation, self).__init__()
self.img = img
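if __name__ == "__main__":
    # Smoke-test sketch (sizes are illustrative; shapes follow from the helpers above).
    img = torch.randn(1, 3, 64, 64)
    noise = generate_noise((3, 64, 64))     # gaussian noise upsampled back to (1, 3, 64, 64)
    mixed = concat_noise(img, (3, 64, 64))  # channel-wise concat -> (1, 6, 64, 64)
    print(noise.shape, mixed.shape)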
``` |
{
"source": "jkhu29/SoCo",
"score": 3
} |
#### File: contrast/data/sampler.py
```python
import numpy as np
from torch.utils.data import Sampler
class SubsetSlidingWindowSampler(Sampler):
r"""Samples elements randomly from a given list of indices, without replacement.
Arguments:
indices (sequence): a sequence of indices
"""
def __init__(self, indices, window_stride, window_size, shuffle_per_epoch=False):
self.window_stride = window_stride
self.window_size = window_size
self.shuffle_per_epoch = shuffle_per_epoch
self.indices = indices
np.random.shuffle(self.indices)
self.start_index = 0
def __iter__(self):
# optionally shuffle all indices per epoch
if self.shuffle_per_epoch and self.start_index + self.window_size > len(self):
np.random.shuffle(self.indices)
# get indices of sampler in the current window
        indices = np.mod(np.arange(self.window_size, dtype=np.int64) + self.start_index, len(self))
window_indices = self.indices[indices]
# shuffle the current window
np.random.shuffle(window_indices)
# move start index to next window
self.start_index = (self.start_index + self.window_stride) % len(self)
return iter(window_indices.tolist())
def __len__(self):
return len(self.indices)
    def state_dict(self):
        """Returns the state of the sampler as a :class:`dict`.
        It currently contains only the sliding-window start index.
"""
return {"start_index": self.start_index}
    def load_state_dict(self, state_dict):
        """Loads the sampler state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
```
#### File: contrast/data/transform.py
```python
import numpy as np
from PIL import ImageFilter, ImageOps
from torchvision import transforms
from . import transform_ops
class GaussianBlur(object):
"""Gaussian Blur version 2"""
def __call__(self, x):
sigma = np.random.uniform(0.1, 2.0)
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def get_transform(args, aug_type, crop, image_size=224, crop1=0.9, cutout_prob=0.5, cutout_ratio=(0.1, 0.2),
image3_size=224, image4_size=224):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
if aug_type == 'ImageAsymBboxCutout':
transform_whole_img = transform_ops.WholeImageResizedParams(image_size)
transform_img = transform_ops.RandomResizedCropParams(image_size, scale=(crop, 1.))
transform_flip = transform_ops.RandomHorizontalFlipImageBbox()
transform_post_1 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=1.0),
transforms.ToTensor(),
normalize,
])
transform_post_2 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=0.1),
transforms.RandomApply([ImageOps.solarize], p=0.2),
transforms.ToTensor(),
normalize,
])
transform_cutout = transform_ops.RandomCutoutInBbox(image_size, cutout_prob=cutout_prob, cutout_ratio=cutout_ratio)
transform = (transform_whole_img, transform_img, transform_flip, transform_post_1, transform_post_2, transform_cutout)
elif aug_type == 'ImageAsymBboxAwareMultiJitter1':
transform_whole_img = transform_ops.WholeImageResizedParams(image_size)
transform_img = transform_ops.RandomResizedCropParams(image_size, scale=(crop, 1.))
transform_img_small = transform_ops.RandomResizedCropParams(image_size//2, scale=(crop, 1.))
transform_flip_flip = transform_ops.RandomHorizontalFlipImageBboxBbox()
transform_flip = transform_ops.RandomHorizontalFlipImageBbox()
transform_post_1 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=1.0),
transforms.ToTensor(),
normalize,
])
transform_post_2 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=0.1),
transforms.RandomApply([ImageOps.solarize], p=0.2),
transforms.ToTensor(),
normalize,
])
transform = (transform_whole_img, transform_img, transform_img_small, transform_flip_flip, transform_flip, transform_post_1, transform_post_2)
elif aug_type == 'ImageAsymBboxAwareMultiJitter1Cutout':
transform_whole_img = transform_ops.WholeImageResizedParams(image_size)
transform_img = transform_ops.RandomResizedCropParams(image_size, scale=(crop, 1.))
transform_img_small = transform_ops.RandomResizedCropParams(image_size//2, scale=(crop, 1.))
transform_flip_flip = transform_ops.RandomHorizontalFlipImageBboxBbox()
transform_flip = transform_ops.RandomHorizontalFlipImageBbox()
transform_post_1 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=1.0),
transforms.ToTensor(),
normalize,
])
transform_post_2 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=0.1),
transforms.RandomApply([ImageOps.solarize], p=0.2),
transforms.ToTensor(),
normalize,
])
transform_cutout = transform_ops.RandomCutoutInBbox(image_size, cutout_prob=cutout_prob, cutout_ratio=cutout_ratio)
transform = (transform_whole_img, transform_img, transform_img_small, transform_flip_flip, transform_flip, transform_post_1, transform_post_2, transform_cutout)
elif aug_type == 'ImageAsymBboxAwareMulti3ResizeExtraJitter1':
transform_whole_img = transform_ops.WholeImageResizedParams(image_size)
transform_img = transform_ops.RandomResizedCropParams(image_size, scale=(crop, 1.))
transform_img_small = transform_ops.RandomResizedCropParams(image3_size, scale=(crop, 1.))
transform_img_resize = transforms.Resize(image4_size)
transform_flip_flip_flip = transform_ops.RandomHorizontalFlipImageBboxBboxBbox()
transform_flip = transform_ops.RandomHorizontalFlipImageBbox()
transform_post_1 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=1.0),
transforms.ToTensor(),
normalize,
])
transform_post_2 = transform_ops.ComposeImage([
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur()], p=0.1),
transforms.RandomApply([ImageOps.solarize], p=0.2),
transforms.ToTensor(),
normalize,
])
transform = (transform_whole_img, transform_img, transform_img_small, transform_img_resize, transform_flip_flip_flip, transform_flip, transform_post_1, transform_post_2)
elif aug_type == 'NULL': # used in linear evaluation
transform = transform_ops.Compose([
transform_ops.RandomResizedCropCoord(image_size, scale=(crop, 1.)),
transform_ops.RandomHorizontalFlipCoord(),
transforms.ToTensor(),
normalize,
])
elif aug_type == 'val': # used in validate
transform = transforms.Compose([
transforms.Resize(image_size + 32),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
normalize
])
else:
supported = '[ImageAsymBboxCutout, ImageAsymBboxAwareMultiJitter1, ImageAsymBboxAwareMultiJitter1Cutout, ImageAsymBboxAwareMulti3ResizeExtraJitter1, NULL]'
        raise NotImplementedError(f'aug_type "{aug_type}" not supported. Should be one of {supported}')
return transform
```
#### File: SoCo/contrast/util.py
```python
import argparse
import torch
import torch.distributed as dist
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
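# Illustrative sketch (not part of the original file): with 2 samples and 3 classes,
#   logits = torch.tensor([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05]])
#   labels = torch.tensor([1, 2])
#   top1, top2 = accuracy(logits, labels, topk=(1, 2))
# sample 0 is correct at top-1 and top-2, sample 1 at neither, so both values are 50.0.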
def dist_collect(x):
""" collect all tensor from all GPUs
args:
x: shape (mini_batch, ...)
returns:
shape (mini_batch * num_gpu, ...)
"""
x = x.contiguous()
out_list = [torch.zeros_like(x, device=x.device, dtype=x.dtype)
for _ in range(dist.get_world_size())]
dist.all_gather(out_list, x)
return torch.cat(out_list, dim=0)
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= dist.get_world_size()
return rt
class MyHelpFormatter(argparse.MetavarTypeHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
pass
class DistributedShuffle:
@staticmethod
def forward_shuffle(x):
"""
Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = dist_collect(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
dist.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = dist.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@staticmethod
def backward_shuffle(x, idx_unshuffle, return_local=True):
"""
Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = dist_collect(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
if return_local:
# restored index for this gpu
gpu_idx = dist.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_unshuffle], x_gather[idx_this]
else:
return x_gather[idx_unshuffle]
@staticmethod
def get_local_id(ids):
return ids.chunk(dist.get_world_size())[dist.get_rank()]
@staticmethod
def get_shuffle_ids(bsz, epoch):
"""generate shuffle ids for ShuffleBN"""
torch.manual_seed(epoch)
# global forward shuffle id for all process
forward_inds = torch.randperm(bsz).long().cuda()
# global backward shuffle id
backward_inds = torch.zeros(forward_inds.shape[0]).long().cuda()
value = torch.arange(bsz).long().cuda()
backward_inds.index_copy_(0, forward_inds, value)
return forward_inds, backward_inds
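# Usage sketch (an assumption based on MoCo-style shuffling BN, not taken from this file):
# shuffle the key-encoder batch across GPUs before the forward pass and undo it afterwards,
# so per-GPU BatchNorm statistics cannot leak the pairing structure of the batch:
#   x_shuffled, idx_unshuffle = DistributedShuffle.forward_shuffle(x_key)
#   feat_key = key_encoder(x_shuffled)
#   feat_all, feat_local = DistributedShuffle.backward_shuffle(feat_key, idx_unshuffle,
#                                                              return_local=True)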
```
#### File: jkhu29/SoCo/main_linear.py
```python
import json
import os
import time
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from contrast import resnet
from contrast.data import get_loader
from contrast.logger import setup_logger
from contrast.lr_scheduler import get_scheduler
from contrast.option import parse_option
from contrast.util import AverageMeter, accuracy, reduce_tensor
try:
from apex import amp # type: ignore
except ImportError:
amp = None
def build_model(args, num_class):
# create model
model = resnet.__dict__[args.arch](low_dim=num_class, head_type='reduce').cuda()
# set requires_grad of parameters except last fc layer to False
for name, p in model.named_parameters():
if 'fc' not in name:
p.requires_grad = False
optimizer = torch.optim.SGD(model.fc.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.amp_opt_level != "O0":
model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)
model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)
return model, optimizer
def load_pretrained(model, pretrained_model):
ckpt = torch.load(pretrained_model, map_location='cpu')
model_dict = model.state_dict()
base_fix = False
for key in ckpt['model'].keys():
if key.startswith('module.base.'):
base_fix = True
break
if base_fix:
state_dict = {k.replace("module.base.", "module."): v
for k, v in ckpt['model'].items()
if k.startswith('module.base.')}
logger.info(f"==> load checkpoint from Module.Base")
else:
state_dict = {k.replace("module.encoder.", "module."): v
for k, v in ckpt['model'].items()
if k.startswith('module.encoder.')}
logger.info(f"==> load checkpoint from Module.Encoder")
state_dict = {k: v for k, v in state_dict.items()
if k in model_dict and v.size() == model_dict[k].size()}
model_dict.update(state_dict)
model.load_state_dict(model_dict)
logger.info(f"==> loaded checkpoint '{pretrained_model}' (epoch {ckpt['epoch']})")
def load_checkpoint(args, model, optimizer, scheduler):
logger.info("=> loading checkpoint '{args.resume'")
checkpoint = torch.load(args.resume, map_location='cpu')
global best_acc1
best_acc1 = checkpoint['best_acc1']
args.start_epoch = checkpoint['epoch'] + 1
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
if args.amp_opt_level != "O0" and checkpoint['args'].amp_opt_level != "O0":
amp.load_state_dict(checkpoint['amp'])
logger.info(f"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
def save_checkpoint(args, epoch, model, test_acc, optimizer, scheduler):
state = {
'args': args,
'epoch': epoch,
'model': model.state_dict(),
'best_acc1': test_acc,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
}
if args.amp_opt_level != "O0":
state['amp'] = amp.state_dict()
torch.save(state, os.path.join(args.output_dir, f'ckpt_epoch_{epoch}.pth'))
torch.save(state, os.path.join(args.output_dir, f'current.pth'))
def main(args):
global best_acc1
args.batch_size = args.total_batch_size // dist.get_world_size()
train_loader = get_loader(args.aug, args, prefix='train')
val_loader = get_loader('val', args, prefix='val')
logger.info(f"length of training dataset: {len(train_loader.dataset)}")
model, optimizer = build_model(args, num_class=len(train_loader.dataset.classes))
scheduler = get_scheduler(optimizer, len(train_loader), args)
# load pre-trained model
load_pretrained(model, args.pretrained_model)
# optionally resume from a checkpoint
if args.auto_resume:
resume_file = os.path.join(args.output_dir, "current.pth")
if os.path.exists(resume_file):
logger.info(f'auto resume from {resume_file}')
args.resume = resume_file
else:
logger.info(f'no checkpoint found in {args.output_dir}, ignoring auto resume')
if args.resume:
assert os.path.isfile(args.resume), f"no checkpoint found at '{args.resume}'"
load_checkpoint(args, model, optimizer, scheduler)
if args.eval:
logger.info("==> testing...")
validate(val_loader, model, args)
return
# tensorboard
if dist.get_rank() == 0:
summary_writer = SummaryWriter(log_dir=args.output_dir)
else:
summary_writer = None
# routine
for epoch in range(args.start_epoch, args.epochs + 1):
if isinstance(train_loader.sampler, DistributedSampler):
train_loader.sampler.set_epoch(epoch)
tic = time.time()
train(epoch, train_loader, model, optimizer, scheduler, args)
logger.info(f'epoch {epoch}, total time {time.time() - tic:.2f}')
logger.info("==> testing...")
test_acc, test_acc5, test_loss = validate(val_loader, model, args)
if summary_writer is not None:
summary_writer.add_scalar('test_acc', test_acc, epoch)
summary_writer.add_scalar('test_acc5', test_acc5, epoch)
summary_writer.add_scalar('test_loss', test_loss, epoch)
# save model
if dist.get_rank() == 0 and epoch % args.save_freq == 0:
logger.info('==> Saving...')
save_checkpoint(args, epoch, model, test_acc, optimizer, scheduler)
def train(epoch, train_loader, model, optimizer, scheduler, args):
"""
one epoch training
"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
end = time.time()
for idx, (x, _, y) in enumerate(train_loader):
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True)
# measure data loading time
data_time.update(time.time() - end)
# forward
output = model(x)
loss = F.cross_entropy(output, y)
# backward
optimizer.zero_grad()
if args.amp_opt_level != "O0":
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
scheduler.step()
# update meters
acc1, acc5 = accuracy(output, y, topk=(1, 5))
loss_meter.update(loss.item(), x.size(0))
acc1_meter.update(acc1[0], x.size(0))
acc5_meter.update(acc5[0], x.size(0))
batch_time.update(time.time() - end)
end = time.time()
# print info
if idx % args.print_freq == 0:
logger.info(
f'Epoch: [{epoch}][{idx}/{len(train_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
f'Lr {optimizer.param_groups[0]["lr"]:.3f} \t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
def validate(val_loader, model, args):
batch_time = AverageMeter()
loss_meter = AverageMeter()
acc1_meter = AverageMeter()
acc5_meter = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for idx, (x, _, y) in enumerate(val_loader):
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True)
# compute output
output = model(x)
loss = F.cross_entropy(output, y)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, y, topk=(1, 5))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
loss = reduce_tensor(loss)
loss_meter.update(loss.item(), x.size(0))
acc1_meter.update(acc1[0], x.size(0))
acc5_meter.update(acc5[0], x.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if idx % args.print_freq == 0:
logger.info(
f'Test: [{idx}/{len(val_loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\t'
f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\t'
f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')
logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
return acc1_meter.avg, acc5_meter.avg, loss_meter.avg
if __name__ == '__main__':
opt = parse_option(stage='linear')
if opt.amp_opt_level != "O0":
assert amp is not None, "amp not installed!"
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
cudnn.benchmark = True
best_acc1 = 0
os.makedirs(opt.output_dir, exist_ok=True)
logger = setup_logger(output=opt.output_dir, distributed_rank=dist.get_rank(), name="contrast")
if dist.get_rank() == 0:
path = os.path.join(opt.output_dir, "config.json")
with open(path, "w") as f:
json.dump(vars(opt), f, indent=2)
logger.info("Full config saved to {}".format(path))
# print args
# TODO: check format
logger.info(vars(opt))
main(opt)
``` |
{
"source": "jkhuangfu/daily-essays",
"score": 4
} |
#### File: jkhuangfu/daily-essays/del_repeat_file.py
```python
import os
# File extension of the files to delete
del_type = '.txt'
# Path of the directory to process
file_path = os.path.join(os.getcwd(),'')
def get_all_file_name(source_path):
file_name_arr = []
for root, dirs, files in os.walk(source_path):
for file in files:
file_name_arr.append(os.path.splitext(file)[0])
return file_name_arr
def repeat_num(_array):
seen = set()
duplicated = set()
for x in _array:
if x not in seen:
seen.add(x)
else:
duplicated.add(x)
return duplicated
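# Example (illustrative): repeat_num(['a', 'b', 'a', 'c', 'b']) returns {'a', 'b'}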
def del_file(file_arr):
for file_name in file_arr:
if os.path.exists(file_path + file_name + del_type):
os.remove(file_path + file_name + del_type)
res = repeat_num(get_all_file_name(file_path))
print('Duplicate file names ---->', res)
del_file(res)
``` |
{
"source": "jkhuhnke11/generative-art-processing",
"score": 3
} |
#### File: Crazy Circles/sketch_210721a/sketch_210721a.pyde
```python
w, h = 1000, 1000
r = 650
cir_num = 100
# color palette
#colors = [(101, 46, 199), (222, 56, 200), (255, 211, 0)]
#bg = color(51, 19, 92)
colors = [(120, 152, 251), (92, 229, 213), (184, 251, 60)]
bg = color(0, 20, 55)
def setup():
size(w, h)
pixelDensity(2)
background(bg)
noFill()
strokeWeight(2)
for i in range(cir_num):
w_c = random(-r*0.1, r*0.1)
h_c = random(-r*0.1, r*0.1)
r_c = random(-r*0.1, r*0.1)
num = int(random(3))
ran_col = colors[num]
stroke(ran_col[0], ran_col[1], ran_col[2])
circle(w/2+w_c, h/2+h_c, r+r_c)
seed = int(random(5000))
save("Examples/palette2" + str(seed) + ".png")
``` |
{
"source": "jkhulme/LinkToSource",
"score": 2
} |
#### File: jkhulme/LinkToSource/link_to_source.py
```python
import sublime
import sublime_plugin
import subprocess
from .remote import Remote
class LinkToSourceCommand(sublime_plugin.TextCommand):
root = ''
repo_url = ''
cwd = ''
def run(self, edit, branch):
self.cwd = self._cwd()
self.root = self._project_root()
self.branch = self._branch(branch)
self.repo_url = self._repo_url()
sublime.set_clipboard(self._link())
def _link(self):
return '{}{}'.format(self.repo_url, self._relative_path())
def _relative_path(self):
path_to_file = self.view.file_name()
return path_to_file.replace(self.root, '', 1)
def _cwd(self):
return '/'.join(self.view.file_name().split('/')[:-1])
def _repo_url(self):
return Remote(self._remote_origin()).repo_url(self.branch)
def _project_root(self):
git_root_directory = ['rev-parse', '--show-toplevel']
return self._git_command(git_root_directory)
def _remote_origin(self):
git_remote_origin = ['config', '--get', 'remote.origin.url']
return self._git_command(git_remote_origin)
def _current_branch(self):
git_branch = ['rev-parse', '--abbrev-ref', 'HEAD']
return self._git_command(git_branch)
def _git_command(self, command):
git_command = ['git', '-C', self.cwd] + command
return subprocess.check_output(git_command).strip().decode('utf-8')
def _branch(self, branch):
if branch == 'master':
return branch
return self._current_branch()
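# Illustrative note (not part of the plugin): _git_command(['rev-parse', '--show-toplevel'])
# runs e.g. `git -C <directory of the open file> rev-parse --show-toplevel` and returns
# its stripped, UTF-8-decoded output, i.e. the repository root used by _relative_path().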
``` |
{
"source": "jkiang13/aws-infra",
"score": 2
} |
#### File: cfn-oidc-identity-provider/oidc_identity_provider/app.py
```python
import boto3
from crhelper import CfnResource
from botocore.exceptions import ClientError
helper = CfnResource(
json_logging=False, log_level='INFO', boto_level='CRITICAL')
try:
iam = boto3.client("iam")
ARN_FORMAT = "arn:aws:iam::{}:oidc-provider/{}"
except Exception as e:
helper.init_failure(e)
def get_comma_delimited_list(event, parameter):
value = event['ResourceProperties'].get(parameter)
return [x.strip() for x in value.split(',')] if value else []
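# Illustrative: get_comma_delimited_list({'ResourceProperties': {'ClientIDList': 'id1, id2'}},
#                                        'ClientIDList') -> ['id1', 'id2'];
# a missing or empty property yields [].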
def get_parameters(event):
aws_account_id = event['StackId'].split(':')[4]
url = event['ResourceProperties']['Url']
client_id_list = get_comma_delimited_list(event, 'ClientIDList')
thumbprint_list = get_comma_delimited_list(event, 'ThumbprintList')
return aws_account_id, url, client_id_list, thumbprint_list
def update_provider(url, aws_account_id, client_id_list, thumbprint_list):
arn = ARN_FORMAT.format(aws_account_id, url[8:])
try:
response = iam.get_open_id_connect_provider(
OpenIDConnectProviderArn=arn)
except ClientError as e:
if e.response['Error']['Code'] == "NoSuchEntity":
response_create = iam.create_open_id_connect_provider(
Url=url,
ClientIDList=client_id_list,
ThumbprintList=thumbprint_list)
return response_create['OpenIDConnectProviderArn']
else:
raise
deleted_client_ids = set(response['ClientIDList']) - set(
client_id_list)
added_client_ids = set(client_id_list) - set(
response['ClientIDList'])
if set(thumbprint_list) ^ set(response['ThumbprintList']):
iam.update_open_id_connect_provider_thumbprint(
OpenIDConnectProviderArn=arn,
ThumbprintList=thumbprint_list)
for client_id in added_client_ids:
iam.add_client_id_to_open_id_connect_provider(
OpenIDConnectProviderArn=arn, ClientID=client_id)
for client_id in deleted_client_ids:
iam.remove_client_id_from_open_id_connect_provider(
OpenIDConnectProviderArn=arn, ClientID=client_id)
return arn
def create_provider(aws_account_id, url, client_id_list, thumbprint_list):
try:
response = iam.create_open_id_connect_provider(
Url=url,
ClientIDList=client_id_list,
ThumbprintList=thumbprint_list)
return response['OpenIDConnectProviderArn']
except ClientError as e:
if e.response['Error']['Code'] == "EntityAlreadyExists":
arn = ARN_FORMAT.format(aws_account_id, url[8:])
return update_provider(url, aws_account_id, client_id_list, thumbprint_list)
@helper.create
def create(event, context):
return create_provider(*get_parameters(event))
@helper.update
def update(event, context):
aws_account_id, url, client_id_list, thumbprint_list = get_parameters(
event)
if (event['OldResourceProperties']['Url'] !=
event['ResourceProperties']['Url']):
arn = ARN_FORMAT.format(
aws_account_id, event['OldResourceProperties']['Url'][8:])
iam.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
return create_provider(
aws_account_id, url, client_id_list, thumbprint_list)
else:
arn = ARN_FORMAT.format(
aws_account_id, event['ResourceProperties']['Url'][8:])
update_provider(url, aws_account_id, client_id_list, thumbprint_list)
@helper.delete
def delete(event, context):
aws_account_id, _, _, _ = get_parameters(event)
arn = ARN_FORMAT.format(
aws_account_id, event['ResourceProperties']['Url'][8:])
iam.delete_open_id_connect_provider(OpenIDConnectProviderArn=arn)
def lambda_handler(event, context):
helper(event, context)
``` |
{
"source": "jkibele/LandMasker",
"score": 2
} |
#### File: jkibele/LandMasker/ui_landmasker.py
```python
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_LandMasker(object):
def setupUi(self, LandMasker):
LandMasker.setObjectName(_fromUtf8("LandMasker"))
LandMasker.resize(388, 352)
self.verticalLayout_4 = QtGui.QVBoxLayout(LandMasker)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.inputRasterGroupBox = QtGui.QGroupBox(LandMasker)
self.inputRasterGroupBox.setObjectName(_fromUtf8("inputRasterGroupBox"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.inputRasterGroupBox)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.inputRasterComboBox = QtGui.QComboBox(self.inputRasterGroupBox)
self.inputRasterComboBox.setObjectName(_fromUtf8("inputRasterComboBox"))
self.verticalLayout_3.addWidget(self.inputRasterComboBox)
self.verticalLayout_4.addWidget(self.inputRasterGroupBox)
spacerItem = QtGui.QSpacerItem(156, 10, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.groupBox = QtGui.QGroupBox(LandMasker)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.thresholdDoubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox)
self.thresholdDoubleSpinBox.setMaximum(9999.99)
self.thresholdDoubleSpinBox.setProperty("value", 50.0)
self.thresholdDoubleSpinBox.setObjectName(_fromUtf8("thresholdDoubleSpinBox"))
self.horizontalLayout_3.addWidget(self.thresholdDoubleSpinBox)
self.thresholdLabel = QtGui.QLabel(self.groupBox)
self.thresholdLabel.setObjectName(_fromUtf8("thresholdLabel"))
self.horizontalLayout_3.addWidget(self.thresholdLabel)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_3)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.connectivitySpinBox = QtGui.QSpinBox(self.groupBox)
self.connectivitySpinBox.setMaximum(99999)
self.connectivitySpinBox.setSingleStep(10)
self.connectivitySpinBox.setProperty("value", 1000)
self.connectivitySpinBox.setObjectName(_fromUtf8("connectivitySpinBox"))
self.horizontalLayout_2.addWidget(self.connectivitySpinBox)
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addWidget(self.groupBox)
spacerItem3 = QtGui.QSpacerItem(20, 13, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem3)
self.outputRasterGroupBox = QtGui.QGroupBox(LandMasker)
self.outputRasterGroupBox.setObjectName(_fromUtf8("outputRasterGroupBox"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.outputRasterGroupBox)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.outputRasterLineEdit = QtGui.QLineEdit(self.outputRasterGroupBox)
self.outputRasterLineEdit.setObjectName(_fromUtf8("outputRasterLineEdit"))
self.horizontalLayout.addWidget(self.outputRasterLineEdit)
self.selectPushButton = QtGui.QPushButton(self.outputRasterGroupBox)
self.selectPushButton.setObjectName(_fromUtf8("selectPushButton"))
self.horizontalLayout.addWidget(self.selectPushButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.addMaskCheckBox = QtGui.QCheckBox(self.outputRasterGroupBox)
self.addMaskCheckBox.setChecked(True)
self.addMaskCheckBox.setObjectName(_fromUtf8("addMaskCheckBox"))
self.verticalLayout_2.addWidget(self.addMaskCheckBox)
self.verticalLayout_4.addWidget(self.outputRasterGroupBox)
self.buttonBox = QtGui.QDialogButtonBox(LandMasker)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout_4.addWidget(self.buttonBox)
self.retranslateUi(LandMasker)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), LandMasker.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), LandMasker.reject)
QtCore.QObject.connect(self.selectPushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), LandMasker.showFileSelectDialog)
QtCore.QMetaObject.connectSlotsByName(LandMasker)
LandMasker.setTabOrder(self.inputRasterComboBox, self.outputRasterLineEdit)
LandMasker.setTabOrder(self.outputRasterLineEdit, self.selectPushButton)
LandMasker.setTabOrder(self.selectPushButton, self.addMaskCheckBox)
LandMasker.setTabOrder(self.addMaskCheckBox, self.buttonBox)
def retranslateUi(self, LandMasker):
LandMasker.setWindowTitle(QtGui.QApplication.translate("LandMasker", "LandMask", None, QtGui.QApplication.UnicodeUTF8))
self.inputRasterGroupBox.setTitle(QtGui.QApplication.translate("LandMasker", "Input Raster", None, QtGui.QApplication.UnicodeUTF8))
self.groupBox.setTitle(QtGui.QApplication.translate("LandMasker", "Options", None, QtGui.QApplication.UnicodeUTF8))
self.thresholdDoubleSpinBox.setToolTip(QtGui.QApplication.translate("LandMasker", "<html><head/><body><p>Pixel value below which that pixel will be considered as water. The default value is reasonable for near infrared bands in units of raw digital numbers. If the image units are different, you\'ll have to inspect the image and pick an appropriate value.</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.thresholdLabel.setText(QtGui.QApplication.translate("LandMasker", "Value Threshold", None, QtGui.QApplication.UnicodeUTF8))
self.connectivitySpinBox.setToolTip(QtGui.QApplication.translate("LandMasker", "<html><head/><body><p>Pixels below the value threshold must be part of a group of this many contiguous pixels in order to be considered water. The intention is to eliminate shadow areas on land.</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("LandMasker", "Connectivity Threshold", None, QtGui.QApplication.UnicodeUTF8))
self.outputRasterGroupBox.setTitle(QtGui.QApplication.translate("LandMasker", "Output Raster", None, QtGui.QApplication.UnicodeUTF8))
self.selectPushButton.setText(QtGui.QApplication.translate("LandMasker", "Select...", None, QtGui.QApplication.UnicodeUTF8))
self.addMaskCheckBox.setText(QtGui.QApplication.translate("LandMasker", "Add Mask to Project", None, QtGui.QApplication.UnicodeUTF8))
``` |
{
"source": "jkickens/data.gov.mon",
"score": 3
} |
#### File: jkickens/data.gov.mon/data.gov_mon_mt.py
```python
import sys
import os
import requests
import urllib.request
import csv
from datetime import datetime
import time
import concurrent.futures
import threading
import socket
#from multiprocessing import Pool
#osname = sys.platform
#if osname == "darwin":
# print("MacOS detected: set No Proxy to avoid system config crash.")
os.environ['no_proxy'] = "*"
http_proxies = {
"http": None,
"https": None
}
# initialize counters
pagesize = 10
timeout = 5 # wait up to 5 seconds for data source to respond
socket.setdefaulttimeout(timeout)
# name of report file containing results of all data sources
common_report_file = 'data.gov_mon_rpt_' + datetime.now().strftime("%m_%d_%Y") + '.csv'
with open(common_report_file, 'w') as f:
writer = csv.writer(f)
#write header row
writer.writerow(['URL', 'Name', 'Description', 'Resource State', 'Protocol', 'Status', 'Link State'])
# lock to keep threads from writing report file over each other
report_file_lock = threading.Lock()
if len(sys.argv) > 1:
search_string = sys.argv[1]
else:
search_string = "climate"
print("Searching for resources containing: ", search_string)
# reusable functions
def handle_ftplink(resources, report_row, i):
ftplinks = 0
good = 0
bad = 0
ftplinks += 1
# use urllib since requests package is only for HTTP
ftplink = urllib.request.urlopen(resources[i]['url'], data=None)
report_row.append('FTP')
report_row.append('NA')
good += 1
report_row.append('GOOD')
return ftplinks, good, bad
def handle_httplink(resources, report_row, i):
httplinks = 0
good = 0
bad = 0
testlink = requests.get(resources[i]['url'], timeout=timeout, proxies=http_proxies)
report_row.append('HTTP')
report_row.append(testlink.status_code)
httplinks += 1
if testlink.status_code == 200:
good += 1
report_row.append('GOOD')
else:
bad += 1
report_row.append('BAD')
return httplinks, good, bad
# worker function
def get_results(row_range):
# Initialize counts for this run
num_resources = 0
start = row_range[0]
end = row_range[1]
resource_report = []
resources = []
httplinks = 0
ftplinks = 0
good = 0
bad = 0
unknown = 0
report_file = common_report_file
# Now get all results page by page
for startrow in range (start, end, pagesize):
parameters = {'q': search_string, 'rows': pagesize, 'start': startrow}
try:
r = requests.get('https://catalog.data.gov/api/3/action/package_search', params = parameters, proxies=http_proxies)
json_dict = r.json()
num_results_total = json_dict['result']['count']
num_resources_in_response = len(json_dict['result']['results'])
except:
#skip
continue
results = []
resources = []
previous_url = None
# build list of resources within results
for i in range(0, num_resources_in_response):
try:
results.append(json_dict['result']['results'][i])
for j in range(0, len(results[i]['resources'])):
rsrc = results[i]['resources'][j]
# check for URL same as previous - if so, skip
if rsrc['url'] == previous_url:
continue
else:
previous_url = rsrc['url']
resources.append(rsrc)
except:
# just skip bad JSON resource
continue
# now go through and test all resources
num_resources = len(resources)
for i in range(0, num_resources):
report_row = [resources[i]['url'], resources[i]['name'],resources[i]['description'], resources[i]['state']]
# initialize internal function return values
f = 0 # ftplinks count
h = 0 # httplinks count
g = 0 # good count
b = 0 # bad count
# test resource URL
try:
# Check HTTP resources
if resources[i]['resource_locator_protocol'] == 'HTTP' or resources[i]['url'][:4] == 'http':
h, g, b = handle_httplink(resources, report_row, i)
# Check FTP resources
if resources[i]['url'][:3] == 'ftp':
f, g, b = handle_ftplink(resources, report_row, i)
except requests.exceptions.RequestException:
bad += 1
report_row.append('UNKNOWN')
report_row.append('NONE')
report_row.append('BAD')
except:
# maybe bad JSON - check URL directly
try:
if resources[i]['url'][:3] == 'ftp':
f, g, b = handle_ftplink(resources, report_row, i)
else:
if resources[i]['url'][:4] == 'http':
h,g,b = handle_httplink(resources, report_row, i)
else:
unknown += 1
report_row.append('UNKNOWN')
report_row.append('NONE')
report_row.append('UNKNOWN')
except:
bad += 1
report_row.append('UNKNOWN')
report_row.append('NONE')
report_row.append('BAD')
httplinks += h
ftplinks += f
good += g
bad += b
# write result row to CSV
with report_file_lock:
with open(report_file, 'a') as f:
writer = csv.writer(f)
writer.writerow(report_row)
# create return result
results = [num_resources,httplinks, ftplinks, good, bad, unknown]
return results
# Main logic ...
def main():
# We will report elapsed time
start_time = time.time()
# Get count of total results
    parameters = {'q': search_string, 'rows': 0}
    r = requests.get('https://catalog.data.gov/api/3/action/package_search',
                     params=parameters, timeout=10, proxies=http_proxies)
    json_dict = r.json()
    print('Request success = ', json_dict['success'])
    num_results_total = json_dict['result']['count']
print('Total results: ', num_results_total)
# Create thread pool and run
poolsize = 10
results_per_thread = 10
batch_size = 100
num_results_test = num_results_total # for testing only
# create list of ranges
ranges = []
# Reset result counts
total_resources = 0
good = 0
bad = 0
unknown = 0
httplinks = 0
ftplinks = 0
for batch_no in range(0, num_results_test, batch_size):
ranges.append([batch_no, batch_no+batch_size-1])
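    # Illustrative: with batch_size=100 this yields ranges [0, 99], [100, 199], ...,
    # each of which is handled by one worker thread via get_results() below.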
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
list_of_results = executor.map(get_results, ranges)
# Consolidate counts
for each_result in list_of_results:
total_resources += each_result[0]
httplinks += each_result[1]
ftplinks += each_result[2]
good += each_result[3]
bad += each_result[4]
unknown += each_result[5]
# Print summary of run
print ('Total number of resources: ', total_resources)
print ('HTTP Links: ', httplinks)
print ('FTP links: ', ftplinks)
print ('Good links: ', good)
print ('Bad links: ', bad)
print ("Unknown: ", unknown)
print ('See detailed report in ', common_report_file)
# Print elapsed time needed to create report
elapsed_time = time.time() - start_time
print ('Elapsed Time: ', round(elapsed_time), ' seconds')
if __name__ == '__main__':
main()
``` |
{
"source": "jkielbaey/aipnd-project",
"score": 2
} |
#### File: jkielbaey/aipnd-project/model.py
```python
import time
from collections import OrderedDict
import torch
from torch import nn, optim
from torchvision import models
class FlowerRecognizor():
def __init__(self, base_model='densenet121', hidden_units=512,
learning_rate=0.005, use_gpu=False):
self.base_model = base_model
self.hidden_units = hidden_units
self.use_gpu = use_gpu
if not use_gpu:
self.device = torch.device("cpu")
else:
self.device = torch.device("cuda")
self._create_model(base_model, hidden_units, learning_rate)
self.criterion = None
# print(self.model)
def _create_model(self, base_model, hidden_units, learning_rate=0.005):
supported_base_models = {
'vgg13': models.vgg13,
'vgg13_bn': models.vgg13_bn,
'vgg16': models.vgg16,
'vgg16_bn': models.vgg16_bn,
'vgg19': models.vgg19,
'vgg19_bn': models.vgg19_bn,
'densenet121': models.densenet121,
'densenet169': models.densenet169
}
input_features_dict = {
'vgg13': 25088,
'vgg13_bn': 25088,
'vgg16': 25088,
'vgg16_bn': 25088,
'vgg19': 25088,
'vgg19_bn': 25088,
'densenet121': 1024,
'densenet169': 1024
}
base_model_function = supported_base_models.get(base_model, None)
        if not base_model_function:
            raise ValueError("Not a valid base_model. Try: {}".format(
                ','.join(supported_base_models.keys())))
self.model = base_model_function(pretrained=True)
input_features = input_features_dict[base_model]
# Freeze weights of feature extractor.
for param in self.model.parameters():
param.requires_grad = False
self.model.base_model = base_model
self.model.hidden_units = hidden_units
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_features, hidden_units)),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout(0.05)),
('fc3', nn.Linear(hidden_units, 102)),
('output', nn.LogSoftmax(dim=1))
]))
self.model.classifier = classifier
self.optimizer = optim.Adam(
self.model.classifier.parameters(), lr=learning_rate)
def _load_checkpoint(self, model_state_dict, optim_state_dict, class_to_idx):
self.model.load_state_dict(model_state_dict)
self.model.class_to_idx = class_to_idx
self.optimizer.load_state_dict(optim_state_dict)
@staticmethod
def load_checkpoint(checkpoint_file, use_gpu=False):
"""
Creates a model from an existing checkpoint files.
Input:
- checkpoint_file: filepath to .pth file
Output:
- object of FlowerRecognizor with model loaded from checkpoint
"""
checkpoint = torch.load(checkpoint_file, map_location='cpu')
base_model = checkpoint.get("base_model", "densenet121")
hidden_units = int(checkpoint.get("hidden_units", 512))
        fr = FlowerRecognizor(base_model, hidden_units, use_gpu=use_gpu)
fr._load_checkpoint(checkpoint['model_state_dict'],
checkpoint['optim_state_dict'],
checkpoint['class_to_idx'])
return fr
def predict(self, image_obj, topk):
tensor_image = torch.from_numpy(image_obj).type(torch.FloatTensor)
tensor_image = tensor_image.unsqueeze_(0)
        tensor_image = tensor_image.to(self.device)
self.model.to(self.device)
self.model.eval()
with torch.no_grad():
outputs = self.model(tensor_image)
probs = torch.exp(outputs)
top_p, top_class = probs.topk(topk, dim=1)
        top_p = top_p.cpu().numpy()[0]
        top_class = top_class.cpu().numpy()[0]
idx_to_class = {val: key for key, val in
self.model.class_to_idx.items()}
top_class = [idx_to_class[i] for i in top_class]
return top_p, top_class
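    # Usage sketch (illustrative; file name and preprocessing are assumptions):
    #   fr = FlowerRecognizor.load_checkpoint('checkpoint.pth', use_gpu=False)
    #   probs, classes = fr.predict(processed_image, topk=5)
    # where processed_image is a numpy array shaped like the network input (C x H x W).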
def _save_model(self, filepath, epochs):
print(f"Saving model..")
model_checkpoint = {
'model_state_dict': self.model.state_dict(),
'base_model': self.model.base_model,
'class_to_idx': self.model.class_to_idx,
'optim_state_dict': self.optimizer.state_dict(),
'nr_epochs': epochs,
'hidden_units': self.model.hidden_units
}
torch.save(model_checkpoint, filepath)
def _validate(self, valid_loader):
valid_loss = 0
valid_accuracy = 0
for images, labels in valid_loader:
images, labels = images.to(self.device), labels.to(self.device)
logps = self.model(images)
loss = self.criterion(logps, labels)
valid_loss += loss.item()
ps = torch.exp(logps)
_, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
valid_accuracy += equals.type(torch.FloatTensor).mean()
return valid_loss/len(valid_loader), valid_accuracy/len(valid_loader)
def test(self, test_loader):
with torch.no_grad():
test_loss, test_accuracy = self._validate(test_loader)
print(f"Test loss: {test_loss:.3f}.. "
f"Test accuracy: {100 * test_accuracy:.2f}%..")
def train(self, save_dir, train_loader, valid_loader, class_to_idx, epochs):
self.model.to(self.device)
self.criterion = nn.NLLLoss()
train_losses, valid_losses = [], []
model_save_path = save_dir + "/checkpoint.pth"
self.model.class_to_idx = class_to_idx
previous_valid_loss = None
for epoch in range(epochs):
epoch_start = time.time()
epoch_train_running_loss = 0
epoch_batches = 0
print(f"Epoch {epoch+1}/{epochs}..")
for images, labels in train_loader:
epoch_batches += 1
images, labels = images.to(self.device), labels.to(self.device)
self.optimizer.zero_grad()
logps = self.model(images)
loss = self.criterion(logps, labels)
loss.backward()
self.optimizer.step()
epoch_train_running_loss += loss.item()
if epoch_batches % 10 == 0:
print(f" Batch {epoch+1}.{epoch_batches}/{epochs}.. done")
else:
with torch.no_grad():
self.model.eval()
valid_loss, valid_accuracy = self._validate(valid_loader)
valid_losses.append(valid_loss)
self.model.train()
# Save model if it was better.
if not previous_valid_loss or valid_loss < previous_valid_loss:
self._save_model(model_save_path, epoch)
previous_valid_loss = valid_loss
train_losses.append(epoch_train_running_loss/epoch_batches)
print(f"Epoch {epoch+1}/{epochs}.. "
f"Duration {time.time() - epoch_start:.1f}s.. "
f"Train loss: {epoch_train_running_loss/epoch_batches:.3f}.."
f"Validation loss: {valid_loss:.3f}.. "
f"Validation accuracy: {valid_accuracy:.3f}..")
``` |
{
"source": "jkielbaey/coursera-cloud-computing-capstone",
"score": 2
} |
#### File: part2_streaming/ingestion/load_data.py
```python
import boto3
import csv
import json
import re
import os
import logging
from multiprocessing import Pool
import sys
sys.path.insert(0, './lib')
from kafka import KafkaProducer
lambda_client = boto3.client('lambda')
bucket_name = None
kafka_topic = None
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logger = logging.getLogger()
if 'DEBUG' in os.environ and os.environ['DEBUG'] == 'true':
logger.setLevel(logging.DEBUG)
logger.debug('debug mode enabled.')
else:
logger.setLevel(logging.INFO)
def handler_file(event, context):
key_name = event['key_name']
bucket_name = event['bucket_name']
kafka_topics = event['kafka_topic'].split(",")
for t in kafka_topics:
logging.info("Sending data to topic \"%s\"." % t)
kafka_hosts = os.environ['KAFKA_HOSTS'].split(",")
logging.info("Started handling %s." % key_name)
s3 = boto3.resource('s3')
obj = s3.Object(bucket_name, key_name)
csvlines = obj.get()['Body'].read().decode('utf-8').splitlines()
csvreader = csv.DictReader(csvlines)
nr_lines = 0
producer = KafkaProducer(bootstrap_servers=kafka_hosts)
nr_topics = len(kafka_topics)
topic_id = 0
logging.info("Producer created for %s." % key_name)
for l in csvreader:
producer.send(kafka_topics[topic_id], json.dumps(l))
topic_id += 1
nr_lines += 1
if topic_id == nr_topics:
topic_id = 0
producer.flush()
logging.info("Messages produced. Nr of messages: %d." % nr_lines)
return nr_lines
def handler_load(event, context):
bucket_name = event['bucket_name']
key_prefix = event['key_prefix']
kafka_topic = event['kafka_topic']
nr_failed = 0
nr_success = 0
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
for obj in bucket.objects.filter(Prefix=key_prefix):
if re.search('\.csv$', obj.key):
logging.info("File added %s" % obj.key)
args = {
'bucket_name': bucket_name,
'key_name': obj.key,
'kafka_topic': kafka_topic
}
logger.info('Starting async processing of %s...' % obj.key)
results = lambda_client.invoke_async(
FunctionName='capstone-kafka-ingest-dev-send_file',
InvokeArgs=json.dumps(args)
)
logger.info("Async processing of %s started." % obj.key)
if results['Status'] == 202:
logger.info('Lambda invoked successfully.')
nr_success += 1
else:
logger.error('Failed to start lambda for %s.' % obj.key)
nr_failed += 1
logger.info('%d lambda started successfully' % nr_success)
logger.info('%d lambda failed to start.' % nr_failed)
def worker_lambda(key):
logger.info("Start processing of %s..." % key)
args = {
'bucket_name': bucket_name,
'key_name': key,
'kafka_topic': kafka_topic
}
results = lambda_client.invoke(
FunctionName='capstone-kafka-ingest-dev-send_file',
InvocationType='RequestResponse',
Payload=json.dumps(args))
logging.info(str(results))
if results['StatusCode'] == 200:
logger.info('Lambda completed successfully.')
return (key, True)
else:
logger.error('Failed to start lambda for %s.' % key)
return (key, False)
if __name__ == '__main__':
bucket_name, key_prefix, kafka_topic = sys.argv[1:]
s3 = boto3.resource('s3')
bucket = s3.Bucket(bucket_name)
files_to_process = []
for obj in bucket.objects.filter(Prefix=key_prefix):
if re.search('\.csv$', obj.key):
logger.info("File added %s" % obj.key)
files_to_process.append(obj.key)
pool = Pool(100)
results = pool.map(worker_lambda, files_to_process)
success = []
failed = []
for result in results:
if result[1]:
success.append(result[0])
else:
failed.append(result[0])
if len(failed) != 0:
print "Not all files were processed successfully :("
print(str(failed))
print "%d files completed successfully" % len(success)
``` |
{
"source": "jkiesele/HGCalML-1",
"score": 2
} |
#### File: HGCalML-1/modules/accknn_op.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
import globals as gl
from oc_helper_ops import SelectWithDefault
'''
Indices MUST be unique in each row.
The only exception is multiple self-references, which can be used as a sort of padding.
Alternatively, the index -1 is skipped (non-TF-compatible padding).
'''
_accknn_op = tf.load_op_library('accumulate_knn.so')
_accknn_grad_op = tf.load_op_library('accumulate_knn_grad.so')
def AccumulateLinKnn(weights, features, indices,
mean_and_max=True):
'''
Accumulates neighbour features with linear weights (not exp(-w) as AccumulateKnn)
'''
if not gl.acc_ops_use_tf_gradients:
return _accknn_op.AccumulateKnn(distances=weights, features=features, indices=indices,
n_moments=0, mean_and_max=mean_and_max)
weights = tf.expand_dims(weights,axis=2) #V x K x 1
nfeat = SelectWithDefault(indices, features, 0.) # V x K x F
wfeat = weights*nfeat
fmean = tf.reduce_mean(wfeat,axis=1)# V x F
fmax = tf.reduce_max(wfeat,axis=1)
fout = fmean
if mean_and_max:
fout = tf.concat([fmean,fmax],axis=1)
return fout,None
def AccumulateKnn(distances, features, indices,
mean_and_max=True):
'''
.Output("out_features: float32")
.Output("out_max_idxs: int32");
Assumes that neighbour indices can be padded with -1, but not mixed, e.g. [1,4,-1,2] needs to be [1,4,2,-1]
Other than the padding, the indices must be unique
'''
#compatibility
distances = tf.exp(-distances)
if not gl.acc_ops_use_tf_gradients:
return _accknn_op.AccumulateKnn(distances=distances, features=features, indices=indices,
n_moments=0, mean_and_max=mean_and_max)
distances = tf.expand_dims(distances,axis=2) #V x K x 1
nfeat = SelectWithDefault(indices, features, 0.) # V x K x F
wfeat = distances*nfeat
fmean = tf.reduce_mean(wfeat,axis=1)# V x F
fmax = tf.reduce_max(wfeat,axis=1)
fout = fmean
if mean_and_max:
fout = tf.concat([fmean,fmax],axis=1)
return fout,None
#this refers to the OP called AccumulateKnn, not the function below
@ops.RegisterGradient("AccumulateKnn")
def _AccumulateKnnGrad(op, grad, gradmaxidxs):
"""
"""
distances = op.inputs[0]
features = op.inputs[1]
max_feat_indices = op.outputs[1]
neigh_indices = op.inputs[2]
dist_grad , feat_grad = _accknn_grad_op.AccumulateKnnGrad(grad_from_out_features=grad,
distances=distances,
features=features,
neigh_indices=neigh_indices,
max_feat_indices=max_feat_indices)
return [dist_grad , feat_grad, None] #no gradient for indices
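# Usage sketch (hedged): given per-edge distances, per-vertex features and a neighbour
# index matrix (e.g. from SelectKnn), the op returns neighbour features aggregated per
# vertex; distances are turned into exp(-distance) weights internally (see above):
#   accumulated, _ = AccumulateKnn(distances, features, indices)
# With mean_and_max=True the output has 2*F features (mean part followed by max part).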
```
#### File: HGCalML-1/modules/assign_condensate_op.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
_bc_op = tf.load_op_library('assign_to_condensates.so')
<EMAIL>
def AssignToCondensates(ccoords,
c_point_idx,
row_splits,
radius=0.8,
dist=None):
'''
REGISTER_OP("AssignToCondensates")
.Attr("radius: float")
.Input("ccoords: float32")
.Input("dist: float32")
.Input("c_point_idx: int32")
.Input("row_splits: int32")
.Output("asso_idx: int32");
'''
if dist is None:
dist = tf.ones_like(ccoords[:,0:1])
else:
tf.assert_equal(tf.shape(ccoords[:,0:1]),tf.shape(dist))
return _bc_op.AssignToCondensates(ccoords=ccoords,
dist=dist,
c_point_idx=c_point_idx,
row_splits=row_splits,
radius=radius)
@ops.RegisterGradient("AssignToCondensates")
def _AssignToCondensatesGrad(op, asso_grad):
return [None, None, None, None]
#### convenient helpers, not the OP itself
from condensate_op import BuildCondensates
def BuildAndAssignCondensates(ccoords, betas, row_splits,
radius=0.8, min_beta=0.1,
dist=None,
soft=False,
assign_radius=None):
if assign_radius is None:
assign_radius = radius
asso, iscond, ncond = BuildCondensates(ccoords, betas, row_splits,
radius=radius, min_beta=min_beta,
dist=dist,
soft=soft)
c_point_idx,_ = tf.unique(asso)
asso_idx = AssignToCondensates(ccoords,
c_point_idx,
row_splits,
radius=assign_radius,
dist=dist)
return asso_idx, iscond, ncond
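# Usage sketch (hedged): build condensation points from the betas/coordinates and then
# assign every vertex to the closest condensation point within assign_radius:
#   asso_idx, is_cond, n_cond = BuildAndAssignCondensates(ccoords, betas, row_splits,
#                                                         radius=0.8, min_beta=0.1)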
```
#### File: HGCalML-1/modules/bin_by_coordinates_op.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
_bin_by_coordinates = tf.load_op_library('bin_by_coordinates.so')
'''
.Input("coordinates: float")
.Input("row_splits: int32")
.Input("bin_width: float")
.Input("nbins: int32")//same in all dimensions
.Output("output: int32");
'''
def BinByCoordinates(coordinates, row_splits, bin_width=None, n_bins=None, calc_n_per_bin=True):
'''
Assign bins to coordinates
@type coordinates: tf.Tensor(float32)
@param coordinates: coordinates per input point
@type row_splits: tf.Tensor(int)
@param row_splits: row splits following tf.ragged convention
@type bin_width: tf.Tensor(float32) / None
@param bin_width: will be the same for all dimensions (either bin_width or n_bins must be specified)
@type n_bins: tf.Tensor(int) / None
@param n_bins: this is the maximum number of bins in any dimension (either bin_width or n_bins must be specified)
@type calc_n_per_bin: bool
@param calc_n_per_bin: calculates the number of points per bin and returns it
output:
    - bin indices (dim = [rs] + dim(coordinates)). The first index constitutes the row split index
- bin indices (the above) flattened
- number of bins used per dimension (dim = dim(coordinates))
- bin width used (dim = 1)
- (opt) number of points per bin (dim = 1)
'''
#calculate
min_coords = tf.reduce_min(coordinates,axis=0,keepdims=True)
coordinates -= min_coords
dmax_coords = tf.reduce_max(coordinates,axis=0)
if bin_width is None:
assert n_bins is not None
bin_width = (dmax_coords) / tf.cast(n_bins, dtype='float32')
n_bins = None #re-calc in dimensions
bin_width = tf.reduce_max(bin_width)[...,tf.newaxis]#just add a '1' dimension
if n_bins is None:
assert bin_width is not None
n_bins = (dmax_coords) / bin_width
n_bins += 1.
n_bins = tf.cast(n_bins, dtype='int32')
binass,flatbinass,nperbin = _bin_by_coordinates.BinByCoordinates(coordinates=coordinates,
row_splits=row_splits,
bin_width=bin_width, nbins=n_bins,
calc_n_per_bin=calc_n_per_bin)
#sanity checks
#with tf.control_dependencies([tf.assert_less(binass,
# tf.expand_dims(
# tf.concat([tf.constant([row_splits.shape[0]-1]) ,n_bins],axis=0),
# axis=0))]):
if calc_n_per_bin:
return binass,flatbinass,n_bins,bin_width,nperbin
else:
return binass,flatbinass,n_bins,bin_width
@ops.RegisterGradient("BinByCoordinates")
def _BinByCoordinatesGrad(op, idxout_grad, flatidxgrad,npbingrad):
return None, None, None, None
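# Usage sketch (hedged; shapes follow the docstring above):
#   coords = tf.random.uniform((1000, 3))
#   row_splits = tf.constant([0, 1000], dtype='int32')
#   bin_idx, flat_idx, n_bins, bin_width, n_per_bin = BinByCoordinates(coords, row_splits,
#                                                                      n_bins=10)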
```
#### File: HGCalML-1/modules/binned_select_knn_op.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
_binned_select_knn = tf.load_op_library('binned_select_knn.so')
def _BinnedSelectKnn(K : int, coords, bin_idx, dim_bin_idx, bin_boundaries, n_bins, bin_width , tf_compatible=False):
'''
the op wrapper only
'''
return _binned_select_knn.BinnedSelectKnn(n_neighbours=K,
coords=coords,
bin_idx=bin_idx,
dim_bin_idx=dim_bin_idx,
bin_boundaries=bin_boundaries,
n_bins=n_bins,
bin_width=bin_width,
tf_compatible=tf_compatible
)
def BinnedSelectKnn(K : int, coords, row_splits, n_bins=None, max_bin_dims=3, tf_compatible=False, max_radius=None):
'''
max_radius is a dummy for now to make it a drop-in replacement
'''
from bin_by_coordinates_op import BinByCoordinates
from index_replacer_op import IndexReplacer
# the following number of bins seems a good~ish estimate for good performance
    # for homogeneous point distributions but should be subject to more tests
elems_per_rs = 1
if row_splits.shape[0] is not None:
elems_per_rs = row_splits[1]
if n_bins is None:
n_bins = tf.math.pow(tf.cast(elems_per_rs,dtype='float32')/(K/32),1/max_bin_dims)
n_bins = tf.cast(n_bins,dtype='int32')
n_bins = tf.where(n_bins<5,5,n_bins)
n_bins = tf.where(n_bins>20,20,n_bins)#just a guess
bin_coords = coords
if bin_coords.shape[-1]>max_bin_dims:
bin_coords = bin_coords[:,:max_bin_dims]
dbinning,binning, nb, bin_width, nper = BinByCoordinates(bin_coords, row_splits, n_bins=n_bins)
#if this becomes a bottleneck one could play tricks since nper and bin numbers are predefined
sorting = tf.argsort(binning)
scoords = tf.gather_nd( coords, sorting[...,tf.newaxis])
sbinning = tf.gather_nd( binning, sorting[...,tf.newaxis])
sdbinning = tf.gather_nd( dbinning, sorting[...,tf.newaxis])
#add a leading 0
bin_boundaries = tf.concat([tf.zeros([1],dtype='int32'), nper],axis=0) #row_splits[0:1]
# make it row split like
bin_boundaries = tf.cumsum(bin_boundaries)
idx,dist = _BinnedSelectKnn(K, scoords, sbinning, sdbinning, bin_boundaries=bin_boundaries,
n_bins=nb, bin_width=bin_width, tf_compatible=tf_compatible )
if row_splits.shape[0] is None:
return idx, dist
#sort back
idx = IndexReplacer(idx,sorting)
dist = tf.scatter_nd(sorting[...,tf.newaxis], dist, dist.shape)
idx = tf.scatter_nd(sorting[...,tf.newaxis], idx, idx.shape)
return idx, dist
_sknn_grad_op = tf.load_op_library('select_knn_grad.so')
@ops.RegisterGradient("BinnedSelectKnn")
def _BinnedSelectKnnGrad(op, idxgrad, dstgrad):
coords = op.inputs[0]
indices = op.outputs[0]
distances = op.outputs[1]
coord_grad = _sknn_grad_op.SelectKnnGrad(grad_distances=dstgrad, indices=indices, distances=distances, coordinates=coords)
return coord_grad,None,None,None,None,None
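# Usage sketch (hedged): K nearest neighbours per point, binned for speed:
#   idx, dist = BinnedSelectKnn(16, coords, row_splits)
# idx has shape [n_points, 16]; dist holds the matching distances as produced by the
# underlying custom op.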
```
#### File: compiled/tests/testing_tools.py
```python
import tensorflow as tf
import time
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from select_knn_op import SelectKnn
import os
def makeIndices(nvert,nneigh):
all = []
for i in range(nvert):
a = np.array([],dtype='int32')
while len(a) < nneigh-1:
a = np.random.choice(nvert, nneigh-1, replace=False)
a = a[a != i]
a = np.concatenate([np.array([i],dtype='int32'),a],axis=-1)
a = np.expand_dims(a, axis=0)
all.append(a)
return np.concatenate(all,axis=0)
class Benchmarker(object):
def __init__(self, tf_implementation, custom_implementation, name, use_distances_direct,tfoncpu,customoncpu,mean_and_max):
self.tfimp=tf_implementation
self.customimpl=custom_implementation
self.name = name
self.debugout=False
self.use_distances_direct=use_distances_direct
self.tfoncpu=tfoncpu
self.customoncpu=customoncpu
self.mean_and_max=mean_and_max
def benchmark(self, nvert = 30000, nfeat = 64, nneigh = 128, ncoords = 4, dogradient=False,do_tf=True):
coords = tf.constant( np.random.rand(nvert,ncoords) ,dtype='float32')
feats = tf.constant( np.random.rand(nvert,nfeat) ,dtype='float32')
row_splits = tf.constant( [0, nvert] ,dtype='int32')
indices, distances = SelectKnn(K=nneigh, coords=coords, row_splits=row_splits)
if self.use_distances_direct:
coords = distances
tf_failed = False
if not dogradient:
#each gets one dry run to compile
meanmax = self.customimpl(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
t0 = time.time()
for i in range(0,50):
meanmax = self.customimpl(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
op_time= (time.time() - t0)/50.
print('op_time',op_time)
tf_time=0
if do_tf:
try:
meanmax = self.tfimp(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
t0 = time.time()
for i in range(0,50):
meanmax = self.tfimp(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
tf_time= (time.time() - t0)/50.
except:
tf_failed=True
print('tf_time',tf_time)
return op_time, tf_time
else:
with tf.GradientTape(persistent=True,watch_accessed_variables=True) as t_newop:
t_newop.watch(coords)
t_newop.watch(feats)
meanmax = self.customimpl(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
#once to get it compiled in case needed
feat_grad = t_newop.gradient(meanmax, feats)
coord_grad = t_newop.gradient(meanmax, coords)
t0 = time.time()
for i in range(5) :
feat_grad = t_newop.gradient(meanmax, feats)
coord_grad = t_newop.gradient(meanmax, coords)
op_time= (time.time() - t0)/5.
tf_time=0
if do_tf:
try:
with tf.GradientTape(persistent=True) as t_tfop:
t_tfop.watch(coords)
t_tfop.watch(feats)
meanmax = self.tfimp(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
feat_grad = t_tfop.gradient(meanmax, feats)
coord_grad = t_tfop.gradient(meanmax, coords)
t0 = time.time()
for i in range(5) :
feat_grad = t_tfop.gradient(meanmax, feats)
coord_grad = t_tfop.gradient(meanmax, coords)
tf_time= (time.time() - t0)/5.
except:
tf_failed=True
return op_time, tf_time
def difference(self, nvert = 300, nfeat = 64, nneigh = 32, ncoords = 4, onlyForward=False, assert_error=True):
coords = tf.constant( np.random.rand(nvert,ncoords) ,dtype='float32')
feats = np.random.rand(nvert,nfeat)
#to make the max unambiguous
frange = np.arange(nvert)
np.random.shuffle(frange)
toadd = np.expand_dims(frange, axis=1)
feats = tf.constant(feats+toadd, dtype='float32')
row_splits = tf.constant( [0, nvert] ,dtype='int32')
#print('building indices')
with tf.device("/cpu:0"):
indices, distances = SelectKnn(K=nneigh, coords=coords, row_splits=row_splits)
#indices = indices[:,1:]
#distances = distances[:,1:]
#print('process custom op')
if self.use_distances_direct:
coords = distances
op_time = 0
tfdevstring = "/gpu:0"
if self.customoncpu:
tfdevstring = "/cpu:0"
tfdev = tf.device(tfdevstring)
t0 = time.time()
with tfdev:
t0 = time.time()
with tf.GradientTape(persistent=True,watch_accessed_variables=True) as t_newop:
t_newop.watch(coords)
t_newop.watch(feats)
meanmax = self.customimpl( coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
t1 = time.time()
op_time= t1 - t0
#print('op time',op_time)
with tfdev:
coord_grad = t_newop.gradient(meanmax, coords)
feat_grad = t_newop.gradient(meanmax, feats)
if self.debugout:
print('coords',coords,'\n')
print('feats',feats,'\n')
print('custom output',meanmax,'\n')
print('indices',indices)
### tf op implementation
print('TFTFTF')
tf_feat_grad = None
tf_coord_grad = None
#print('process TF op')
tfdevstring = "/gpu:0"
if self.tfoncpu:
tfdevstring = "/cpu:0"
tfdev = tf.device(tfdevstring)
t0 = time.time()
with tfdev:
with tf.GradientTape(persistent=True) as t_tfop:
t_tfop.watch(coords)
t_tfop.watch(feats)
tf_meanmax = self.tfimp(coords, features=feats, indices=indices, mean_and_max=self.mean_and_max)
tf_time= time.time() - t0
if self.debugout:
print('TF output',tf_meanmax,'\n')
with tfdev:
tf_feat_grad = t_tfop.gradient(tf_meanmax, feats)
tf_coord_grad = t_tfop.gradient(tf_meanmax, coords)
with tf.device("/cpu:0"):
difference = meanmax - tf_meanmax
max_rel_difference = tf.reduce_max(tf.abs(difference/(tf.abs(tf_meanmax)+1e-3))).numpy()
max_difference = tf.reduce_max(tf.abs(difference)).numpy()
#print('max rel difference',max_rel_difference)
#print('max difference',max_difference)
#print('op time',op_time)
#print('tf time',tf_time)
if assert_error:
assert max_difference < 1e-2
if onlyForward:
return
## gradients
#print('tf_feat_grad',tf_feat_grad)
#print('tf_coord_grad',tf_coord_grad)
#print('feat_grad',feat_grad)
#print('coord_grad',coord_grad)
feat_grad_diff = feat_grad - tf_feat_grad
coord_grad_diff = coord_grad - tf_coord_grad
#print('feat_grad_diff',feat_grad_diff)
#print('coord_grad_diff',coord_grad_diff)
#print('relative feat_grad_diff',feat_grad_diff/tf_feat_grad)
#print('relative coord_grad_diff',coord_grad_diff/tf_coord_grad)
maxfeatgraddiff = tf.reduce_max(tf.abs(feat_grad_diff))
maxcoordgraddiff = tf.reduce_max(tf.abs(coord_grad_diff))
rel_feat_grad_diff = (feat_grad_diff)/(tf.abs(tf_feat_grad)+1e-2)
rel_coord_grad_diff = coord_grad_diff/(tf.abs(tf_coord_grad)+1e-2)
maxrelfeatgraddiff = tf.reduce_max(tf.abs(rel_feat_grad_diff))
maxrelcoordgraddiff = tf.reduce_max(tf.abs(rel_coord_grad_diff))
#print('\nmax relative feature grad diff', maxrelfeatgraddiff)
#print('max relative coordinate grad diff', maxrelcoordgraddiff)
def check_indices():
idx_ok=True
for i in tf.range(indices.shape[0]):
y,idx,c = tf.unique_with_counts(indices[i])
if (c.numpy() > 1).any() or (indices[i].numpy() >= indices.shape[0]).any() :
idx_ok=False
print("indices not unique", indices[i])
if idx_ok:
print('indices ok')
if self.debugout:
print('custom feature grad ',feat_grad)
print('TF feature grad',tf_feat_grad)
print('difference',feat_grad_diff)
print('custom coord grad',coord_grad)
print('TF coord grad',tf_coord_grad)
print('Difference',coord_grad_diff)
if maxrelfeatgraddiff > 1e-2:
print('Feature gradient off:')
print('max rel diff',maxrelfeatgraddiff)
print('max diff',maxfeatgraddiff)
print('min,max feat', tf.reduce_min(feats), tf.reduce_max(feats))
print('min,max coords', tf.reduce_min(coords), tf.reduce_max(coords))
check_indices()
if maxrelcoordgraddiff > 1e-2:
print('Coordinate gradient off:')
print('max rel diff',maxrelcoordgraddiff)
print('max diff',maxcoordgraddiff)
print('min,max feat', tf.reduce_min(feats), tf.reduce_max(feats))
print('min,max coords', tf.reduce_min(coords), tf.reduce_max(coords))
check_indices()
if maxfeatgraddiff > 1e-2:
print('Feature gradient off:')
print('max rel diff',maxrelfeatgraddiff)
print('max diff',maxfeatgraddiff)
print('min,max feat', tf.reduce_min(feats), tf.reduce_max(feats))
print('min,max coords', tf.reduce_min(coords), tf.reduce_max(coords))
check_indices()
if maxcoordgraddiff > 1e-2:
print('Coordinate gradient off:')
print('max rel diff',maxrelcoordgraddiff)
print('max diff',maxcoordgraddiff)
print('min,max feat', tf.reduce_min(feats), tf.reduce_max(feats))
print('min,max coords', tf.reduce_min(coords), tf.reduce_max(coords))
check_indices()
if assert_error:
assert maxrelfeatgraddiff < 5e-2
assert maxrelcoordgraddiff < 5e-2
reldifference = tf.reshape(difference/(tf.abs(tf_meanmax)+1e-4),[-1])
difference = tf.reshape(difference,[-1])
rel_feat_grad_diff = tf.reshape(rel_feat_grad_diff,[-1])
rel_coord_grad_diff = tf.reshape(rel_coord_grad_diff,[-1])
feat_grad_diff = tf.reshape(feat_grad_diff,[-1])
coord_grad_diff = tf.reshape(coord_grad_diff,[-1])
return difference,reldifference,rel_feat_grad_diff,rel_coord_grad_diff,feat_grad_diff,coord_grad_diff
def run_extended_difference(self,
nvert,
nneigh,
nfeat,
addstring=""):
diff = []
reldiff = []
relcoordgraddiff = []
relfeatgraddiff = []
coordgraddiff = []
featgraddiff = []
for nv in nvert:
for nn in nneigh:
for nf in nfeat:
print('nv:',nv, 'nf:',nf, 'nn:' ,nn)
for blub in range(5): #run a few times
d,dr,fr,cr,f,c = self.difference(nv,nf,nn, ncoords = 4, onlyForward=False, assert_error=False)
#print('>>> max feat diff',tf.reduce_max(tf.abs(f)))
diff.append(d)
reldiff.append(dr)
coordgraddiff.append(c)
featgraddiff.append(f)
relcoordgraddiff.append(cr)
relfeatgraddiff.append(fr)
def conc_and_reshape(intensor):
x = tf.concat(intensor,axis=0)
x = tf.reshape(x, [-1])
return x.numpy()
diff = conc_and_reshape(diff)
reldiff = conc_and_reshape(reldiff)
coordgraddiff = conc_and_reshape(coordgraddiff)
featgraddiff = conc_and_reshape(featgraddiff)
#print('total >>> max feat diff',tf.reduce_max(tf.abs(featgraddiff)))
relcoordgraddiff = conc_and_reshape(relcoordgraddiff)
relfeatgraddiff = conc_and_reshape(relfeatgraddiff)
nbins=101
print('plotting...')
plt.close()
plt.hist(diff, bins=nbins)
plt.xlabel("Output Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"output_diff.pdf")
plt.close()
plt.hist(reldiff, bins=nbins)
plt.xlabel("Relative Output Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"rel_output_diff.pdf")
plt.close()
plt.hist(coordgraddiff, bins=nbins)
plt.xlabel("Coordinate Gradient Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"coord_grad_diff.pdf")
plt.close()
plt.hist(featgraddiff, bins=nbins)
plt.xlabel("Feature Gradient Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"feat_grad_diff.pdf")
plt.close()
plt.hist(relcoordgraddiff, bins=nbins)
plt.xlabel("Relative Coordinate Gradient Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"rel_coord_grad_diff.pdf")
plt.close()
plt.hist(relfeatgraddiff, bins=nbins)
plt.xlabel("Relative Feature Gradient Difference")
plt.yscale('log')
plt.savefig(self.name+addstring+"rel_feat_grad_diff.pdf")
plt.close()
def run_extended_benchmark(self,
nvert,
nneigh,
nfeat,
d_nvert = 10000,
d_nneigh = 100,
d_nfeat = 100,
gradient=False,
tf_thresholds = {'nvert': 55000,
'nneigh': 210,
'nfeat': 200}):
tf_times = []
op_times = []
tfx = []
for nv in nvert:
print('nvert self.benchmark, nvert:',nv, "do tf",tf_thresholds['nvert']>nv)
opt,tft = self.benchmark(nv,d_nfeat,d_nneigh,4, dogradient=gradient,do_tf=tf_thresholds['nvert']>nv)
if tft:
tf_times.append(tft)
tfx.append(nv)
op_times.append(opt)
plt.plot(nvert,op_times,color='green',label="custom",marker='o')
plt.plot(tfx,tf_times,color='orange',label="TF",marker='o')
plt.xlabel("# vertices")
plt.ylabel("time")
#plt.yscale('log')
plt.legend()
if gradient:
plt.savefig(self.name+"benchmark_grad_nvert.pdf")
else:
plt.savefig(self.name+"benchmark_nvert.pdf")
plt.close()
tf_times=[]
op_times=[]
tfx=[]
for nn in nneigh:
print('nneigh self.benchmark, nn:',nn)
opt,tft = self.benchmark(d_nvert,d_nfeat,nn,4,
dogradient=gradient,do_tf=tf_thresholds['nneigh']>nn)
if tft:
tf_times.append(tft)
tfx.append(nn)
op_times.append(opt)
plt.plot(nneigh,op_times,color='green',label="custom",marker='o')
plt.plot(tfx,tf_times,color='orange',label="TF",marker='o')
plt.xlabel("# neighbours")
plt.ylabel("time")
plt.legend()
if gradient:
plt.savefig(self.name+"benchmark_grad_nneigh.pdf")
else:
plt.savefig(self.name+"benchmark_nneigh.pdf")
plt.close()
tf_times=[]
op_times=[]
tfx=[]
for nf in nfeat:
print('nfeat self.benchmark, nfeat:',nf)
opt,tft = self.benchmark(d_nvert,nf,d_nneigh,4,
dogradient=gradient,do_tf=tf_thresholds['nfeat']>nf)
if tft:
tf_times.append(tft)
tfx.append(nf)
op_times.append(opt)
plt.plot(nfeat,op_times,color='green',label="custom",marker='o')
plt.plot(tfx,tf_times,color='orange',label="TF",marker='o')
plt.xlabel("# features")
plt.ylabel("time")
plt.legend()
if gradient:
plt.savefig(self.name+"benchmark_grad_nfeat.pdf")
else:
plt.savefig(self.name+"benchmark_nfeat.pdf")
plt.close()
```
#### File: HGCalML-1/modules/globals.py
```python
class _metaconst(type):
def __getattr__(cls, key):
return cls[key]
def __setattr__(cls, key, value):
raise TypeError(key+' is constant.')
class _const(object, metaclass=_metaconst):
def __getattr__(self, name):
return self[name]
def __setattr__(self, name, value):
raise TypeError(name+' is constant.')
####### actual constants here, need to inherit from _const
class cluster_space(_const):
'''
Coordinate value assigned in clustering space to noise hits that were
removed earlier, once the hits are scattered back.
'''
noise_coord = 100.
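# Usage sketch (hedged, not part of the original file): _const attributes behave as
# read-only class constants via the _metaconst metaclass, e.g.
#   from globals import cluster_space
#   c = cluster_space.noise_coord      # -> 100.
#   cluster_space.noise_coord = 1.     # raises TypeError('noise_coord is constant.')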
class hit_keys(_const):
'''
Keys for per-hit features/pred/truth that cannot be passed transparently
because they need to be used inside functions.
Use these constants at every occurrence to avoid issues when the keys change.
Another good (maybe better) place for them would be TrainData_NanoML;
if moved there, keep the inheritance from _const.
So far, this is not used (yet).
'''
rec_energy = 'recHitEnergy'
# ...
class pu(_const):
'''
Special constants associated with pile-up, used to make
'standard-style' particle-in-PU plots. Don't overuse
these; we don't really want to distinguish between
PU and the 'main' event at this stage of reconstruction.
'''
'''
The HGCAL has 2x3M sensors, so there will be at most 6M truth showers
in one event; 10M is therefore a good offset, and well within int32 range.
Still, if this offset is used, always add a safety check beforehand, e.g.:
if np.max(t_idx) >= pu.t_idx_offset:
raise ValueError(...)
'''
t_idx_offset = 1e7
'''
The only non-const globals.
Set these in case TF gradients should be used instead of the custom-op gradients.
This will increase resource usage and is mostly a panic switch
to make sure weird behaviour is not caused by the (well tested) custom gradients.
These need to be imported and changed before any other import of the ops.
'''
knn_ops_use_tf_gradients=False
acc_ops_use_tf_gradients=False
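# Minimal usage sketch (import names follow this repository's convention; the exact call
# site is an assumption): flip the switch before importing any op wrapper, e.g.
#   import globals as gl
#   gl.knn_ops_use_tf_gradients = True
#   from select_knn_op import SelectKnn   # SelectKnn then uses the TF gradient path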
```
#### File: HGCalML-1/modules/hgcal_predictor.py
```python
from DeepJetCore.DataCollection import DataCollection
from DeepJetCore.dataPipeline import TrainDataGenerator
from datastructures.TrainData_NanoML import TrainData_NanoML
import os
from DeepJetCore.modeltools import load_model
from datastructures import TrainData_TrackML
import time
class HGCalPredictor():
def __init__(self, input_source_files_list, training_data_collection, predict_dir, unbuffered=False, model_path=None, max_files=4, inputdir=None):
self.input_data_files = []
self.inputdir = None
self.predict_dir = predict_dir
self.unbuffered=unbuffered
self.max_files = max_files
print("Using HGCal predictor class")
## prepare input lists for different file formats
if input_source_files_list[-6:] == ".djcdc":
print('reading from data collection', input_source_files_list)
predsamples = DataCollection(input_source_files_list)
self.inputdir = predsamples.dataDir
for s in predsamples.samples:
self.input_data_files.append(s)
elif input_source_files_list[-6:] == ".djctd":
self.inputdir = os.path.abspath(os.path.dirname(input_source_files_list))
infile = os.path.basename(input_source_files_list)
self.input_data_files.append(infile)
else:
print('reading from text file', input_source_files_list)
self.inputdir = os.path.abspath(os.path.dirname(input_source_files_list))
with open(input_source_files_list, "r") as f:
for s in f:
self.input_data_files.append(s.replace('\n', '').replace(" ", ""))
self.dc = None
if input_source_files_list[-6:] == ".djcdc" and not training_data_collection[-6:] == ".djcdc":
self.dc = DataCollection(input_source_files_list)
else:
self.dc = DataCollection(training_data_collection)
if inputdir is not None:
self.inputdir = inputdir
self.model_path = model_path
if max_files > 0:
self.input_data_files = self.input_data_files[0:min(max_files, len(self.input_data_files))]
def predict(self, model=None, model_path=None, output_to_file=True):
if model_path==None:
model_path = self.model_path
if model is None:
if not os.path.exists(model_path):
raise FileNotFoundError('Model file not found')
assert model_path is not None or model is not None
outputs = []
if output_to_file:
os.system('mkdir -p ' + self.predict_dir)
if model is None:
model = load_model(model_path)
all_data = []
for inputfile in self.input_data_files:
use_inputdir = self.inputdir
if inputfile[0] == "/":
use_inputdir = ""
outfilename = "pred_" + os.path.basename(inputfile)
print('predicting ', use_inputdir +'/' + inputfile)
td = self.dc.dataclass()
#also allows for inheriting classes now, like with tracks or special PU
if not isinstance(td, TrainData_NanoML) and type(td) is not TrainData_TrackML:
raise RuntimeError("TODO: make sure this works for other traindata formats")
if inputfile[-5:] == 'djctd':
if self.unbuffered:
td.readFromFile(use_inputdir + "/" + inputfile)
else:
td.readFromFileBuffered(use_inputdir + "/" + inputfile)
else:
print('converting ' + inputfile)
td.readFromSourceFile(use_inputdir + "/" + inputfile, self.dc.weighterobjects, istraining=False)
gen = TrainDataGenerator()
# the batch size must be one otherwise we need to play tricks with the row splits later on
gen.setBatchSize(1)
gen.setSquaredElementsLimit(False)
gen.setSkipTooLargeBatches(False)
gen.setBuffer(td)
num_steps = gen.getNBatches()
generator = gen.feedNumpyData()
dumping_data = []
thistime = time.time()
for _ in range(num_steps):
data_in = next(generator)
predictions_dict = model(data_in[0])
for k in predictions_dict.keys():
predictions_dict[k] = predictions_dict[k].numpy()
features_dict = td.createFeatureDict(data_in[0])
truth_dict = td.createTruthDict(data_in[0])
dumping_data.append([features_dict, truth_dict, predictions_dict])
totaltime = time.time() - thistime
print('took approx',totaltime/num_steps,'s per endcap (also includes dict building)')
td.clear()
gen.clear()
outfilename = os.path.splitext(outfilename)[0] + '.bin.gz'
if output_to_file:
td.writeOutPredictionDict(dumping_data, self.predict_dir + "/" + outfilename)
outputs.append(outfilename)
if not output_to_file:
all_data.append(dumping_data)
if output_to_file:
with open(self.predict_dir + "/outfiles.txt", "w") as f:
for l in outputs:
f.write(l + '\n')
if not output_to_file:
return all_data
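# Usage sketch (paths and model file below are hypothetical, not part of this repository):
#   predictor = HGCalPredictor('samples.djcdc', 'training_dc.djcdc', 'predictions',
#                              model_path='model.h5', max_files=1)
#   predictor.predict()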
```
#### File: modules/hplots/general_graph_plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
class GeneralGraphPlot():
def __init__(self, x_label='Values', y_label='Frequency', title='', histogram_log=False):
self.models_data = list()
self.x_label = x_label
self.y_label = y_label
self.title = title
self.histogram_log=histogram_log
def _compute(self, x_values,y_values):
processed_data = dict()
processed_data['x_values'] = x_values
processed_data['y_values'] = y_values
return processed_data
def add_raw_values(self, x_values,y_values, tags={}):
if type(x_values) is not np.ndarray or type(y_values) is not np.ndarray:
raise ValueError("x and y values have to be numpy arrays")
data = self._compute(x_values,y_values)
data['tags'] = tags
self.models_data.append(data)
def add_processed_data(self, processed_data):
self.models_data.append(processed_data)
def draw(self, name_tag_formatter=None):
"""
:param name_tag_formatter: a function that is given the tags dict and returns the name of the plot
:return:
"""
fig, ax1 = plt.subplots(1, 1, figsize=(6, 6))
max_of_hist_values = 0
for model_data in self.models_data:
x_values = model_data['x_values']
y_values = model_data['y_values']
tags = model_data['tags']
if name_tag_formatter is None:
name_of_plot = ''
else:
name_of_plot = name_tag_formatter(tags)
print(name_of_plot)
ax1.plot(x_values, y_values, color='black',linestyle='None', alpha=1,marker='o')
if self.histogram_log:
ax1.set_yscale('log')
ax1.set_title(self.title, fontsize=14)
ax1.set_xlabel(self.x_label, fontsize=14)
ax1.set_ylabel(self.y_label, fontsize=14)
#ax1.legend(loc='center right')
plt.subplots_adjust(left=0.15)
# ax1.set_ylim(0, 1.04)
# ax2.set_ylim(0, max_of_hist_values * 1.3)
@classmethod
def draw_static(cls, x_values, y_values):
plotter = GeneralGraphPlot()
plotter.add_raw_values(x_values, y_values, tags=None)
plotter.draw()
def write_to_database(self, database_manager, table_name):
pass
def get_tags(self):
return [x['tags'] for x in self.models_data]
def read_from_database(self, database_reading_manager, table_name, experiment_name=None, condition=None):
pass
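# Usage sketch (hypothetical data; the tag keys are an assumption):
#   plot = GeneralGraphPlot(x_label='epoch', y_label='loss')
#   plot.add_raw_values(np.array([1., 2., 3.]), np.array([0.9, 0.5, 0.3]), tags={'model': 'A'})
#   plot.draw(name_tag_formatter=lambda tags: tags['model'])
#   plt.savefig('loss_curve.pdf')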
```
#### File: modules/hplots/response_scale.py
```python
import numpy as np
from matplotlib import scale as mscale
import matplotlib
from matplotlib.ticker import FixedLocator, FuncFormatter, Locator, AutoLocator
class ResponseLocator(Locator):
def __init__(self, locs, nbins=None,numticks=None):
self.locs = np.asarray(locs)
self.numticks=numticks
self.auto_locator = AutoLocator()
self.nbins=nbins
def set_params(self, nbins=None,numticks=None):
"""Set parameters within this locator."""
if nbins is not None:
self.nbins = nbins
if numticks is not None:
self.numticks=numticks
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
@property
def numticks(self):
# Old hard-coded default.
return self._numticks if self._numticks is not None else 11
@numticks.setter
def numticks(self, numticks):
self._numticks = numticks
def tick_values(self, vmin, vmax):
"""
Return the locations of the ticks.
.. note::
Because the values are fixed, vmin and vmax are not used in this
method.
"""
if vmax < 3:
return np.round(self.auto_locator.tick_values(vmin, vmax),4)
else:
if self.nbins is None:
return self.locs
step = max(int(np.ceil(len(self.locs) / self.nbins)), 1)
ticks = self.locs[::step]
for i in range(1, step):
ticks1 = self.locs[i::step]
if np.abs(ticks1).min() < np.abs(ticks).min():
ticks = ticks1
return self.raise_if_exceeds(ticks)
class ResponseScale(mscale.ScaleBase):
name='response_scale'
def __init__(self, axis):
super().__init__(axis)
def get_transform(self):
return self.ResponseTransform()
class ResponseTransform(matplotlib.scale.FuncTransform):
def __init__(self):
super().__init__(forward=self.forward, inverse=self.inverse)
def forward(self, array):
return np.where(np.less_equal(array,3), array, 3+np.log(array)-np.log(3))
def inverse(self, array):
return np.where(np.less_equal(array,3), array, np.exp(array+np.log(3) - 3))
def set_default_locators_and_formatters(self, axis):
fmt = FuncFormatter(
lambda x, pos=None: str(x))
locators = [0,0.5,1,1.5,2,2.5,3,4,5,10,50,100,1000,10000,10000,100000,1000000,10000000,10000000,10000000]
axis.set(major_locator=ResponseLocator(locators),
major_formatter=fmt, minor_formatter=fmt)
def register():
mscale.register_scale(ResponseScale)
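# Usage sketch (assumes an existing matplotlib Axes `ax` in the calling plot code):
#   register()
#   ax.set_yscale('response_scale')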
```
#### File: HGCalML-1/modules/plotting_tools.py
```python
import numpy as np
import os
def calc_r(x,y):
return np.sqrt(x ** 2 + y ** 2)
def calc_eta(x, y, z):
rsq = np.sqrt(x ** 2 + y ** 2)
return -1 * np.sign(z) * np.log(rsq / np.abs(z + 1e-3) / 2.)
def calc_phi(x, y):
return np.arctan2(x, y)
def rotation(counter):
angle_in = 10. * counter + 60.
while angle_in >= 360: angle_in -= 360
while angle_in <= -360: angle_in += 360
return angle_in
def publish(file_to_publish, publish_to_path):
cpstring = 'cp -f '
if "@" in publish_to_path:
cpstring = 'scp '
basefilename = os.path.basename(file_to_publish)
os.system(cpstring + file_to_publish + ' ' + publish_to_path +'_'+basefilename+ ' > /dev/null 2>&1')
def shuffle_truth_colors(df, qualifier="truthHitAssignementIdx",rdst=None):
ta = df[qualifier]
unta = np.unique(ta)
unta = unta[unta>-0.1]
if rdst is None:
np.random.shuffle(unta)
else:
rdst.shuffle(unta)
out = ta.copy()
for i in range(len(unta)):
out[ta ==unta[i]]=i
df[qualifier] = out
```
#### File: HGCalML-1/modules/Regularizers.py
```python
import tensorflow as tf
from baseModules import LayerWithMetrics
'''
don't forget to register as custom objects (e.g. in Layers.py)
'''
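# Registration sketch (an assumption about where this is done, e.g. centrally in Layers.py):
#   tf.keras.utils.get_custom_objects().update({
#       'OffDiagonalRegularizer': OffDiagonalRegularizer,
#       'WarpRegularizer': WarpRegularizer,
#       'AverageDistanceRegularizer': AverageDistanceRegularizer,
#       'MeanMaxDistanceRegularizer': MeanMaxDistanceRegularizer,
#   })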
class OffDiagonalRegularizer(tf.keras.regularizers.Regularizer):
def __init__(self, strength):
assert strength>=0
self.strength = strength
def get_config(self):
return {'strength': self.strength}
def __call__(self, x):
diag = tf.eye(x.shape[-2], x.shape[-1])
offdiag = x * (1.-diag)
return self.strength * tf.reduce_mean(tf.square(offdiag))
class WarpRegularizer(tf.keras.layers.Layer):
def __init__(self, strength : float = 0.1, **kwargs):
super(WarpRegularizer, self).__init__(**kwargs)
self.strength = strength
def get_config(self):
config = {'strength': self.strength}
base_config = super(WarpRegularizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shapes):
return input_shapes
def call(self, inputs):
warp = inputs
diag = tf.expand_dims(tf.eye(warp.shape[-1]), axis=0)
loss = diag*warp - warp #penalise non-diag elements
loss *= loss
loss = self.strength * tf.reduce_mean(loss)
self.add_loss(loss)
print(self.name, loss)
return inputs
class AverageDistanceRegularizer(LayerWithMetrics):
def __init__(self, strength :float =1.,
printout: bool = False,
**kwargs):
'''
Penalises if the average distance is not around 0.5,
to make sure a gradient for the GravNet distance weighting exists early on
and is most effective in shaping the space. This regulariser should be switched off
later in the training (set strength to 0 and recompile).
Inputs/outputs: distances (not modified)
'''
super(AverageDistanceRegularizer, self).__init__(**kwargs)
self.strength = strength
self.printout = printout
assert strength >= 0
def get_config(self):
config = {'strength': self.strength,
'printout': self.printout
}
base_config = super(AverageDistanceRegularizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shapes):
return input_shapes
def call(self, inputs):
if self.strength == 0:
return inputs
dist = inputs
dist = tf.sqrt(dist+1e-3)
Nreal_neigh = tf.cast(tf.math.count_nonzero(inputs,axis=1),dtype='float32')
avdist = tf.math.divide_no_nan(tf.reduce_sum(dist,axis=1),Nreal_neigh)
avdist = tf.reduce_mean(avdist)
avneigh = tf.reduce_mean(tf.math.count_nonzero(
tf.logical_and(inputs<1.,inputs>0),axis=1))
loss = self.strength * (avdist-0.5)**2
if self.printout:
print(self.name,'average dist',float(avdist),'average neighbours',
float(tf.reduce_mean(Nreal_neigh)),
'average active neighbours',
float(avneigh),
'penalty',float(loss))
self.add_prompt_metric(avdist, self.name+'_dist')
self.add_prompt_metric(avneigh, self.name+'_Nneigh')
self.add_loss(loss)
return inputs
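# Usage sketch (hedged; assumes `distsq` are squared neighbour distances coming out of a
# GravNet/kNN layer). The layer returns its input unchanged and only adds a loss and metrics:
#   distsq = AverageDistanceRegularizer(strength=1e-2, name='av_dist_reg')(distsq)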
class MeanMaxDistanceRegularizer(LayerWithMetrics):
def __init__(self,
strength :float =1.,
printout: bool = False,
**kwargs):
'''
Penalises if the mean per-vertex maximum distance is not around 0.9 and the overall maximum distance is not around 0.9,
to make sure a gradient for the GravNet distance weighting exists early on
and is most effective in shaping the space. This regulariser should be switched off
later in the training (set strength to 0 and recompile).
Inputs/outputs: distances (not modified)
'''
super(MeanMaxDistanceRegularizer, self).__init__(**kwargs)
self.strength = strength
self.printout = printout
assert strength >= 0
def get_config(self):
config = {'strength': self.strength,
'printout': self.printout}
base_config = super(MeanMaxDistanceRegularizer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def compute_output_shape(self, input_shapes):
return input_shapes
def call(self, inputs):
if self.strength == 0:
return inputs
dist = inputs
dist = tf.sqrt(dist+1e-6)
maxdist = tf.reduce_max(dist,axis=1)
meanmax = tf.reduce_mean(maxdist)
maxmax = tf.reduce_max(maxdist)
#less penalty if max is larger
def half_sided_penalty(x):
return tf.where(x>0, 0.25*x**2, x**2)
loss = self.strength * (half_sided_penalty(meanmax-0.9) + half_sided_penalty(maxmax-0.9))
if self.printout:
print(self.name,'meanmax dist loss',float(loss))
self.add_prompt_metric(meanmax, self.name+'_meanmax')
self.add_prompt_metric(maxmax, self.name+'_maxmax')
self.add_prompt_metric(loss, self.name+'_loss')
self.add_loss(loss)
return inputs
```
#### File: HGCalML-1/modules/select_knn_op.py
```python
import tensorflow as tf
from tensorflow.python.framework import ops
import globals as gl
from oc_helper_ops import SelectWithDefault
'''
Wrap the module
'''
_sknn_op = tf.load_op_library('select_knn.so')
def SelectKnn(K : int, coords, row_splits, masking_values=None, threshold=0.5, tf_compatible=False, max_radius=-1.,
mask_mode='none', mask_logic='xor'):
'''
Returns indices and distances**2; the gradient for the distances is implemented!
New: mask (switch). Mask modes:
0) none = no masking
1) acc = get to have neighbours
2) scat = get to be neighbours
10) xor: exclusive (one xor the other) -> exchange between collections, direction given by 1 and 2
20) and: selected (one and the other) -> pooling
no gradient for the mask!
'''
assert mask_mode=='none' or mask_mode=='acc' or mask_mode=='scat'
assert mask_mode=='none' or mask_logic=='xor' or mask_logic=='and'
if masking_values is None:
assert mask_mode=='none'
masking_values = tf.zeros_like(coords[:,0:1])
mask = tf.zeros_like(masking_values, dtype='int32')
mask = tf.where(masking_values>threshold, mask+1, mask)
#print('mask',mask)
op_mask_mode = 0
if mask_logic=='xor':
op_mask_mode=10
elif mask_logic=='and':
op_mask_mode=20
if mask_mode=='acc':
op_mask_mode+=1
elif mask_mode=='scat':
op_mask_mode+=2
'''
0) none = no masking
1) acc = get to have neighbours
2) scat = get to be neighbours
10) xor: exclusive (one xor the other) -> exchange between collections, direction given by 1 and 2
20) and: selected (one and the other) -> pooling (scat and acc don't matter)
'''
idx,distsq = _sknn_op.SelectKnn(n_neighbours=K, tf_compatible=tf_compatible, max_radius=max_radius,
coords=coords, row_splits=row_splits, mask=mask, mask_mode=op_mask_mode)
#safe guards
with tf.control_dependencies([
tf.assert_equal(tf.range(tf.shape(idx)[0]), idx[:,0]),
tf.assert_less(idx, row_splits[-1]),
tf.assert_less(-2, idx)
]):
if not gl.knn_ops_use_tf_gradients:
return idx, distsq
ncoords = SelectWithDefault(idx, coords, 0.)
distsq = (ncoords[:,0:1,:]-ncoords)**2
distsq = tf.reduce_sum(distsq,axis=2)
distsq = tf.where(idx<0, 0., distsq)
return idx, distsq
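# Usage sketch (a single row split covering all vertices; shapes are an assumption):
#   coords = tf.random.uniform((1000, 3))
#   row_splits = tf.constant([0, 1000], dtype='int32')
#   idx, distsq = SelectKnn(K=16, coords=coords, row_splits=row_splits)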
_sknn_grad_op = tf.load_op_library('select_knn_grad.so')
@ops.RegisterGradient("SelectKnn")
def _SelectKnnGrad(op, gradidx, dstgrad):
coords = op.inputs[0]
indices = op.outputs[0]
distances = op.outputs[1]
coord_grad = _sknn_grad_op.SelectKnnGrad(grad_distances=dstgrad, indices=indices, distances=distances, coordinates=coords)
return coord_grad, None, None #no grad for row splits and masking values
```
#### File: HGCalML-1/scripts/plotWindow.py
```python
from DeepJetCore import DataCollection
import os
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import math
from multiprocessing import Process
import random
from datastructures import TrainData_NanoML
import plotly.express as px
import pandas as pd
import tqdm
from DeepJetCore.dataPipeline import TrainDataGenerator
parser = ArgumentParser('')
parser.add_argument('inputFile')
parser.add_argument('outputDir')
parser.add_argument('--hipsearch',action='store_true')
parser.add_argument('--plots',action='store_true')
args = parser.parse_args()
outdir = args.outputDir+'/'
### rewrite!
os.system('mkdir -p '+outdir)
#read a file
def invokeGen(infile):
if infile[-6:] == '.djcdc':
dc = DataCollection(infile)
td = dc.dataclass()
tdclass = dc.dataclass
dc.setBatchSize(1)
gen = dc.invokeGenerator()
elif infile[-6:] == '.djctd':
td = TrainData_NanoML()
tdclass = TrainData_NanoML
td.readFromFile(infile)
gen = TrainDataGenerator()
gen.setBatchSize(1)
gen.setBuffer(td)
elif infile[-5:] == '.root':
print('reading from root file')
td = TrainData_NanoML()
tdclass = TrainData_NanoML
td.readFromSourceFile(infile,{},True)
td.writeToFile(infile+'.djctd')
td.readFromFile(infile+'.djctd')
gen = TrainDataGenerator()
gen.setBatchSize(1)
gen.setBuffer(td)
gen.setSkipTooLargeBatches(False)
nevents = gen.getNBatches()
gen.cast_to = tdclass
return gen.feedTrainData,nevents,td
gen,nevents,td = invokeGen(args.inputFile)
def shuffle_truth_colors(df, qualifier="truthHitAssignementIdx"):
ta = df[qualifier]
unta = np.unique(ta)
unta = unta[unta>-0.1]
np.random.seed(42)
np.random.shuffle(unta)
out = ta.copy()
dfo = df.copy()
for i in range(len(unta)):
out[ta ==unta[i]]=i
dfo[qualifier] = out
return dfo
def toDataFrame(thegen, thetd):
data = next(thegen())#this is a dict, row splits can be ignored, this is per event
return data.createPandasDataFrame(0)
def compressShowerFeatures(df):
dfout = df.drop_duplicates(subset = ["truthHitAssignementIdx"])
return dfout[dfout["truthHitAssignementIdx"]>=0]
def quickplotshower(df,out):
fig = px.scatter_3d(df, x="recHitX", y="recHitZ", z="recHitY",
color="hitratio", size="recHitLogEnergy",
symbol = "marker",
hover_data=['rel_std','totruthHitAssignedEnergies_ratio','marker','hitratio','nhits','corratio'],
template='plotly_dark',
color_continuous_scale=px.colors.sequential.Rainbow)
fig.update_traces(marker=dict(line=dict(width=0)))
fig.write_html(out)
def hipsearch(df3d, i, outdir, makeplots=False):
truthHitAssignementIdx = df3d['truthHitAssignementIdx']
utidx = np.unique(truthHitAssignementIdx)
counter=0
Er_dep=[]
Er_corr_dep=[]
E =[]
for t in utidx:
if t < 0:
continue
seldf = df3d[df3d['truthHitAssignementIdx']==t]
depsum = np.ones_like(seldf['recHitEnergy'])*np.sum(seldf['recHitEnergy'])
nhits = float(len(seldf['recHitEnergy']))
seldf['energy_ratio'] = seldf['recHitEnergy']/seldf['truthHitAssignedEnergies']
seldf['totruthHitAssignedEnergies_ratio'] = depsum/seldf['truthHitAssignedEnergies']
E.append(np.mean(seldf['truthHitAssignedEnergies']))
Er_dep.append(np.mean(seldf['totruthHitAssignedEnergies_ratio']))
hitratio = seldf['recHitEnergy']/depsum
seldf['nhits'] = nhits
hitratio *= nhits #1. on average for uniform etc.
seldf['hitratio'] = hitratio
m = np.mean(seldf['hitratio'])
s = np.std(seldf['hitratio']-m)
seldf['rel_std']= (hitratio-m)/s
seldf['marker'] = np.array(seldf['rel_std'] > 5.,dtype='int32')
ewithout = np.sum((1.-seldf['marker'])*seldf['recHitEnergy'])
seldf['corratio'] = ewithout/seldf['truthHitAssignedEnergies']
Er_corr_dep.append(np.mean(seldf['corratio']))
if makeplots and np.all(depsum < seldf['truthHitAssignedEnergies']*1.1):
quickplotshower(seldf,outdir+str(i)+'_'+str(counter)+'.html')
counter+=1
return Er_dep, Er_corr_dep , E
hitdf = pd.DataFrame()
showerdf = pd.DataFrame()
eventdf = pd.DataFrame()
print(nevents,'events')
#3D plots
Er_dep, Er_corr_dep, E = [], [], []
for i in tqdm.tqdm(range(nevents)):
df = toDataFrame(gen,td)
#print(df.columns)
dfshowers = compressShowerFeatures(df)
showerhits = df[df["truthHitAssignementIdx"]>=0]
#depvstruthenergy.append(np.sum(showerhits['recHitEnergy'])/(np.sum(dfshowers['truthHitAssignedEnergies'])+1.))
from globals import pu
#df["recHitLogEnergy"]*= (1. - (1.-1e-2)*(df["truthHitAssignementIdx"]>=pu.t_idx_offset))
df3d = shuffle_truth_colors(df)
df3d['orig_truthHitAssignementIdx']=df['truthHitAssignementIdx']
df3d['t_inv_spec'] = np.where(df3d['truthHitAssignementIdx']<0,
0.,np.ones_like(df3d['truthHitSpectatorFlag'])) * 1./(df3d['truthHitSpectatorFlag']+1e-1)
#plot the first 20 as 3D plots
if i < 20 and args.plots:
#makes a copy
hover_data=['recHitEnergy',
'recHitHitR',
'truthHitAssignedEnergies',
'truthHitAssignedT',
'truthHitAssignedX',
'truthHitAssignedY',
'truthHitAssignementIdx',
'orig_truthHitAssignementIdx',
'truthHitAssignedPIDs',
'truthHitSpectatorFlag']
print('N hits', len(df3d))
fig = px.scatter_3d(df3d, x="recHitX", y="recHitZ", z="recHitY",
color="truthHitAssignementIdx", size="recHitLogEnergy",
symbol = "recHitID",
hover_data=hover_data,
template='plotly_dark',
color_continuous_scale=px.colors.sequential.Rainbow)
fig.update_traces(marker=dict(line=dict(width=0)))
ccfile = outdir + str(i) + "_event.html"
fig.write_html(ccfile)
continue
fig = px.scatter_3d(df3d, x="recHitX", y="recHitZ", z="recHitY",
color="truthHitAssignementIdx", size="recHitHitR",
symbol = "recHitID",
hover_data=hover_data,
template='plotly_dark',
color_continuous_scale=px.colors.sequential.Rainbow)
fig.update_traces(marker=dict(line=dict(width=0)))
ccfile = outdir + str(i) + "_event_hitsize.html"
fig.write_html(ccfile)
fig = px.scatter_3d(df3d, x="recHitX", y="recHitZ", z="recHitY",
color="truthHitAssignementIdx", size="t_inv_spec",
hover_data=hover_data,
symbol = "recHitID",
template='plotly_dark',
color_continuous_scale=px.colors.sequential.Rainbow)
fig.update_traces(marker=dict(line=dict(width=0)))
ccfile = outdir + str(i) + "_spect.html"
fig.write_html(ccfile)
if args.hipsearch:
iEr_dep, iEr_corr_dep, iE = hipsearch(df3d, i, outdir, args.plots)
Er_dep+=iEr_dep
Er_corr_dep+=iEr_corr_dep
E+=iE
plt.hist(Er_dep,bins=31,label='uncorr',alpha=0.5)
plt.hist(Er_corr_dep,bins=31,label='corr',alpha=0.5)
plt.legend()
plt.savefig(outdir +'hipcorr.pdf')
dfout = pd.DataFrame(zip(E,Er_dep,Er_corr_dep), columns = ['E','Er','Er_corr'])
dfout.to_pickle("df.pkl")
```
#### File: HGCalML-1/scripts/visualise_multi_attention.py
```python
import numpy as np
import plotly.graph_objects as go
import pandas as pd
import tqdm
# Generate a nice-looking random 3D field
np.random.seed(0)
l = 40
mgrid = (np.mgrid[:l, :l, :l]-l/2)/l
X, Y, Z = mgrid
#vol = np.zeros((l, l, l))
mgrid = np.transpose(mgrid,[1,2,3,0])
mgrid = np.expand_dims(mgrid,axis=-1)
print(mgrid.shape)
#read data
file3="~/Downloads/multiattention.html.3.df.pkl"
file5="~/Downloads/multiattention.html.5.df.pkl"
file8="~/Downloads/multiattention.html.8.df.pkl"
file=file8
outdir='plts8'
df=pd.read_pickle(file)
#check number of coordinates
cols = df.columns
coords = np.unique([int(c[-1]) for c in cols])
points = np.unique([int(c[-3]) for c in cols])
#find name
import os
os.system('mkdir -p '+outdir)
def printevent(event,counter,outdir=outdir):
data=[]
vardata = []
for pi in points:
pdata=[]
pvar=[]
for ci in coords:
d = df['pre_selection_add_stage_0_att_gn1_coord_add_mean_'+str(pi)+'_'+str(ci)]
pdata.append(d[event])
pvar.append(df['pre_selection_add_stage_0_att_gn1_coord_add_var_'+str(pi)+'_'+str(ci)][event])
data.append(pdata)
vardata.append(pvar)
data = np.array(data)
vardata= np.array(vardata)
def trfdata(x):
x = np.transpose(x,[1,0])
return np.expand_dims(x,axis=(0,1,2))
#process to plot
data = trfdata(data)
vardata = trfdata(vardata)
vol = np.exp(-3.*(data-mgrid)**2/vardata )
#print(vol.shape)
vol = np.prod(vol,axis=3)#the x**2 axis
vol = np.sum(vol,axis=-1)#the points axis
#insert data here. make data span a function for mesh grid
#pts = (l * np.random.rand(3, 15)).astype(np.int)
#vol[tuple(indices for indices in pts)] = 1
#from scipy import ndimage
#vol = ndimage.gaussian_filter(vol, 1)
vol /= vol.max()
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=vol.flatten(),
isomin=0.2,
isomax=0.7,
opacity=0.1,
surface_count=25,
))
#fig.update_layout(scene_xaxis_showticklabels=False,
# scene_yaxis_showticklabels=False,
# scene_zaxis_showticklabels=False)
#
#fig.show(renderer='chrome')
if counter>=0:
fig.write_image(outdir+'/'+str(counter).zfill(10)+'.png')
else:
fig.write_html(outdir+'/last.html')
printevent(len(df)-1,-1)
exit()
nframes=60
events = np.arange(0, len(df)-1, (len(df)-1) // nframes)
#events = [0]
print(events)
counter=0
lastev=0
for e in tqdm.tqdm(events):
printevent(e,counter)
counter+=1
lastev=e
```
{
"source": "jkiguru/djangoCRUDapp",
"score": 2
} |
#### File: djangoCRUDapp/task/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import ListView, DetailView, UpdateView, DeleteView, CreateView,TemplateView
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from .models import Task
from .models import Lecturer
from django.db.models import Q
# @login_required
def home(request):
return render(request, 'task/home.html')
class TaskListView(LoginRequiredMixin, ListView):
model = Task
context_object_name = 'tasks'
class TaskDetailView(LoginRequiredMixin, DetailView):
model = Task
context_object_name = 'task'
class TaskUpdateView(LoginRequiredMixin, UpdateView):
model = Task
fields = ['task_name','task_desc']
success_url = '/task_list'
extra_context = {
'title': 'Edit Task'
}
def get_context_data(self, *args, **kwargs):
kwargs.update(self.extra_context)
return super().get_context_data(*args, **kwargs)
class TaskDeleteView(LoginRequiredMixin, DeleteView):
model = Task
context_object_name = 'task'
success_url = '/task_list'
class TaskCreateView(LoginRequiredMixin, CreateView):
model = Task
fields = ['task_name','task_desc']
success_url = '/task_list'
extra_context = {
'title': 'Create Task'
}
def get_context_data(self, *args, **kwargs):
kwargs.update(self.extra_context)
return super().get_context_data(*args, **kwargs)
def form_valid(self, form):
form.instance.task_creator = self.request.user
form.instance.task_created = timezone.now()
return super().form_valid(form)
def take_task(request, pk):
task = Task.objects.get(pk=pk)
task.task_taker = request.user.username
task.time_taken = timezone.now()
task.save()
return redirect('task_list')
def task_done(request, pk):
task = Task.objects.get(pk=pk)
task.time_done = timezone.now()
task.save()
return redirect('task_list')
#lecturer
def view_lecturer(request):
return render(request, 'lecturer/lecturer.html')
class HomePageView(TemplateView):
template_name = 'lecturer/home.html'
class SearchResultsView(ListView):
model = Lecturer
template_name = 'lecturer/search_results.html'
def get_queryset(self): # new
query = self.request.GET.get('q')
object_list = Lecturer.objects.filter(
Q(instructor__icontains=query) | Q(Title__icontains=query)
)
return object_list
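# Hypothetical URL wiring sketch (route names and paths are assumptions, not taken from this project):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('task_list/', views.TaskListView.as_view(), name='task_list'),
#       path('task/<int:pk>/take/', views.take_task, name='take_task'),
#       path('search/', views.SearchResultsView.as_view(), name='search_results'),
#   ]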
```
{
"source": "jkillian/hue",
"score": 2
} |
#### File: indexer/indexers/sql_tests.py
```python
from builtins import object
import json
import sys
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
from azure.conf import ABFS_CLUSTERS
from beeswax.server import dbms
from indexer.indexers.sql import SQLIndexer
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock, MagicMock
else:
from mock import patch, Mock, MagicMock
class TestSQLIndexer(object):
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="empty", recreate=True, is_superuser=False)
self.user = User.objects.get(username="test")
def test_create_table_from_a_file_to_csv(self):
fs = Mock(
stats=Mock(return_value={'mode': 0o0777})
)
def source_dict(key):
return {
'path': 'hdfs:///path/data.csv',
'format': {'quoteChar': '"', 'fieldSeparator': ','},
'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id'}],
'sourceType': 'hive'
}.get(key, Mock())
source = MagicMock()
source.__getitem__.side_effect = source_dict
def destination_dict(key):
return {
'name': 'default.export_table',
'tableFormat': 'csv',
'importData': True,
'nonDefaultLocation': '/user/hue/customer_stats.csv',
'columns': [{'name': 'id', 'type': 'int'}],
'partitionColumns': [{'name': 'day', 'type': 'date', 'partitionValue': '20200101'}],
'description': 'No comment!',
'sourceType': 'hive-1'
}.get(key, Mock())
destination = MagicMock()
destination.__getitem__.side_effect = destination_dict
with patch('notebook.models.get_interpreter') as get_interpreter:
notebook = SQLIndexer(user=self.user, fs=fs).create_table_from_a_file(source, destination)
assert_equal(
[statement.strip() for statement in u'''DROP TABLE IF EXISTS `default`.`hue__tmp_export_table`;
CREATE TABLE `default`.`hue__tmp_export_table`
(
`id` int ) COMMENT "No comment!"
PARTITIONED BY (
`day` date )
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
WITH SERDEPROPERTIES ("separatorChar" = ",",
"quoteChar" = """,
"escapeChar" = "\\\\"
)
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;
LOAD DATA INPATH 'hdfs:///path/data.csv' INTO TABLE `default`.`hue__tmp_export_table` PARTITION (day='20200101');
CREATE TABLE `default`.`export_table` COMMENT "No comment!"
STORED AS csv
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_export_table`;
DROP TABLE IF EXISTS `default`.`hue__tmp_export_table`;'''.split(';')],
[statement.strip() for statement in notebook.get_data()['snippets'][0]['statement_raw'].split(';')]
)
class MockRequest(object):
def __init__(self, fs=None, user=None):
self.fs = fs if fs is not None else MockFs()
if user is None:
self.c = make_logged_in_client(username='test_importer', is_superuser=False)
self.user = User.objects.get(username='test_importer')
else:
self.user = user
class MockFs(object):
def __init__(self, path=None):
self.path = {'isDir': False, 'listdir': ['/A'], 'parent_path': '/A'} if path is None else path
def isdir(self, path):
return self.path['isDir']
def split(self, path):
return self.path['split']
def listdir(self, path):
return self.path['listdir']
def parent_path(self, path):
return self.path['parent_path']
def stats(self, path):
return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
source = {
u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False,
u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type':
u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False,
u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file',
u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"',
u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True,
u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'},
{u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'},
{u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'',
u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'},
{u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}],
u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'',
u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False
}
destination = {
u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive',
u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats',
u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'},
{u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'',
u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100,
u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}],
u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [],
u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}],
u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'',
u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True,
u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1,
u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [],
u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats',
u'tableFormat': u'text', 'ouputFormat': u'table',
u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders',
u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string',
u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True,
u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'},
{u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'},
{u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}],
u'customCollectionDelimiter': u'\\002'
}
request = MockRequest(fs=MockFs())
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE TABLE `default`.`customer_stats`
(
`customers.id` bigint ,
`customers.name` string ,
`customers.email_preferences` string ,
`customers.addresses` string ,
`customers.orders` string ) PARTITIONED BY (
`new_field_1` string )
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true(
'''LOAD DATA INPATH '/user/romain/customer_stats.csv' '''
'''INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql,
sql
)
def test_generate_create_kudu_table_with_data():
source = {
u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file',
u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','},
u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default',
u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}],
u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False
}
destination = {
u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala',
u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data',
u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'},
{u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'',
u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True,
u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'],
u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'},
{u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True,
u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',',
u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default',
u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES',
u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'],
u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data',
u'tableFormat': u'kudu',
u'bulkColumnNames': u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,'
'longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id',
u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint',
u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'funny', u'level': 0, u'scale': 4, u'precision': 10, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100,
u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [],
u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False,
u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0,
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False,
u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'',
u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False,
u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True},
{u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False,
u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string',
u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'},
{u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'},
{u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'},
{u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None
}
request = MockRequest(fs=MockFs())
with patch('hadoop.fs.hadoopfs.Hdfs.split') as split:
split.return_value = ('/A', 'a')
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
`business_id` string ,
`cool` bigint ,
`date` string ,
`funny` decimal(10, 4) ,
`id` string ,
`stars` bigint ,
`text` string ,
`type` string ,
`useful` bigint ,
`user_id` string ,
`name` string ,
`full_address` string ,
`latitude` double ,
`longitude` double ,
`neighborhoods` string ,
`open` string ,
`review_count` bigint ,
`state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
PRIMARY KEY (id)
PARTITION BY HASH PARTITIONS 16
STORED AS kudu
TBLPROPERTIES(
'kudu.num_tablet_replicas' = '1'
)
AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, '''
'''`full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
FROM `default`.`hue__tmp_index_data`''' in sql,
sql
)
def test_generate_create_parquet_table():
source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694",'''
'''"-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US",'''
'''"Corpus Christi","40.7807998657","-73.9772033691"],["<NAME>","2400000.0","US","Albany","35.7976989746",'''
'''"-78.6252975464"],'''
'''["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"",'''
'''"nested":[],'''
'''"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double",'''
'''"showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":'''
'''"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,'''
'''"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],'''
'''"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,'''
'''"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,'''
'''"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},'''
'''{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":'''
'''"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":'''
'''"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"",'''
'''"tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv",'''
'''"fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":'''
'''"default.query-hive-360"}'''
)
destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table"'''
''',"apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},'''
'''{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],'''
'''"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],'''
'''"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":'''
'''"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,'''
'''"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":'''
'''[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":'''
'''100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,'''
'''vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":'''
'''"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":'''
'''{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},'''
'''"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,'''
'''"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":'''
'''"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},'''
'''{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc",'''
'''"name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,'''
'''"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":'''
'''false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS parquet
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;''' in sql, sql)
def test_generate_create_orc_table_transactional():
source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694",'''
'''"-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US",'''
'''"Corpus Christi","40.7807998657","-73.9772033691"],["<NAME>","2400000.0","US","Albany","35.7976989746",'''
'''"-78.6252975464"],'''
'''["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"",'''
'''"nested":[],'''
'''"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],'''
'''"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,'''
'''"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"",'''
'''"multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"",'''
'''"nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":'''
'''false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},'''
'''{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],'''
'''"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual",'''
'''"name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],'''
'''"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default",'''
'''"apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n",'''
'''"quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}'''
)
destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":'''
'''"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":'''
'''[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},'''
'''{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],'''
'''"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":'''
'''"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,'''
'''tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,'''
'''"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc",'''
'''"KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=",'''
'''"upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":'''
'''[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH",'''
'''"int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},'''
'''{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},'''
'''{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],'''
'''"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,'''
'''"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003",'''
'''"customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS orc
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", '''
'''"name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table",'''
'''"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":'''
'''"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"",'''
'''"nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,'''
'''"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":'''
'''false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat",'''
'''"level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,'''
'''"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},'''
'''{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,'''
'''"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,'''
'''"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,'''
'''vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":'''
'''"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":'''
'''{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},'''
'''"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,'''
'''"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text",'''
'''"name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},'''
'''{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],'''
'''"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,'''
'''"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":'''
'''false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}'''
)
path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
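# The non-ASCII test below feeds a gb2312-encoded CSV through SQLIndexer and checks that,
# besides the usual temp-table / CTAS / DROP statements, the generated SQL ends with an
# ALTER TABLE ... SET serdeproperties ("serialization.encoding"="gb2312") statement.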
def test_create_ddl_with_nonascii():
source = {u'kafkaFieldType': u'delimited', u'rdbmsUsername': u'', u'kafkaFieldTypes': u'',
u'selectedTableIndex': 0, u'rdbmsJdbcDriverNames': [], u'tableName': u'',
u'sample': [[u'Weihaiwei', u'\u5a01\u6d77\u536b\u5e02', u'Weihai', u'\u5a01\u6d77\u5e02', u'1949-11-01'],
[u'Xingshan', u'\u5174\u5c71\u5e02', u'Hegang', u'\u9e64\u5c97\u5e02', u'1950-03-23'],
[u"Xi'an", u'\u897f\u5b89\u5e02', u'Liaoyuan', u'\u8fbd\u6e90\u5e02', u'1952-04-03'],
[u'Nanzheng', u'\u5357\u90d1\u5e02', u'Hanzhong', u'\u6c49\u4e2d\u5e02', u'1953-10-24'],
[u'Dihua', u'\u8fea\u5316\u5e02', u'?r\xfcmqi', u'\u4e4c\u9c81\u6728\u9f50\u5e02', u'1953-11-20']],
u'rdbmsTypes': [], u'isFetchingDatabaseNames': False, u'rdbmsDbIsValid': False, u'query': u'',
u'channelSourceSelectedHosts': [], u'table': u'', u'rdbmsAllTablesSelected': False,
u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'rdbmsPassword': u'',
u'isObjectStore': False, u'tables': [{u'name': u''}], u'streamUsername': u'',
u'kafkaSchemaManual': u'detect', u'connectorSelection': u'sfdc', u'namespace':
{u'status': u'CREATED', u'computes':
[{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}],
u'id': u'default', u'name': u'default'}, u'rdbmsIsAllTables': False, u'rdbmsDatabaseNames': [],
u'hasStreamSelected': False, u'channelSourcePath': u'/var/log/hue-httpd/access_log',
u'channelSourceHosts': [], u'show': True, u'streamObjects': [], u'streamPassword': u'',
u'tablesNames': [], u'sampleCols': [{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'Before', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'old_Chinese_name', u'level': 0, u'keyType':
u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'After', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'new_Chinese_name', u'level': 0, u'keyType':
u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False,
u'name': u'Renamed_date', u'level': 0, u'keyType': u'string',
u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'',
u'multiValued': False, u'keep': True, u'type': u'string',
u'showProperties': False, u'scale': 0}], u'rdbmsDatabaseName': u'',
u'sourceType': u'hive', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',',
u'hasHeader': True, u'quoteChar': u'"',
u'recordSeparator': u'\\n', u'type': u'csv'},
u'connectorList': [{u'name': u'Salesforce', u'value': u'sfdc'}], u'kafkaFieldDelimiter': u',',
u'rdbmsPort': u'', u'rdbmsTablesExclude': [], u'isFetchingDriverNames': False, u'publicStreams':
[{u'name': u'Kafka Topics', u'value': u'kafka'}, {u'name': u'Flume Agent', u'value': u'flume'}],
u'channelSourceTypes': [{u'name': u'Directory or File', u'value': u'directory'},
{u'name': u'Program', u'value': u'exec'},
{u'name': u'Syslogs', u'value': u'syslogs'},
{u'name': u'HTTP', u'value': u'http'}],
u'databaseName': u'default', u'inputFormats': [{u'name': u'File', u'value': u'file'},
{u'name': u'External Database', u'value': u'rdbms'},
{u'name': u'Manually', u'value': u'manual'}],
u'path': u'/user/admin/renamed_chinese_cities_gb2312.csv', u'streamToken': u'', u'kafkaFieldNames': u'',
u'streamSelection': u'kafka', u'compute': {u'credentials': {}, u'type': u'direct',
u'id': u'default', u'name': u'default'},
u'name': u'', u'kafkaFieldSchemaPath': u'', u'kafkaTopics': [], u'rdbmsJdbcDriver': u'',
u'rdbmsHostname': u'', u'isFetchingTableNames': False, u'rdbmsType': None, u'inputFormatsAll':
[{u'name': u'File', u'value': u'file'}, {u'name': u'External Database', u'value': u'rdbms'},
{u'name': u'Manually', u'value': u'manual'}], u'rdbmsTableNames': [],
u'streamEndpointUrl': u'https://login.salesforce.com/services/Soap/u/42.0', u'kafkaSelectedTopics': u''}
destination = {u'isTransactionalVisible': True, u'KUDU_DEFAULT_PARTITION_COLUMN':
{u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions':
[{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'namespaces':
[{u'status': u'CREATED', u'computes': [{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}],
u'id': u'default', u'name': u'default'}], u'isTargetChecking': False, 'ouputFormat': u'table',
u'tableName': u'renamed_chinese_cities_gb2312', u'outputFormatsList':
[{u'name': u'Table', u'value': u'table'}, {u'name': u'Search index', u'value': u'index'},
{u'name': u'Database', u'value': u'database'}, {u'name': u'Folder', u'value': u'file'},
{u'name': u'HBase Table', u'value': u'hbase'}],
u'fieldEditorPlaceHolder': u'Example: SELECT * FROM [object Promise]', u'indexerDefaultField': [],
u'fieldEditorValue':
u'SELECT Before,\n old_Chinese_name,\n After,\n new_Chinese_name,\n Renamed_date\n FROM [object Promise];',
u'customRegexp': u'', u'customLineDelimiter': u'\\n', u'isTargetExisting': False,
u'customEnclosedByDelimiter': u"'", u'indexerConfigSets': [], u'sourceType': u'hive',
u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'numMappers': 1,
u'fieldEditorDatabase': u'default', u'namespace': {u'status': u'CREATED', u'computes':
[{u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'}], u'id': u'default', u'name': u'default'},
u'indexerPrimaryKeyObject': [], u'kuduPartitionColumns': [], u'rdbmsFileOutputFormats':
[{u'name': u'text', u'value': u'text'}, {u'name': u'sequence', u'value': u'sequence'},
{u'name': u'avro', u'value': u'avro'}], u'outputFormats': [{u'name': u'Table', u'value': u'table'},
{u'name': u'Search index', u'value': u'index'}],
u'fieldEditorEnabled': False, u'indexerDefaultFieldObject': [],
u'customMapDelimiter': u'', u'partitionColumns': [], u'rdbmsFileOutputFormat': u'text',
u'showProperties': False, u'isTransactional': True, u'useDefaultLocation': True, u'description': u'',
u'customFieldsDelimiter': u',', u'primaryKeyObjects': [], u'customFieldDelimiter': u',',
u'rdbmsSplitByColumn': [], u'existingTargetUrl': u'', u'channelSinkTypes':
[{u'name': u'This topic', u'value': u'kafka'}, {u'name': u'Solr', u'value': u'solr'},
{u'name': u'HDFS', u'value': u'hdfs'}], u'defaultName': u'default.renamed_chinese_cities_gb2312',
u'isTransactionalUpdateEnabled': False, u'importData': True, u'databaseName': u'default',
u'indexerRunJob': False, u'indexerReplicationFactor': 1, u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN':
{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=',
u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'indexerConfigSet': u'',
u'sqoopJobLibPaths': [{u'path': u''}], u'outputFormat': u'table',
u'nonDefaultLocation': u'/user/admin/renamed_chinese_cities_gb2312.csv',
u'compute': {u'credentials': {}, u'type': u'direct', u'id': u'default', u'name': u'default'},
u'name': u'default.renamed_chinese_cities_gb2312', u'tableFormat': u'text', u'isInsertOnly': True,
u'targetNamespaceId': u'default', u'bulkColumnNames': u'Before,old_Chinese_name,After,new_Chinese_name,Renamed_date',
u'columns': [{u'operations': [], u'comment': u'', u'unique': False, u'name': u'Before', u'level': 0,
u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'old_Chinese_name',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'After', u'level': 0,
u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'new_Chinese_name',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0},
{u'operations': [], u'comment': u'', u'unique': False, u'name': u'Renamed_date',
u'level': 0, u'keyType': u'string', u'required': False, u'precision': 10, u'nested': [],
u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False,
u'keep': True, u'type': u'string', u'showProperties': False, u'scale': 0}],
u'hasHeader': True, u'indexerPrimaryKey': [], u'tableFormats':
[{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'},
{u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'},
{u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'},
{u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'', u'indexerNumShards': 1,
u'useFieldEditor': False, u'indexerJobLibPath': u'/tmp/smart_indexer_lib'}
file_encoding = u'gb2312'
path = {
'isDir': False,
'split': ('/user/admin', 'renamed_chinese_cities_gb2312.csv'),
'listdir': ['/user/admin/data'],
'parent_path': '/user/admin/.scratchdir/03d184ad-dd11-4ae1-aace-378daaa094e5/renamed_chinese_cities_gb2312.csv/..'
}
request = MockRequest(fs=MockFs(path=path))
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination, start_time=-1,
file_encoding=file_encoding).get_str()
assert_true('''USE default;''' in sql, sql)
statement = '''CREATE TABLE `default`.`hue__tmp_renamed_chinese_cities_gb2312`
(
`Before` string ,
`old_Chinese_name` string ,
`After` string ,
`new_Chinese_name` string ,
`Renamed_date` string ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile TBLPROPERTIES("skip.header.line.count" = "1", "transactional" = "false")
;'''
assert_true(statement in sql, sql)
statement = "LOAD DATA INPATH '/user/admin/renamed_chinese_cities_gb2312.csv' " + \
"INTO TABLE `default`.`hue__tmp_renamed_chinese_cities_gb2312`;"
assert_true(statement in sql, sql)
statement = '''CREATE TABLE `default`.`renamed_chinese_cities_gb2312`
STORED AS TextFile
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_renamed_chinese_cities_gb2312`;'''
assert_true(statement in sql, sql)
statement = '''DROP TABLE IF EXISTS `default`.`hue__tmp_renamed_chinese_cities_gb2312`;'''
assert_true(statement in sql, sql)
statement = '''ALTER TABLE `default`.`renamed_chinese_cities_gb2312` ''' + \
'''SET serdeproperties ("serialization.encoding"="gb2312");'''
assert_true(statement in sql, sql)
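# The ABFS test below temporarily overrides ABFS_CLUSTERS so that load_data() resolves the
# abfs://my-data/... form path against the configured container before emitting the DDL.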
def test_create_ddl_with_abfs():
finish = ABFS_CLUSTERS.set_for_testing(
{
'default': {
'fs_defaultfs': 'abfs://[email protected]',
'webhdfs_url': 'https://yingstorage.dfs.core.windows.net'
}
}
)
form_data = {'path': u'abfs://my-data/test_data/cars.csv', 'partition_columns': [], 'overwrite': False}
sql = ''
request = MockRequest(fs=MockFs())
query_server_config = dbms.get_query_server_config(name='impala')
db = dbms.get(request.user, query_server=query_server_config)
try:
sql = "\n\n%s;" % db.load_data('default', 'cars', form_data, None, generate_ddl_only=True)
finally:
finish()
assert_true(u"\'abfs://[email protected]/test_data/cars.csv\'" in sql)
def test_create_table_from_local():
source = {
'path': '',
'sourceType': 'hive'
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'date', 'type': 'timestamp'},
{'name': 'hour', 'type': 'bigint'},
{'name': 'minute', 'type': 'bigint'},
{'name': 'dep', 'type': 'bigint'},
{'name': 'arr', 'type': 'bigint'},
{'name': 'dep_delay', 'type': 'bigint'},
{'name': 'arr_delay', 'type': 'bigint'},
{'name': 'carrier', 'type': 'string'},
{'name': 'flight', 'type': 'bigint'},
{'name': 'dest', 'type': 'string'},
{'name': 'plane', 'type': 'string'},
{'name': 'cancelled', 'type': 'boolean'},
{'name': 'time', 'type': 'bigint'},
{'name': 'dist', 'type': 'bigint'},
],
'indexerPrimaryKey': [],
'sourceType': 'hive'
}
request = MockRequest(fs=MockFs())
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1 (
`date` timestamp,
`hour` bigint,
`minute` bigint,
`dep` bigint,
`arr` bigint,
`dep_delay` bigint,
`arr_delay` bigint,
`carrier` string,
`flight` bigint,
`dest` string,
`plane` string,
`cancelled` boolean,
`time` bigint,
`dist` bigint);'''
assert_equal(statement, sql)
def test_create_table_from_local_mysql():
source = {
'path': '/apps/beeswax/data/tables/us_population.csv',
'sourceType': 'mysql',
'format': {'hasHeader': False}
}
destination = {
'name': 'default.test1',
'columns': [
{'name': 'field_1', 'type': 'string'},
{'name': 'field_2', 'type': 'string'},
{'name': 'field_3', 'type': 'bigint'},
],
'sourceType': 'mysql'
}
request = MockRequest(fs=MockFs())
sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_local_file(source, destination).get_str()
statement = '''USE default;
CREATE TABLE IF NOT EXISTS default.test1 (
`field_1` VARCHAR(255),
`field_2` VARCHAR(255),
`field_3` bigint);
INSERT INTO default.test1 VALUES ('NY', 'New York', '8143197'), ('CA', 'Los Angeles', '3844829'), \
('IL', 'Chicago', '2842518'), ('TX', 'Houston', '2016582'), ('PA', 'Philadelphia', '1463281'), \
('AZ', 'Phoenix', '1461575'), ('TX', 'San Antonio', '1256509'), ('CA', 'San Diego', '1255540'), \
('TX', 'Dallas', '1213825'), ('CA', 'San Jose', '912332');'''
assert_equal(statement, sql)
``` |
{
"source": "jkilpatr/Kibana-Protector",
"score": 2
} |
#### File: jkilpatr/Kibana-Protector/main.py
```python
from flask import Flask, render_template, request, send_from_directory
from flask_login import LoginManager, login_user, current_user
import requests
import tools
import user
import uuid
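# Deployment configuration: the secret key, the upstream Kibana/Grafana URLs and the domain
# below are left blank on purpose and must be filled in before running the proxy.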
application = Flask(__name__, static_url_path='')
application.secret_key = ''
kibana_url = ""
grafana_url = ""
domain = ""
login_manager = LoginManager()
login_manager.init_app(application)
@login_manager.user_loader
def load_user(user_id):
person = user.User(user_id)
return person
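# Each route below follows the same pattern: if the current session is authenticated the
# request is proxied to Kibana through tools.parse_proxy_request(), otherwise the user is
# sent to the captcha or refusal page.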
@application.route('/<subdir>/<path:path>')
def kibana_get(path, subdir):
#this way we check the db only once
auth = current_user.is_authenticated
if auth:
print "User is authed, proxying to kibana for " + path
response = \
tools.parse_proxy_request(request, \
kibana_url + subdir + "/" + path, \
"get", \
True)
print "Response from Elastic to GET " + str(response)
return tools.send_to_user(response)
else:
return render_template('sorry.html')
@application.route("/<subdir>/<path:path>", methods=["POST"])
def kibana_post(path, subdir):
print "someone is trying to talk to elasticsearch " + path + " <"
auth = current_user.is_authenticated
if auth and not tools.allowed(path, request):
print "data for disallowed post"
print request.data
return render_template('sorry.html')
elif auth:
response = \
tools.parse_proxy_request(request, \
kibana_url + subdir + "/"+ path \
,"post"
,True)
print "Response from Elastic to POST " + str(response)
return tools.send_to_user(response)
else:
print "user is not authed"
return render_template('capcha.html')
@application.route("/<subdir>/<path:path>", methods=["PUT"])
def kibana_put(path, subdir):
print "someone is trying to talk to elasticsearch " + path + " <"
auth = current_user.is_authenticated
if auth and not tools.allowed(path, request):
print "data for disallowed put"
        print request.data
return render_template('sorry.html')
elif auth:
response = \
tools.parse_proxy_request(request, \
kibana_url + subdir + "/"+ path \
,"put"
,True)
print "Response from Elastic to PUT " + str(response)
return tools.send_to_user(response)
else:
return render_template('sorry.html', grafana_url=grafana_url)
@application.route("/<subdir>/<path:path>", methods=["DELETE"])
def kibana_delete(path, subdir):
print "someone is trying to talk to elasticsearch " + path + " <"
auth = current_user.is_authenticated
if auth and not tools.allowed(path, request):
print "data for disallowed delete"
print request.data
return render_template('sorry.html')
elif auth:
response = \
tools.parse_proxy_request(request, \
kibana_url + subdir + "/"+ path \
,"delete"
,True)
print "Response from Elastic to DELETE " + str(response)
return tools.send_to_user(response)
else:
return render_template('sorry.html', grafana_url=grafana_url)
@application.route("/shorten", methods=["POST"])
def kibana_shorten():
print "someone is trying to shorten a link"
auth = current_user.is_authenticated
if auth:
response = \
tools.parse_proxy_request(request, \
kibana_url + "shorten" \
,"post"
,False)
print "Response from Elastic" + str(response)
return tools.send_to_user(response)
else:
return render_template('capcha.html', grafana_url=grafana_url)
@application.route("/goto/<short>")
def kibana_unshorten(short):
auth = current_user.is_authenticated
if auth:
print "short request to proxy " + str(request.headers)
response = \
tools.parse_proxy_request(request, \
kibana_url + "goto/" + short, \
"get", \
True)
print "Response from Elastic to short GET " + str(response.headers)
return tools.send_to_user(response)
else:
return render_template('sorry.html')
@application.route('/')
def default():
print "you have reached the root directory, how can I help you?"
if current_user.is_authenticated:
response = requests.get(kibana_url, stream = True)
return tools.send_to_user(response)
else:
return render_template('capcha.html', grafana_url=grafana_url)
@application.route("/submit", methods=["POST"])
def submit():
print "should only see this when submitting capcha"
if tools.verify_captcha(request.form['g-recaptcha-response']):
# SUCCESS
user_obj = user.User(str(uuid.uuid4()), True)
login_user(user_obj)
return render_template('thanks.html')
pass
else:
# FAILED
return render_template('sorry.html', grafana_url=grafana_url)
pass
#TODO DO NOT use this in prod, very very slow, move to Nginx
# this is *supposedly* acceptable using uwsgi, we shall see
@application.route('/js/<path:path>')
def send_js(path):
return send_from_directory('js', path)
@application.route('/css/<path:path>')
def send_css(path):
return send_from_directory('css', path)
@application.route('/img/<path:path>')
def send_img(path):
return send_from_directory('img', path)
if __name__ == "__main__":
application.run(debug=True)
``` |
{
"source": "jkim117/IWSpring2020",
"score": 2
} |
#### File: IWSpring2020/Combined_Netassay/comb_json_155.py
```python
import json
globalID1T = 0
globalID2T = 0
globalID3T = 0
globalID4T = 0
globalID5T = 0
globalIDT = 0
priority1T = 0
priority2T = 0
priority3T = 0
priority4T = 0
priority5T = 0
globalID1 = 0
globalID2 = 0
globalID3 = 0
globalID4 = 0
globalID5 = 0
globalID = 0
priority1 = 0
priority2 = 0
priority3 = 0
priority4 = 0
priority5 = 0
data = {}
data["target"] = "bmv2"
data["p4info"] = "build/calc2.p4.p4info.txt"
data["bmv2_json"] = "build/calc2.json"
data["table_entries"] = []
def dictSetUpT(partNum):
if (partNum == 1):
partsDict = {
"headers.q1_1.char": [0, 255],
"headers.q1_2.char": [0, 255],
"headers.q1_3.char": [0, 255],
"headers.q1_4.char": [0, 255],
"headers.q1_5.char": [0, 255],
"headers.q1_6.char": [0, 255],
"headers.q1_7.char": [0, 255],
"headers.q1_8.char": [0, 255],
"headers.q1_9.char": [0, 255],
"headers.q1_10.char": [0, 255],
"headers.q1_11.char": [0, 255],
"headers.q1_12.char": [0, 255],
"headers.q1_13.char": [0, 255],
"headers.q1_14.char": [0, 255],
"headers.q1_15.char": [0, 255],
"headers.q1_16.char": [0, 255],
"headers.q1_17.char": [0, 255],
"headers.q1_18.char": [0, 255],
"headers.q1_19.char": [0, 255],
"headers.q1_20.char": [0, 255],
"headers.q1_21.char": [0, 255],
"headers.q1_22.char": [0, 255],
"headers.q1_23.char": [0, 255],
"headers.q1_24.char": [0, 255],
"headers.q1_25.char": [0, 255],
"headers.q1_26.char": [0, 255],
"headers.q1_27.char": [0, 255],
"headers.q1_28.char": [0, 255],
"headers.q1_29.char": [0, 255],
"headers.q1_30.char": [0, 255],
"headers.q1_31.char": [0, 255],
"headers.q1_32.char": [0, 255]
}
return partsDict
elif (partNum == 2):
partsDict = {
"headers.q2_1.char": [0, 255],
"headers.q2_2.char": [0, 255],
"headers.q2_3.char": [0, 255],
"headers.q2_4.char": [0, 255],
"headers.q2_5.char": [0, 255],
"headers.q2_6.char": [0, 255],
"headers.q2_7.char": [0, 255],
"headers.q2_8.char": [0, 255],
"headers.q2_9.char": [0, 255],
"headers.q2_10.char": [0, 255],
"headers.q2_11.char": [0, 255],
"headers.q2_12.char": [0, 255],
"headers.q2_13.char": [0, 255],
"headers.q2_14.char": [0, 255],
"headers.q2_15.char": [0, 255],
"headers.q2_16.char": [0, 255],
"headers.q2_17.char": [0, 255],
"headers.q2_18.char": [0, 255],
"headers.q2_19.char": [0, 255],
"headers.q2_20.char": [0, 255],
"headers.q2_21.char": [0, 255],
"headers.q2_22.char": [0, 255],
"headers.q2_23.char": [0, 255],
"headers.q2_24.char": [0, 255],
"headers.q2_25.char": [0, 255],
"headers.q2_26.char": [0, 255],
"headers.q2_27.char": [0, 255],
"headers.q2_28.char": [0, 255],
"headers.q2_29.char": [0, 255],
"headers.q2_30.char": [0, 255],
"headers.q2_31.char": [0, 255],
"headers.q2_32.char": [0, 255]
}
return partsDict
elif (partNum == 3):
partsDict = {
"headers.q3_1.char": [0, 255],
"headers.q3_2.char": [0, 255],
"headers.q3_3.char": [0, 255],
"headers.q3_4.char": [0, 255],
"headers.q3_5.char": [0, 255],
"headers.q3_6.char": [0, 255],
"headers.q3_7.char": [0, 255],
"headers.q3_8.char": [0, 255],
"headers.q3_9.char": [0, 255],
"headers.q3_10.char": [0, 255],
"headers.q3_11.char": [0, 255],
"headers.q3_12.char": [0, 255],
"headers.q3_13.char": [0, 255],
"headers.q3_14.char": [0, 255],
"headers.q3_15.char": [0, 255],
"headers.q3_16.char": [0, 255],
"headers.q3_17.char": [0, 255],
"headers.q3_18.char": [0, 255],
"headers.q3_19.char": [0, 255],
"headers.q3_20.char": [0, 255],
"headers.q3_21.char": [0, 255],
"headers.q3_22.char": [0, 255],
"headers.q3_23.char": [0, 255],
"headers.q3_24.char": [0, 255],
"headers.q3_25.char": [0, 255],
"headers.q3_26.char": [0, 255],
"headers.q3_27.char": [0, 255],
"headers.q3_28.char": [0, 255],
"headers.q3_29.char": [0, 255],
"headers.q3_30.char": [0, 255],
"headers.q3_31.char": [0, 255],
"headers.q3_32.char": [0, 255]
}
return partsDict
elif (partNum == 4):
partsDict = {
"headers.q4_1.char": [0, 255],
"headers.q4_2.char": [0, 255],
"headers.q4_3.char": [0, 255],
"headers.q4_4.char": [0, 255],
"headers.q4_5.char": [0, 255],
"headers.q4_6.char": [0, 255],
"headers.q4_7.char": [0, 255],
"headers.q4_8.char": [0, 255],
"headers.q4_9.char": [0, 255],
"headers.q4_10.char": [0, 255],
"headers.q4_11.char": [0, 255],
"headers.q4_12.char": [0, 255],
"headers.q4_13.char": [0, 255],
"headers.q4_14.char": [0, 255],
"headers.q4_15.char": [0, 255],
"headers.q4_17.char": [0, 255],
"headers.q4_18.char": [0, 255],
"headers.q4_19.char": [0, 255],
"headers.q4_20.char": [0, 255],
"headers.q4_21.char": [0, 255],
"headers.q4_22.char": [0, 255],
"headers.q4_23.char": [0, 255],
"headers.q4_24.char": [0, 255],
"headers.q4_25.char": [0, 255],
"headers.q4_26.char": [0, 255],
"headers.q4_27.char": [0, 255],
"headers.q4_28.char": [0, 255],
"headers.q4_29.char": [0, 255],
"headers.q4_30.char": [0, 255],
"headers.q4_31.char": [0, 255],
"headers.q4_32.char": [0, 255]
}
return partsDict
elif (partNum == 5):
partsDict = {
"headers.q5_1.char": [0, 255],
"headers.q5_2.char": [0, 255],
"headers.q5_3.char": [0, 255],
"headers.q5_4.char": [0, 255],
"headers.q5_5.char": [0, 255],
"headers.q5_6.char": [0, 255],
"headers.q5_7.char": [0, 255],
"headers.q5_8.char": [0, 255],
"headers.q5_9.char": [0, 255],
"headers.q5_10.char": [0, 255],
"headers.q5_11.char": [0, 255],
"headers.q5_12.char": [0, 255],
"headers.q5_13.char": [0, 255],
"headers.q5_14.char": [0, 255],
"headers.q5_15.char": [0, 255],
"headers.q5_17.char": [0, 255],
"headers.q5_18.char": [0, 255],
"headers.q5_19.char": [0, 255],
"headers.q5_20.char": [0, 255],
"headers.q5_21.char": [0, 255],
"headers.q5_22.char": [0, 255],
"headers.q5_23.char": [0, 255],
"headers.q5_24.char": [0, 255],
"headers.q5_25.char": [0, 255],
"headers.q5_26.char": [0, 255],
"headers.q5_27.char": [0, 255],
"headers.q5_28.char": [0, 255],
"headers.q5_29.char": [0, 255],
"headers.q5_30.char": [0, 255],
"headers.q5_31.char": [0, 255],
}
return partsDict
return -1
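# Each [0, 255] entry above is read as a (value, mask) ternary match pair (assumption: value
# first, full 0xFF mask), so unused character positions default to matching 0x00.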
def addPart1ToDictT(part, partsDict):
    part_len = len(part)
    if part_len > 32:
        print("Domain with part longer than 32 characters")
        exit(-1)
    # Write each character of the part into its positional q1_<n> match slot.
    for i in range(part_len):
        partsDict["headers.q1_%d.char" % (i + 1)] = [part[i], 255]
    return partsDict
def addPart2ToDictT(part, partsDict):
    part_len = len(part)
    if part_len > 32:
        print("Domain with part longer than 32 characters")
        exit(-1)
    for i in range(part_len):
        partsDict["headers.q2_%d.char" % (i + 1)] = [part[i], 255]
    return partsDict
def addPart3ToDictT(part, partsDict):
    part_len = len(part)
    if part_len > 32:
        print("Domain with part longer than 32 characters")
        exit(-1)
    for i in range(part_len):
        partsDict["headers.q3_%d.char" % (i + 1)] = [part[i], 255]
    return partsDict
def addPart4ToDictT(part, partsDict):
    part_len = len(part)
    if part_len > 32:
        print("Domain with part longer than 32 characters")
        exit(-1)
    for i in range(part_len):
        partsDict["headers.q4_%d.char" % (i + 1)] = [part[i], 255]
    return partsDict
def addPart5ToDictT(part, partsDict):
    part_len = len(part)
    # The fifth query part only has 31 character slots (q5_1 .. q5_31).
    if part_len > 31:
        print("Domain with part longer than 31 characters")
        exit(-1)
    for i in range(part_len):
        partsDict["headers.q5_%d.char" % (i + 1)] = [part[i], 255]
    return partsDict
part5DictT = {}
part4DictT = {}
part3DictT = {}
part2DictT = {}
part1DictT = {}
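# These dictionaries cache the ID assigned to each distinct part string, so domains that share
# a label at a given position reuse the existing table entry instead of installing a duplicate;
# '*' and '*.' are treated as the same wildcard.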
# If len(parts)==1
def onepartsT(parts):
if parts[0] in part1DictT:
return part1DictT[parts[0]]
if (parts[0] == '*' and '*.' in part1DictT):
return part1DictT['*.']
if (parts[0] == '*.' and '*' in part1DictT):
return part1DictT['*']
global globalID1T
global priority1T
globalID1T = globalID1T + 1
part1DictT[parts[0]] = globalID1T
if (parts[0] == '*' or parts[0] == '*.'):
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q1",
"match": {},
"action_name": "TopIngress.match_q1",
"priority": 1,
"action_params": {"q1id": globalID1T}
})
return globalID1T
dict_t = dictSetUpT(1)
addPart1ToDictT(parts[0], dict_t)
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q1",
"match": dict_t,
"action_name": "TopIngress.match_q1",
"priority": priority1T,
"action_params": {"q1id": globalID1T}
})
priority1T = priority1T - 1
return globalID1T
# If len(parts)==2
def twopartsT(parts):
if parts[1] in part2DictT:
return part2DictT[parts[1]]
if (parts[1] == '*' and '*.' in part2DictT):
return part2DictT['*.']
if (parts[1] == '*.' and '*' in part2DictT):
return part2DictT['*']
global globalID2T
global priority2T
globalID2T = globalID2T + 1
part2DictT[parts[1]] = globalID2T
if (parts[1] == '*' or parts[1] == '*.'):
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q2",
"match": {},
"action_name": "TopIngress.match_q2",
"priority": 1,
"action_params": {"q2id": globalID2T}
})
return globalID2T
dict_t = dictSetUpT(2)
addPart2ToDictT(parts[1], dict_t)
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q2",
"match": dict_t,
"action_name": "TopIngress.match_q2",
"priority": priority2T,
"action_params": {"q2id": globalID2T}
})
priority2T = priority2T - 1
return globalID2T
# If len(parts)==3
def threepartsT(parts):
if parts[2] in part3DictT:
return part3DictT[parts[2]]
if (parts[2] == '*' and '*.' in part3DictT):
return part3DictT['*.']
if (parts[2] == '*.' and '*' in part3DictT):
return part3DictT['*']
global globalID3T
global priority3T
globalID3T = globalID3T + 1
part3DictT[parts[2]] = globalID3T
if (parts[2] == '*' or parts[2] == '*.'):
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q3",
"match": {},
"action_name": "TopIngress.match_q3",
"priority": 1,
"action_params": {"q3id": globalID3T}
})
return globalID3T
dict_t = dictSetUpT(3)
addPart3ToDictT(parts[2], dict_t)
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q3",
"match": dict_t,
"action_name": "TopIngress.match_q3",
"priority": priority3T,
"action_params": {"q3id": globalID3T}
})
priority3T = priority3T - 1
return globalID3T
# If len(parts)==4
def fourpartsT(parts):
if parts[3] in part4DictT:
return part4DictT[parts[3]]
if (parts[3] == '*' and '*.' in part4DictT):
return part4DictT['*.']
if (parts[3] == '*.' and '*' in part4DictT):
return part4DictT['*']
global globalID4T
global priority4T
globalID4T = globalID4T + 1
part4DictT[parts[3]] = globalID4T
if (parts[3] == '*' or parts[3] == '*.'):
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q4",
"match": {},
"action_name": "TopIngress.match_q4",
"priority": 1,
"action_params": {"q4id": globalID4T}
})
return globalID4T
dict_t = dictSetUpT(4)
addPart4ToDictT(parts[3], dict_t)
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q4",
"match": dict_t,
"action_name": "TopIngress.match_q4",
"priority": priority4T,
"action_params": {"q4id": globalID4T}
})
priority4T = priority4T - 1
return globalID4T
# If len(parts)==5
def fivepartsT(parts):
if parts[4] in part5DictT:
return part5DictT[parts[4]]
if (parts[4] == '*' and '*.' in part5DictT):
return part5DictT['*.']
if (parts[4] == '*.' and '*' in part5DictT):
return part5DictT['*']
global globalID5T
global priority5T
globalID5T = globalID5T + 1
part5DictT[parts[4]] = globalID5T
if (parts[4] == '*' or parts[4] == '*.'):
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q5",
"match": {},
"action_name": "TopIngress.match_q5",
"priority": 1,
"action_params": {"q5id": globalID5T}
})
return globalID5T
dict_t = dictSetUpT(5)
addPart5ToDictT(parts[4], dict_t)
data["table_entries"].append({
"table": "TopIngress.tlsknown_domain_list_q5",
"match": dict_t,
"action_name": "TopIngress.match_q5",
"priority": priority5T,
"action_params": {"q5id": globalID5T}
})
priority5T = priority5T - 1
return globalID5T
def creatDomainEntryT(parts):
id5 = fivepartsT(parts)
id4 = fourpartsT(parts)
id3 = threepartsT(parts)
id2 = twopartsT(parts)
id1 = onepartsT(parts)
global globalIDT
globalIDT = globalIDT + 1
idDict = {
"user_metadata.q1_id": id1,
"user_metadata.q2_id": id2,
"user_metadata.q3_id": id3,
"user_metadata.q4_id": id4,
"user_metadata.q5_id": id5
}
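    # Note: the action parameter reuses the DNS-side globalID, which creatDomainEntry() advanced for
    # this same domain just before creatDomainEntryT() is called in addDomainToTable().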
data["table_entries"].append({
"table": "TopIngress.tlsmatch_known_domain_list",
"match": idDict,
"action_name": "TopIngress.match_domain",
"action_params": {"id": globalID}
})
def dictSetUp(partNum):
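    # Ternary-match template for one domain part: each slice starts as [0, full-width mask], i.e. it
    # must equal zero (no character present). addPart<N>ToDict overwrites the slices a part occupies,
    # and a '*' part removes its keys entirely so any value matches.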
if (partNum == 1):
partsDict = {
"headers.q1_part1.part": [0, 255],
"headers.q1_part2.part": [0, 65535],
"headers.q1_part4.part": [0, 4294967295],
"headers.q1_part8_1.part": [0, 4294967295],
"headers.q1_part8_2.part": [0, 4294967295],
"headers.q1_part16_1.part": [0, 4294967295],
"headers.q1_part16_2.part": [0, 4294967295],
"headers.q1_part16_3.part": [0, 4294967295],
"headers.q1_part16_4.part": [0, 4294967295]
}
return partsDict
elif (partNum == 2):
partsDict = {
"headers.q2_part1.part": [0, 255],
"headers.q2_part2.part": [0, 65535],
"headers.q2_part4.part": [0, 4294967295],
"headers.q2_part8_1.part": [0, 4294967295],
"headers.q2_part8_2.part": [0, 4294967295],
"headers.q2_part16_1.part": [0, 4294967295],
"headers.q2_part16_2.part": [0, 4294967295],
"headers.q2_part16_3.part": [0, 4294967295],
"headers.q2_part16_4.part": [0, 4294967295],
}
return partsDict
elif (partNum == 3):
partsDict = {
"headers.q3_part1.part": [0, 255],
"headers.q3_part2.part": [0, 65535],
"headers.q3_part4.part": [0, 4294967295],
"headers.q3_part8_1.part": [0, 4294967295],
"headers.q3_part8_2.part": [0, 4294967295],
"headers.q3_part16_1.part": [0, 4294967295],
"headers.q3_part16_2.part": [0, 4294967295],
"headers.q3_part16_3.part": [0, 4294967295],
"headers.q3_part16_4.part": [0, 4294967295],
}
return partsDict
elif (partNum == 4):
partsDict = {
"headers.q4_part1.part": [0, 255],
"headers.q4_part2.part": [0, 65535],
"headers.q4_part4.part": [0, 4294967295],
"headers.q4_part8_1.part": [0, 4294967295],
"headers.q4_part8_2.part": [0, 4294967295],
"headers.q4_part16_1.part": [0, 4294967295],
"headers.q4_part16_2.part": [0, 4294967295],
"headers.q4_part16_3.part": [0, 4294967295],
"headers.q4_part16_4.part": [0, 4294967295],
}
return partsDict
elif (partNum == 5):
partsDict = {
"headers.q5_part1.part": [0, 255],
"headers.q5_part2.part": [0, 65535],
"headers.q5_part4.part": [0, 4294967295],
"headers.q5_part8_1.part": [0, 4294967295],
"headers.q5_part8_2.part": [0, 4294967295],
"headers.q5_part16_1.part": [0, 4294967295],
"headers.q5_part16_2.part": [0, 4294967295],
"headers.q5_part16_3.part": [0, 4294967295],
"headers.q5_part16_4.part": [0, 4294967295],
}
return partsDict
return -1
# Outputs a reversed, 5 digit, binary representation
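# e.g. toReversedBinary(6) == '01100' (6 -> '110' -> zero-padded to '00110' -> reversed)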
def toReversedBinary(num):
num1 = bin(num)[2::] # cut out 0b prefix
if len(num1) >= 5:
num1 = num1[len(num1)-5:len(num1):]
else:
for i in range(0, 5-len(num1)):
num1 = '0' + num1
return num1[::-1]
def addPart1ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q1_part1.part")
partsDict.pop("headers.q1_part2.part")
partsDict.pop("headers.q1_part4.part")
partsDict.pop("headers.q1_part8_1.part")
partsDict.pop("headers.q1_part8_2.part")
partsDict.pop("headers.q1_part16_1.part")
partsDict.pop("headers.q1_part16_2.part")
partsDict.pop("headers.q1_part16_3.part")
partsDict.pop("headers.q1_part16_4.part")
return partsDict
part1Spec = toReversedBinary(len(part))
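    # Bit k of part1Spec marks whether the 1-, 2-, 4-, 8- or 16-character slice is used for this
    # part length; the part's characters are consumed left to right in that order.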
charIndex = 0
if part1Spec[0] == '1':
partsDict["headers.q1_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part1Spec[1] == '1':
partsDict["headers.q1_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part1Spec[2] == '1':
partsDict["headers.q1_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part1Spec[3] == '1':
partsDict["headers.q1_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q1_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part1Spec[4] == '1':
partsDict["headers.q1_part16_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q1_part16_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q1_part16_3.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q1_part16_4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart2ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q2_part1.part")
partsDict.pop("headers.q2_part2.part")
partsDict.pop("headers.q2_part4.part")
partsDict.pop("headers.q2_part8_1.part")
partsDict.pop("headers.q2_part8_2.part")
partsDict.pop("headers.q2_part16_1.part")
partsDict.pop("headers.q2_part16_2.part")
partsDict.pop("headers.q2_part16_3.part")
partsDict.pop("headers.q2_part16_4.part")
return partsDict
part2Spec = toReversedBinary(len(part))
charIndex = 0
if part2Spec[0] == '1':
partsDict["headers.q2_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part2Spec[1] == '1':
partsDict["headers.q2_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part2Spec[2] == '1':
partsDict["headers.q2_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part2Spec[3] == '1':
partsDict["headers.q2_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q2_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part2Spec[4] == '1':
partsDict["headers.q2_part16_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q2_part16_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q2_part16_3.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q2_part16_4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart3ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q3_part1.part")
partsDict.pop("headers.q3_part2.part")
partsDict.pop("headers.q3_part4.part")
partsDict.pop("headers.q3_part8_1.part")
partsDict.pop("headers.q3_part8_2.part")
partsDict.pop("headers.q3_part16_1.part")
partsDict.pop("headers.q3_part16_2.part")
partsDict.pop("headers.q3_part16_3.part")
partsDict.pop("headers.q3_part16_4.part")
return partsDict
part3Spec = toReversedBinary(len(part))
charIndex = 0
if part3Spec[0] == '1':
partsDict["headers.q3_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part3Spec[1] == '1':
partsDict["headers.q3_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part3Spec[2] == '1':
partsDict["headers.q3_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part3Spec[3] == '1':
partsDict["headers.q3_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q3_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part3Spec[4] == '1':
partsDict["headers.q3_part16_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q3_part16_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q3_part16_3.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q3_part16_4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart4ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q4_part1.part")
partsDict.pop("headers.q4_part2.part")
partsDict.pop("headers.q4_part4.part")
partsDict.pop("headers.q4_part8_1.part")
partsDict.pop("headers.q4_part8_2.part")
partsDict.pop("headers.q4_part16_1.part")
partsDict.pop("headers.q4_part16_2.part")
partsDict.pop("headers.q4_part16_3.part")
partsDict.pop("headers.q4_part16_4.part")
return partsDict
part4Spec = toReversedBinary(len(part))
charIndex = 0
if part4Spec[0] == '1':
partsDict["headers.q4_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part4Spec[1] == '1':
partsDict["headers.q4_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part4Spec[2] == '1':
partsDict["headers.q4_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part4Spec[3] == '1':
partsDict["headers.q4_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q4_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part4Spec[4] == '1':
partsDict["headers.q4_part16_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q4_part16_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q4_part16_3.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q4_part16_4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart5ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q5_part1.part")
partsDict.pop("headers.q5_part2.part")
partsDict.pop("headers.q5_part4.part")
partsDict.pop("headers.q5_part8_1.part")
partsDict.pop("headers.q5_part8_2.part")
partsDict.pop("headers.q5_part16_1.part")
partsDict.pop("headers.q5_part16_2.part")
partsDict.pop("headers.q5_part16_3.part")
partsDict.pop("headers.q5_part16_4.part")
return partsDict
part5Spec = toReversedBinary(len(part))
charIndex = 0
if part5Spec[0] == '1':
partsDict["headers.q5_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part5Spec[1] == '1':
partsDict["headers.q5_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part5Spec[2] == '1':
partsDict["headers.q5_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part5Spec[3] == '1':
partsDict["headers.q5_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q5_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part5Spec[4] == '1':
partsDict["headers.q5_part16_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q5_part16_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q5_part16_3.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q5_part16_4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
part5Dict = {}
part4Dict = {}
part3Dict = {}
part2Dict = {}
part1Dict = {}
# If len(parts)==1
def oneparts(parts):
if parts[0] in part1Dict:
return part1Dict[parts[0]]
global globalID1
global priority1
globalID1 = globalID1 + 1
part1Dict[parts[0]] = globalID1
dict_t = dictSetUp(1)
addPart1ToDict(parts[0], dict_t)
if (parts[0] == '*'):
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q1",
"match": dict_t,
"action_name": "TopIngress.match_q1",
"priority": 1,
"action_params": {"q1id": globalID1}
})
return globalID1
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q1",
"match": dict_t,
"action_name": "TopIngress.match_q1",
"priority": priority1,
"action_params": {"q1id": globalID1}
})
priority1 = priority1 - 1
return globalID1
# If len(parts)==2
def twoparts(parts):
if parts[1] in part2Dict:
return part2Dict[parts[1]]
global globalID2
global priority2
globalID2 = globalID2 + 1
part2Dict[parts[1]] = globalID2
dict_t = dictSetUp(2)
addPart2ToDict(parts[1], dict_t)
if (parts[1] == '*'):
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q2",
"match": dict_t,
"action_name": "TopIngress.match_q2",
"priority": 1,
"action_params": {"q2id": globalID2}
})
return globalID2
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q2",
"match": dict_t,
"action_name": "TopIngress.match_q2",
"priority": priority2,
"action_params": {"q2id": globalID2}
})
priority2 = priority2 - 1
return globalID2
# If len(parts)==3
def threeparts(parts):
if parts[2] in part3Dict:
return part3Dict[parts[2]]
global globalID3
global priority3
globalID3 = globalID3 + 1
part3Dict[parts[2]] = globalID3
dict_t = dictSetUp(3)
addPart3ToDict(parts[2], dict_t)
if (parts[2] == '*'):
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q3",
"match": dict_t,
"action_name": "TopIngress.match_q3",
"priority": 1,
"action_params": {"q3id": globalID3}
})
return globalID3
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q3",
"match": dict_t,
"action_name": "TopIngress.match_q3",
"priority": priority3,
"action_params": {"q3id": globalID3}
})
priority3 = priority3 - 1
return globalID3
# If len(parts)==4
def fourparts(parts):
if parts[3] in part4Dict:
return part4Dict[parts[3]]
global globalID4
global priority4
globalID4 = globalID4 + 1
part4Dict[parts[3]] = globalID4
dict_t = dictSetUp(4)
addPart4ToDict(parts[3], dict_t)
if (parts[3] == '*'):
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q4",
"match": dict_t,
"action_name": "TopIngress.match_q4",
"priority": 1,
"action_params": {"q4id": globalID4}
})
return globalID4
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q4",
"match": dict_t,
"action_name": "TopIngress.match_q4",
"priority": priority4,
"action_params": {"q4id": globalID4}
})
priority4 = priority4 - 1
return globalID4
# If len(parts)==5
def fiveparts(parts):
if parts[4] in part5Dict:
return part5Dict[parts[4]]
global globalID5
global priority5
globalID5 = globalID5 + 1
part5Dict[parts[4]] = globalID5
dict_t = dictSetUp(5)
addPart5ToDict(parts[4], dict_t)
if (parts[4] == '*'):
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q5",
"match": dict_t,
"action_name": "TopIngress.match_q5",
"priority": 1,
"action_params": {"q5id": globalID5}
})
return globalID5
data["table_entries"].append({
"table": "TopIngress.dnsknown_domain_list_q5",
"match": dict_t,
"action_name": "TopIngress.match_q5",
"priority": priority5,
"action_params": {"q5id": globalID5}
})
priority5 = priority5 - 1
return globalID5
def creatDomainEntry(parts):
id5 = fiveparts(parts)
id4 = fourparts(parts)
id3 = threeparts(parts)
id2 = twoparts(parts)
id1 = oneparts(parts)
global globalID
globalID = globalID + 1
idDict = {
"user_metadata.q1_id": id1,
"user_metadata.q2_id": id2,
"user_metadata.q3_id": id3,
"user_metadata.q4_id": id4,
"user_metadata.q5_id": id5
}
data["table_entries"].append({
"table": "TopIngress.dnsmatch_known_domain_list",
"match": idDict,
"action_name": "TopIngress.match_domain",
"action_params": {"id": globalID}
})
def addDomainToTable(domain):
parts = domain.split('.')
numParts = len(parts)
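    # Pad missing labels with '' so every entry has five parts; for the TLS ("T") tables every label
    # except the last keeps a trailing '.' before creatDomainEntryT() is called.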
if numParts > 5:
print("error: " + domain)
return -1
if numParts == 1:
parts.append('')
parts.append('')
parts.append('')
parts.append('')
creatDomainEntry(parts)
creatDomainEntryT(parts)
elif numParts == 2:
parts.append('')
parts.append('')
parts.append('')
creatDomainEntry(parts)
parts[0] = parts[0] + '.'
creatDomainEntryT(parts)
elif numParts == 3:
parts.append('')
parts.append('')
creatDomainEntry(parts)
parts[0] = parts[0] + '.'
parts[1] = parts[1] + '.'
creatDomainEntryT(parts)
elif numParts == 4:
parts.append('')
creatDomainEntry(parts)
parts[0] = parts[0] + '.'
parts[1] = parts[1] + '.'
parts[2] = parts[2] + '.'
creatDomainEntryT(parts)
elif numParts == 5:
creatDomainEntry(parts)
parts[0] = parts[0] + '.'
parts[1] = parts[1] + '.'
parts[2] = parts[2] + '.'
parts[3] = parts[3] + '.'
creatDomainEntryT(parts)
def addBannedIpToTable(ip):
ipList = ip.split('/')
if (len(ipList) == 2):
mask = int(ipList[1])
elif (len(ipList) == 1):
mask = 32
else:
exit(-1)
ipaddr = ipList[0]
ip_dict = {
"headers.ipv4.dst": [ipaddr, mask]
}
data["table_entries"].append({
"table": "TopIngress.banned_dns_dst",
"match": ip_dict,
"action_name": "TopIngress.match_banned_dns_dst",
"action_params": {}
})
def addAllowedIpToTable(ip):
ipList = ip.split('/')
if (len(ipList) == 2):
mask = int(ipList[1])
elif (len(ipList) == 1):
mask = 32
else:
exit(-1)
ipaddr = ipList[0]
ip_dict = {
"headers.ipv4.dst": [ipaddr, mask]
}
data["table_entries"].append({
"table": "TopIngress.allowable_dns_dst",
"match": ip_dict,
"action_name": "NoAction",
"action_params": {}
})
knownlist = open('known_domains.txt', 'r')
domains = knownlist.read().split()
knownlist.close()
priority1T = len(domains) + 1
priority2T = len(domains) + 1
priority3T = len(domains) + 1
priority4T = len(domains) + 1
priority5T = len(domains) + 1
for d in domains:
addDomainToTable(d)
bannedlist = open('banned_dns_dst.txt', 'r')
bannedip = bannedlist.read().split()
bannedlist.close()
data["table_entries"].append({
"table": "TopIngress.banned_dns_dst",
"default_action": True,
"action_name": "NoAction",
"action_params": {}
})
for ip in bannedip:
addBannedIpToTable(ip)
allowedlist = open('allowed_dns_dst.txt', 'r')
allowedip = allowedlist.read().split()
allowedlist.close()
data["table_entries"].append({
"table": "TopIngress.allowable_dns_dst",
"default_action": True,
"action_name": "TopIngress.match_banned_dns_dst",
"action_params": {}
})
for ip in allowedip:
addAllowedIpToTable(ip)
with open('s1-runtime.json', 'w') as outFile:
json.dump(data, outFile, indent='\t')
```
#### File: IWSpring2020/DNS_Netassay/domain_name_filter.py
```python
from sys import argv
import csv
def matchDomain(known, domain):
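    # Wildcard-aware comparison: '*' in the known pattern matches any single label, and the label
    # counts must be identical.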
knownparts = known.split('.')
domainparts = domain.split('.')
if len(knownparts) != len(domainparts):
return False
for i in range(0, len(knownparts)):
if (knownparts[i] == '*'):
continue
if (knownparts[i] != domainparts[i]):
return False
return True
# parse the command line argument and open the file specified
if __name__ == '__main__':
if len(argv) != 3:
print('usage: python domain_name_filter.py csv_file known_domains.txt')
exit(-1)
knownlist = open(argv[2], 'r')
domains = knownlist.read().split()
knownlist.close()
with open('out_domains.csv', 'w') as csvwritefile:
csvwriter = csv.writer(csvwritefile)
with open(argv[1], 'r') as csvfile:
csvreader = csv.reader(csvfile)
for row in csvreader:
if row[0] == 'Domain':
continue
found = False
for d in domains:
if (matchDomain(d, row[0])):
found = True
break
if (found == False and float(row[2]) > 0):
csvwriter.writerow(row)
```
#### File: IWSpring2020/DNS_Netassay/knownlist_json_60.py
```python
import json
globalID = 0
globalPriority = 0
data = {}
data["target"] = "bmv2"
data["p4info"] = "build/calc2.p4.p4info.txt"
data["bmv2_json"] = "build/calc2.json"
data["table_entries"] = []
def dictSetUp():
partsDict = {
"headers.q1_part1.part": [0, 255],
"headers.q1_part2.part": [0, 65535],
"headers.q1_part4.part": [0, 4294967295],
"headers.q1_part8_1.part": [0, 4294967295],
"headers.q1_part8_2.part": [0, 4294967295],
"headers.q2_part1.part": [0, 255],
"headers.q2_part2.part": [0, 65535],
"headers.q2_part4.part": [0, 4294967295],
"headers.q2_part8_1.part": [0, 4294967295],
"headers.q2_part8_2.part": [0, 4294967295],
"headers.q3_part1.part": [0, 255],
"headers.q3_part2.part": [0, 65535],
"headers.q3_part4.part": [0, 4294967295],
"headers.q3_part8_1.part": [0, 4294967295],
"headers.q3_part8_2.part": [0, 4294967295],
"headers.q4_part1.part": [0, 255],
"headers.q4_part2.part": [0, 65535],
"headers.q4_part4.part": [0, 4294967295],
"headers.q4_part8_1.part": [0, 4294967295],
"headers.q4_part8_2.part": [0, 4294967295]
}
return partsDict
# Outputs a reversed, 5 digit, binary representation
def toReversedBinary(num):
num1 = bin(num)[2::] # cut out 0b prefix
if len(num1) >= 5:
num1 = num1[len(num1)-5:len(num1):]
else:
for i in range(0, 5-len(num1)):
num1 = '0' + num1
return num1[::-1]
def addPart1ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q1_part1.part")
partsDict.pop("headers.q1_part2.part")
partsDict.pop("headers.q1_part4.part")
partsDict.pop("headers.q1_part8_1.part")
partsDict.pop("headers.q1_part8_2.part")
return partsDict
part1Spec = toReversedBinary(len(part))
charIndex = 0
if part1Spec[0] == '1':
partsDict["headers.q1_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part1Spec[1] == '1':
partsDict["headers.q1_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part1Spec[2] == '1':
partsDict["headers.q1_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part1Spec[3] == '1':
partsDict["headers.q1_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q1_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart2ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q2_part1.part")
partsDict.pop("headers.q2_part2.part")
partsDict.pop("headers.q2_part4.part")
partsDict.pop("headers.q2_part8_1.part")
partsDict.pop("headers.q2_part8_2.part")
return partsDict
part2Spec = toReversedBinary(len(part))
charIndex = 0
if part2Spec[0] == '1':
partsDict["headers.q2_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part2Spec[1] == '1':
partsDict["headers.q2_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part2Spec[2] == '1':
partsDict["headers.q2_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part2Spec[3] == '1':
partsDict["headers.q2_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q2_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart3ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q3_part1.part")
partsDict.pop("headers.q3_part2.part")
partsDict.pop("headers.q3_part4.part")
partsDict.pop("headers.q3_part8_1.part")
partsDict.pop("headers.q3_part8_2.part")
return partsDict
part3Spec = toReversedBinary(len(part))
charIndex = 0
if part3Spec[0] == '1':
partsDict["headers.q3_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part3Spec[1] == '1':
partsDict["headers.q3_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part3Spec[2] == '1':
partsDict["headers.q3_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part3Spec[3] == '1':
partsDict["headers.q3_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q3_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
def addPart4ToDict(part, partsDict):
if (part == '*'):
partsDict.pop("headers.q4_part1.part")
partsDict.pop("headers.q4_part2.part")
partsDict.pop("headers.q4_part4.part")
partsDict.pop("headers.q4_part8_1.part")
partsDict.pop("headers.q4_part8_2.part")
return partsDict
part4Spec = toReversedBinary(len(part))
charIndex = 0
if part4Spec[0] == '1':
partsDict["headers.q4_part1.part"] = [part[charIndex], 255]
charIndex = charIndex + 1
if part4Spec[1] == '1':
partsDict["headers.q4_part2.part"] = [part[charIndex:charIndex+2], 65535]
charIndex = charIndex + 2
if part4Spec[2] == '1':
partsDict["headers.q4_part4.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
if part4Spec[3] == '1':
partsDict["headers.q4_part8_1.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
partsDict["headers.q4_part8_2.part"] = [part[charIndex:charIndex+4], 4294967295]
charIndex = charIndex + 4
return partsDict
# If len(parts)==1
def oneparts(parts):
global globalID
global globalPriority
globalID = globalID + 1
dict_t = dictSetUp()
addPart1ToDict(parts[0], dict_t)
data["table_entries"].append({
"table": "TopIngress.known_domain_list",
"match": dict_t,
"action_name": "TopIngress.match_domain",
"priority": globalPriority,
"action_params": {"id": globalID}
})
globalPriority = globalPriority - 1
return globalID
# If len(parts)==2
def twoparts(parts):
global globalID
global globalPriority
globalID = globalID + 1
dict_t = dictSetUp()
addPart1ToDict(parts[0], dict_t)
addPart2ToDict(parts[1], dict_t)
data["table_entries"].append({
"table": "TopIngress.known_domain_list",
"match": dict_t,
"action_name": "TopIngress.match_domain",
"priority": globalPriority,
"action_params": {"id": globalID}
})
globalPriority = globalPriority - 1
return globalID
# If len(parts)==3
def threeparts(parts):
global globalID
global globalPriority
globalID = globalID + 1
dict_t = dictSetUp()
addPart1ToDict(parts[0], dict_t)
addPart2ToDict(parts[1], dict_t)
addPart3ToDict(parts[2], dict_t)
data["table_entries"].append({
"table": "TopIngress.known_domain_list",
"match": dict_t,
"action_name": "TopIngress.match_domain",
"priority": globalPriority,
"action_params": {"id": globalID}
})
globalPriority = globalPriority - 1
return globalID
# If len(parts)==4
def fourparts(parts):
global globalID
global globalPriority
globalID = globalID + 1
dict_t = dictSetUp()
addPart1ToDict(parts[0], dict_t)
addPart2ToDict(parts[1], dict_t)
addPart3ToDict(parts[2], dict_t)
addPart4ToDict(parts[3], dict_t)
data["table_entries"].append({
"table": "TopIngress.known_domain_list",
"match": dict_t,
"action_name": "TopIngress.match_domain",
"priority": globalPriority,
"action_params": {"id": globalID}
})
globalPriority = globalPriority - 1
return globalID
def addDomainToTable(domain):
parts = domain.split('.')
numParts = len(parts)
if numParts > 4:
print("error: " + domain)
return -1
if numParts == 1:
oneparts(parts)
elif numParts == 2:
twoparts(parts)
elif numParts == 3:
threeparts(parts)
elif numParts == 4:
fourparts(parts)
def addBannedIpToTable(ip):
ipList = ip.split('/')
if (len(ipList) == 2):
mask = int(ipList[1])
elif (len(ipList) == 1):
mask = 32
else:
exit(-1)
ipaddr = ipList[0]
ip_dict = {
"headers.ipv4.dst": [ipaddr, mask]
}
data["table_entries"].append({
"table": "TopIngress.banned_dns_dst",
"match": ip_dict,
"action_name": "TopIngress.match_banned_dns_dst",
"action_params": {}
})
def addAllowedIpToTable(ip):
ipList = ip.split('/')
if (len(ipList) == 2):
mask = int(ipList[1])
elif (len(ipList) == 1):
mask = 32
else:
exit(-1)
ipaddr = ipList[0]
ip_dict = {
"headers.ipv4.dst": [ipaddr, mask]
}
data["table_entries"].append({
"table": "TopIngress.allowable_dns_dst",
"match": ip_dict,
"action_name": "NoAction",
"action_params": {}
})
knownlist = open('known_domains.txt', 'r')
domains = knownlist.read().split()
knownlist.close()
globalPriority = len(domains)
for d in domains:
addDomainToTable(d)
bannedlist = open('banned_dns_dst.txt', 'r')
bannedip = bannedlist.read().split()
bannedlist.close()
data["table_entries"].append({
"table": "TopIngress.banned_dns_dst",
"default_action": True,
"action_name": "NoAction",
"action_params": {}
})
for ip in bannedip:
addBannedIpToTable(ip)
allowedlist = open('allowed_dns_dst.txt', 'r')
allowedip = allowedlist.read().split()
allowedlist.close()
data["table_entries"].append({
"table": "TopIngress.allowable_dns_dst",
"default_action": True,
"action_name": "TopIngress.match_banned_dns_dst",
"action_params": {}
})
for ip in allowedip:
addAllowedIpToTable(ip)
with open('s1-runtime.json', 'w') as outFile:
json.dump(data, outFile, indent='\t')
```
#### File: PaperResults/parser/p4_graphs_full.py
```python
import operator
from sys import argv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
true_dns_total = 0
true_packets_total = 0
true_bytes_total = 0
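# Totals over the full unlimited 15-minute run; used below as denominators for per-domain traffic fractions.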
with open('unlimited_15min_full.csv') as csvfile:
#with open('unlimited0000.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] == 'Domain':
continue
true_dns_total += float(row[1])
true_packets_total += float(row[3])
true_bytes_total += float(row[4])
avg_dns = 0
avg_packet = 0
avg_byte = 0
num = 0
with open ('unlimited_percent_15min.csv', 'w') as csvout:
writer = csv.writer(csvout)
with open('unlimited_15min_full.csv') as csvfile:
#with open('unlimited0000.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] == 'Domain':
continue
dns_percent = float(row[1])/true_dns_total
packet_percent = float(row[3])/true_packets_total
bytes_percent = float(row[4])/true_bytes_total
if row[0] != '*':
avg_dns += dns_percent
avg_packet += packet_percent
avg_byte += bytes_percent
num += 1
writer.writerow([row[0], dns_percent, packet_percent, bytes_percent])
print(avg_dns / num)
print(avg_packet / num)
print(avg_byte / num)
#scatter_compare(python_byt, p4_byt)
#rank_compare(python_byt, p4_byt)
'''fig, ax = plt.subplots()
def reverse_dict_sort(d):
return dict(sorted(d.items(), key=operator.itemgetter(1),reverse=True))
def cumulative_graph(d, words, descrip):
sum_d = 0
for i in d.values():
sum_d = sum_d + i
cumulative = 0
cum_percent = [1]
keys = [0]
rank = 1
for i in d.items():
if i[1] == 0:
break
cumulative = cumulative + i[1] / sum_d
cum_percent.append(1-cumulative)
if (words):
keys.append(i[0])
else:
keys.append(rank)
rank = rank + 1
line, = ax.plot(keys, cum_percent)
line.set_label(descrip)
ax.legend()
if words:
ax.tick_params(axis='x', rotation=70, labelsize = 5)
ax.set(xlabel='Domain Rank', ylabel='Cumulative Ratio of Traffic', title='Cumulative Fraction of Traffic Contributed By Top Domains')
ax.grid()
fig.savefig("byt_p4_cumulative.png")
python_dns = {}
python_pac = {}
python_byt = {}
# Python script file
f = open('DNS_name_2.txt', 'r')
flist = f.read().split('\n')
numBytesList = []
percentDNSLost = []
percentPacketsLost = []
percentBytesLost = []
count = 4
for i in flist:
row = i.split()
numBytesList.append(count)
count = count + 4
percentDNSLost.append(1 - float(row[1]))
percentPacketsLost.append(1 - float(row[2]))
percentBytesLost.append(1 - float(row[3]))
line1, = ax.plot(numBytesList, percentDNSLost)
line1.set_label('Traffic by DNS Queries')
line2, = ax.plot(numBytesList, percentPacketsLost)
line2.set_label('Traffic by Packets')
line3, = ax.plot(numBytesList, percentBytesLost)
line3.set_label('Traffic by Bytes')
ax.legend()
ax.set(xlabel='Maximum Bytes allowed in Domain Name Parser', ylabel='Ratio of Traffic Lost', title='Percentage of Traffic Lost Due to Domain Name Parser Limitations')
ax.grid()
fig.savefig("dns_parser_limit.png")
plt.show()
#scatter_compare(python_byt, p4_byt)
#rank_compare(python_byt, p4_byt)
'''
```
#### File: Relative_error/15min_timeout100/combined_sim_v1.py
```python
from sys import argv
import dpkt
import csv
import socket
import ipaddress
import pickle
import zlib
import numpy as np
import statistics
import crc16
# Data structure and global variables
allowed_ips = []
banned_ips = []
known_domains = []
unlimitedNetTable = {}
unlimitedKnownDict = {}
netassayTables_stages = {}
knownlistDicts_stages = {}
usedHashes = {}
TIMEOUT = 100 # standard timeout
def hash_function(ip1, ip2, salt):
return np.uint32(((0x0000ffff & ip1) << 32) + (0x0000ffff & ip2) + salt)
def is_subnet_of(a, b):
return (b.network_address <= a.network_address and b.broadcast_address >= a.broadcast_address)
def parse_dns_response(ip_packet, ts):
# Check if it is in the allowed or banned IP lists
clientIP = socket.inet_ntoa(ip_packet.dst)
cip_object = ipaddress.ip_network(clientIP)
allowed = False
for ip in allowed_ips:
if is_subnet_of(cip_object, ip):
allowed = True
break
if (not allowed):
return
for ip in banned_ips:
if is_subnet_of(cip_object, ip):
return
try:
dns = dpkt.dns.DNS(ip_packet.data.data)
except:
return
answers = dns.an
if len(answers) <= 0:
return
domain = answers[0].name
domain_name = domain.split('.')
# Parser limitations
if (len(domain_name) > 4):
return
for part in domain_name:
if (len(part) > 15):
return
for d in known_domains:
if (matchDomain(d, domain)):
for rr in answers:
if (rr.type != 1):
continue
if (rr.type == 1): #DNS.A
entry = unlimitedKnownDict[d]
unlimitedKnownDict[d][0] = unlimitedKnownDict[d][0] + 1
serverIP = socket.inet_ntoa(rr.rdata)
key = clientIP + serverIP
unlimitedNetTable[key] = d
break
break
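    # Repeat the matching for every simulated configuration: g hash stages sharing 2**q slots in total
    # (each stage gets 2**q / g), with per-stage salts and slot maps in usedHashes[g][q][z].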
for g in [1, 2, 4, 8]:
for q in range(0, 34, 2):
modulo = int((2 ** q) / g)
for d in known_domains:
if (matchDomain(d, domain)):
for rr in answers:
if (rr.type != 1):
continue
if (rr.type == 1): #DNS.A
entry = knownlistDicts_stages[g][q][d]
knownlistDicts_stages[g][q][d][0] = knownlistDicts_stages[g][q][d][0] + 1
serverIP = socket.inet_ntoa(rr.rdata)
serverIP32 = np.uint64(int.from_bytes(socket.inet_aton(serverIP), byteorder='big'))
clientIP32 = np.uint64(int.from_bytes(socket.inet_aton(clientIP), byteorder='big'))
#serverIP32 = int.from_bytes(socket.inet_aton(serverIP), byteorder='big')
#clientIP32 = int.from_bytes(socket.inet_aton(clientIP), byteorder='big')
salts = [np.uint64(134140211), np.uint64(187182238), np.uint64(187238), np.uint64(1853238), np.uint64(1828), np.uint64(12238), np.uint64(72134), np.uint64(152428), np.uint64(164314534), np.uint64(223823)]
key = clientIP + serverIP
for z in range(0, 8):
if modulo > 0:
hashz = (zlib.crc32(np.uint64(serverIP32 + clientIP32 + salts[z]))& 0xffffffff) % modulo
#hashz = hash_function(serverIP32, clientIP32, salts[z]) % modulo
else:
hashz = 0
if(not hashz in usedHashes[g][q][z]):
usedHashes[g][q][z][hashz] = [ts, key, domain]
elif (ts - usedHashes[g][q][z][hashz][0] > TIMEOUT): # timestamp expires
netassayTables_stages[g][q][z].pop(usedHashes[g][q][z][hashz][1])
usedHashes[g][q][z][hashz] = [ts, key, domain]
elif(usedHashes[g][q][z][hashz][1] == key): # update timestamp for existing entry
usedHashes[g][q][z][hashz] = [ts, key, domain]
elif(g < z + 2):
knownlistDicts_stages[g][q][d][3] = knownlistDicts_stages[g][q][d][3]+1
break
else:
continue
netassayTables_stages[g][q][z][key] = d
break
break
break
def parse_tcp(packet_len, ip_packet, ts):
source = socket.inet_ntoa(ip_packet['src']) #server
dest = socket.inet_ntoa(ip_packet['dst']) #client
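    # Table keys were stored as clientIP + serverIP, so for server-to-client data packets the lookup
    # key is destination (client) followed by source (server).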
key = dest + source
if key in unlimitedNetTable:
d = unlimitedNetTable[key]
unlimitedKnownDict[d][1] = unlimitedKnownDict[d][1] + 1
unlimitedKnownDict[d][2] = unlimitedKnownDict[d][2] + packet_len
serverIP32 = np.uint64(int.from_bytes(socket.inet_aton(source), byteorder='big'))
clientIP32 = np.uint64(int.from_bytes(socket.inet_aton(dest), byteorder='big'))
#serverIP32 = int.from_bytes(socket.inet_aton(source), byteorder='big')
#clientIP32 = int.from_bytes(socket.inet_aton(dest), byteorder='big')
salts = [np.uint64(134140211), np.uint64(187182238), np.uint64(187238), np.uint64(1853238), np.uint64(1828), np.uint64(12238), np.uint64(72134), np.uint64(152428), np.uint64(164314534), np.uint64(223823)]
for g in [1, 2, 4, 8]:
for q in range(0, 34, 2):
modulo = int((2 ** q) / g)
for z in range(0, 8):
if (z + 1 > g):
break
if key in netassayTables_stages[g][q][z]:
d = netassayTables_stages[g][q][z][key]
knownlistDicts_stages[g][q][d][1] = knownlistDicts_stages[g][q][d][1] + 1
knownlistDicts_stages[g][q][d][2] = knownlistDicts_stages[g][q][d][2] + packet_len
if modulo > 0:
hashz = (zlib.crc32(np.uint64(serverIP32 + clientIP32 + salts[z]))& 0xffffffff) % modulo
#hashz = hash_function(serverIP32, clientIP32, salts[z]) % modulo
else:
hashz = 0
if hashz in usedHashes[g][q][z] and usedHashes[g][q][z][hashz][1] == key:
usedHashes[g][q][z][hashz][0] = ts
else:
print("error in hash storage")
exit(-1)
break
def matchDomain(known, domain):
knownparts = known.split('.')
domainparts = domain.split('.')
if len(knownparts) != len(domainparts):
return False
for i in range(0, len(knownparts)):
if (knownparts[i] == '*'):
continue
if (knownparts[i] != domainparts[i]):
return False
return True
# parse the command line argument and open the file specified
if __name__ == '__main__':
if len(argv) != 5:
print('usage: python netassay_python3_p4sim.py pickleFile knownlist.txt allowed_dns_dst.txt banned_dns_dst.txt')
exit(-1)
# Parse allowed IP and banned IP files
allowed_ip_file = open(argv[3], 'r')
allowed_ip_list = allowed_ip_file.read().split()
allowed_ip_file.close()
for ip in allowed_ip_list:
allowed_ips.append(ipaddress.ip_network(ip))
banned_ip_file = open(argv[4], 'r')
banned_ip_list = banned_ip_file.read().split()
banned_ip_file.close()
for ip in banned_ip_list:
banned_ips.append(ipaddress.ip_network(ip))
# Create knownlist
knownlist = open(argv[2], 'r')
known_domains = knownlist.read().split()
knownlist.close()
for d in known_domains:
unlimitedKnownDict[d] = [0, 0, 0, 0, 0, 0]
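    # Pre-build the per-configuration state: for each stage count i and memory exponent q, a fresh
    # per-domain stats dict plus per-stage NetAssay tables and hash-slot maps.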
for i in [1, 2, 4, 8]:
knownlistDict_mem = {}
netassayTable_mem = {}
usedHash_mem = {}
for q in range(0, 34, 2):
knownlistDict_q = {}
for d in known_domains:
knownlistDict_q[d] = [0, 0, 0, 0, 0, 0]
usedHash_individual_run = []
netTable_individual = []
for l in range(0, 8):
usedHash_individual_run.append({})
netTable_individual.append({})
knownlistDict_mem[q] = (knownlistDict_q)
netassayTable_mem[q] = (netTable_individual)
usedHash_mem[q] = (usedHash_individual_run)
knownlistDicts_stages[i] = knownlistDict_mem
netassayTables_stages[i] = netassayTable_mem
usedHashes[i] = usedHash_mem
f = open(argv[1], 'rb')
pcap_obj = pickle.load(f)
f.close()
num_packets = len(pcap_obj)
packet_count = 0.0
for p in pcap_obj:
ts = p[0]
dns_code = p[1]
ip = p[2]
# For each packet parse the dns responses
if (dns_code == -1):
#try:
parse_dns_response(ip, ts)
'''except Exception as e:
print(e)
continue'''
else:
parse_tcp(dns_code, ip, ts)
packet_count += 1
if (packet_count % 1000 == 0):
print(packet_count / num_packets)
outfile_stage = open('stage_limits.txt', 'w')
for v in [1, 2, 4, 8]:
for c in range(0, 34, 2):
packet_errors = []
byte_errors = []
with open('stage_limit' + str(v) + '_' + str(c) + '.csv', 'w') as csvfile:
w = csv.writer(csvfile)
w.writerow(["Domain", "Number of DNS requests", "Missed DNS requests missed", "Number of Packets", "Number of Bytes", "Estimated Packets", "Estimated Bytes", "Error_Packets", "Error_Bytes"])
for k in knownlistDicts_stages[v][c].keys():
num_packets = knownlistDicts_stages[v][c][k][1]
num_bytes = knownlistDicts_stages[v][c][k][2]
num_missed = knownlistDicts_stages[v][c][k][3]
num_dns = knownlistDicts_stages[v][c][k][0]
error_packet = -1
error_byte = -1
if (num_dns > 0 and num_missed < num_dns):
knownlistDicts_stages[v][c][k][4] = num_packets / (1 - (num_missed / num_dns))
knownlistDicts_stages[v][c][k][5] = num_bytes / (1 - (num_missed / num_dns))
if (unlimitedKnownDict[k][1] > 0):
error_packet = abs(unlimitedKnownDict[k][1] - knownlistDicts_stages[v][c][k][4]) / unlimitedKnownDict[k][1]
packet_errors.append(error_packet)
if (unlimitedKnownDict[k][2] > 0):
error_byte = abs(unlimitedKnownDict[k][2] - knownlistDicts_stages[v][c][k][5]) / unlimitedKnownDict[k][2]
byte_errors.append(error_byte)
w.writerow([k, num_dns, num_missed, num_packets, num_bytes, knownlistDicts_stages[v][c][k][4], knownlistDicts_stages[v][c][k][5], error_packet, error_byte])
packet_error_med = statistics.median(packet_errors)
byte_error_med = statistics.median(byte_errors)
total_dns = 0
total_packets = 0
total_bytes = 0
total_dns_missed = 0
total_est_packets = 0
total_est_bytes = 0
for l in knownlistDicts_stages[v][c].items():
total_dns += l[1][0]
total_packets += l[1][1]
total_bytes += l[1][2]
total_dns_missed += l[1][3]
total_est_packets += l[1][4]
total_est_bytes += l[1][5]
outfile_stage.write(str(total_dns)+','+str(total_packets)+','+str(total_bytes)+','+str(total_dns_missed)+','+str(total_est_packets)+','+str(total_est_bytes)+','+str(packet_error_med)+','+str(byte_error_med)+'\n')
outfile_stage.write('*')
outfile_stage.close()
```
#### File: PaperResults/total_memory/memory_limits.py
```python
from sys import argv
import dpkt
import csv
import socket
import ipaddress
import pickle
import crc16
import numpy as np
import statistics
# Data structure and global variables
allowed_ips = []
banned_ips = []
known_domains = []
knownlistDict = {} # Key is knownlist domain, values are number of dns, number of packets, number of bytes, number missed dns, estimated packets, estimated bytes
netassayTable = {} # Key is concatenation of client IP and server IP. Value is a knownlist domain name
usedHash1 = {}
usedHash2 = {}
TABLE_SIZE = 0
TIMEOUT = 300
def is_subnet_of(a, b):
return (b.network_address <= a.network_address and b.broadcast_address >= a.broadcast_address)
def parse_dns_response(ip_packet, ts):
# Check if it is in the allowed or banned IP lists
clientIP = socket.inet_ntoa(ip_packet.dst)
cip_object = ipaddress.ip_network(clientIP)
allowed = False
for ip in allowed_ips:
if is_subnet_of(cip_object, ip):
allowed = True
break
if (not allowed):
return
for ip in banned_ips:
if is_subnet_of(cip_object, ip):
return
dns = dpkt.dns.DNS(ip_packet.data.data)
answers = dns.an
domain = answers[0].name
domain_name = domain.split('.')
# Parser limitations
if (len(domain_name) > 4):
return
for part in domain_name:
if (len(part) > 15):
return
global TIMEOUT
global TABLE_SIZE
for d in known_domains:
if (matchDomain(d, domain)):
for rr in answers:
if (rr.type != 1):
continue
if (rr.type == 1): #DNS.A
entry = knownlistDict[d]
knownlistDict[d][0] = knownlistDict[d][0] + 1
serverIP = socket.inet_ntoa(rr.rdata)
serverIP32 = np.uint32(int.from_bytes(socket.inet_aton(serverIP), byteorder='big'))
clientIP32 = np.uint32(int.from_bytes(socket.inet_aton(clientIP), byteorder='big'))
salt1 = np.uint32(134140211)
salt2 = np.uint32(187182238)
key = clientIP + serverIP
hash1 = crc16.crc16xmodem(np.uint32(serverIP32 + clientIP32 + salt1)) % TABLE_SIZE
hash2 = crc16.crc16xmodem(np.uint32(serverIP32 + clientIP32 + salt2)) % TABLE_SIZE
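                    # Two-choice hashing: try slot hash1, then hash2; a slot is reclaimed when its
                    # last-seen timestamp is older than TIMEOUT, and a full collision is counted as
                    # a missed DNS entry for this domain.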
if(not hash1 in usedHash1):
usedHash1[hash1] = [ts, key, domain]
elif (ts - usedHash1[hash1][0] > TIMEOUT): # timestamp expires
netassayTable.pop(usedHash1[hash1][1])
usedHash1[hash1] = [ts, key, domain]
elif(usedHash1[hash1][1] == key): # update timestamp for existing entry
usedHash1[hash1] = [ts, key, domain]
elif(not hash2 in usedHash2):
usedHash2[hash2] = [ts, key, domain]
elif (ts - usedHash2[hash2][0] > TIMEOUT): # timestamp expires
netassayTable.pop(usedHash2[hash2][1])
usedHash2[hash2] = [ts, key, domain]
elif(usedHash2[hash2][1] == key): # update timestamp for existing entry
usedHash2[hash2] = [ts, key, domain]
else:
knownlistDict[d][3] = knownlistDict[d][3]+1
return
netassayTable[key] = d
break
break
def parse_tcp(packet_len, ip_packet, ts):
source = socket.inet_ntoa(ip_packet['src']) #server
dest = socket.inet_ntoa(ip_packet['dst']) #client
global TIMEOUT
global TABLE_SIZE
key = dest + source
if key in netassayTable:
d = netassayTable[key]
knownlistDict[d][1] = knownlistDict[d][1] + 1
knownlistDict[d][2] = knownlistDict[d][2] + packet_len
serverIP32 = np.uint32(int.from_bytes(socket.inet_aton(source), byteorder='big'))
clientIP32 = np.uint32(int.from_bytes(socket.inet_aton(dest), byteorder='big'))
salt1 = np.uint32(134140211)
salt2 = np.uint32(187182238)
hash1 = crc16.crc16xmodem(np.uint32(serverIP32 + clientIP32 + salt1)) % TABLE_SIZE
hash2 = crc16.crc16xmodem(np.uint32(serverIP32 + clientIP32 + salt2)) % TABLE_SIZE
if hash1 in usedHash1 and usedHash1[hash1][1] == key:
usedHash1[hash1][0] = ts
elif hash2 in usedHash2 and usedHash2[hash2][1] == key:
usedHash2[hash2][0] = ts
else:
print("error in hash storage")
exit(-1)
def matchDomain(known, domain):
knownparts = known.split('.')
domainparts = domain.split('.')
if len(knownparts) != len(domainparts):
return False
for i in range(0, len(knownparts)):
if (knownparts[i] == '*'):
continue
if (knownparts[i] != domainparts[i]):
return False
return True
# parse the command line argument and open the file specified
if __name__ == '__main__':
if len(argv) != 6:
print('usage: python netassay_python3_p4sim.py pickleFile knownlist.txt allowed_dns_dst.txt banned_dns_dst.txt outfilename')
exit(-1)
true_60 = {} # key is domain value is [packets, bytes]
with open('parse60_15min.csv') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if row[0] == 'Domain':
continue
true_60[row[0]] = [float(row[3]), float(row[4])]
# Parse allowed IP and banned IP files
allowed_ip_file = open(argv[3], 'r')
allowed_ip_list = allowed_ip_file.read().split()
allowed_ip_file.close()
for ip in allowed_ip_list:
allowed_ips.append(ipaddress.ip_network(ip))
banned_ip_file = open(argv[4], 'r')
banned_ip_list = banned_ip_file.read().split()
banned_ip_file.close()
for ip in banned_ip_list:
banned_ips.append(ipaddress.ip_network(ip))
# Create knownlist
knownlist = open(argv[2], 'r')
known_domains = knownlist.read().split()
knownlist.close()
f = open(argv[1], 'rb')
pcap_obj = pickle.load(f)
f.close()
outfile = open(argv[5], 'w')
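    # Sweep table sizes 2**0 .. 2**32, replaying the same pickled packet trace for each size.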
for i in range(0, 33):
TABLE_SIZE = 2 ** i
print(i)
knownlistDict = {}
netassayTable = {}
usedHash1 = {}
usedHash2 = {}
for d in known_domains:
knownlistDict[d] = [0, 0, 0, 0, 0, 0]
for p in pcap_obj:
ts = p[0]
dns_code = p[1]
ip = p[2]
# For each packet parse the dns responses
if (dns_code == -1):
try:
parse_dns_response(ip, ts)
except Exception as e:
continue
else:
parse_tcp(dns_code, ip, ts)
packet_errors = []
byte_errors = []
with open('memory_limit' + str(i) + '.csv', 'w') as csvfile:
w = csv.writer(csvfile)
w.writerow(["Domain", "Number of DNS requests", "Missed DNS requests missed", "Number of Packets", "Number of Bytes", "Estimated Packets", "Estimated Bytes", "Error_Packets", "Error_Bytes"])
for j in knownlistDict.keys():
num_packets = knownlistDict[j][1]
num_bytes = knownlistDict[j][2]
num_missed = knownlistDict[j][3]
num_dns = knownlistDict[j][0]
error_packet = -1
error_byte = -1
if (num_dns > 0 and num_missed < num_dns):
knownlistDict[j][4] = num_packets / (1 - (num_missed / num_dns))
knownlistDict[j][5] = num_bytes / (1 - (num_missed / num_dns))
if (true_60[j][0] > 0):
error_packet = abs(true_60[j][0] - knownlistDict[j][4]) / true_60[j][0]
packet_errors.append(error_packet)
if (true_60[j][1] > 0):
error_byte = abs(true_60[j][1] - knownlistDict[j][5]) / true_60[j][1]
byte_errors.append(error_byte)
w.writerow([j, num_dns, num_missed, num_packets, num_bytes, knownlistDict[j][4], knownlistDict[j][5], error_packet, error_byte])
packet_error_med = statistics.median(packet_errors)
byte_error_med = statistics.median(byte_errors)
total_dns = 0
total_packets = 0
total_bytes = 0
total_dns_missed = 0
total_est_packets = 0
total_est_bytes = 0
        for entry in knownlistDict.items():
            total_dns += entry[1][0]
            total_packets += entry[1][1]
            total_bytes += entry[1][2]
            total_dns_missed += entry[1][3]
            total_est_packets += entry[1][4]
            total_est_bytes += entry[1][5]
outfile.write(str(total_dns)+','+str(total_packets)+','+str(total_bytes)+','+str(total_dns_missed)+','+str(total_est_packets)+','+str(total_est_bytes)+','+str(packet_error_med)+','+str(byte_error_med)+'\n')
outfile.close()
```
#### File: IWSpring2020/IOT_Fingerprint/netassay_python_preprocess_dedup.py
```python
from sys import argv
from sys import exit
import dpkt
import pickle
import glob
import os
import datetime
def check_dup(store_dict, value):
for d in store_dict:
indict_ts = store_dict[d][1]
given_ts = value[1]
# If same and timestamp is within 0.5 second
if (store_dict[d][0] == value[0]) and (abs(indict_ts - given_ts) < 0.5):
return True
return False
# parse the command line argument and open the file specified
if __name__ == '__main__':
if len(argv) != 3:
print('usage: python netassay_python3.py pcap_directory outfileName')
exit(-1)
outFile = open(argv[2], 'wb')
ethPacketList = []
# Add slash if not there
input_dir = argv[1]
if not argv[1].endswith("/"):
input_dir = input_dir + "/"
# List files by ascending modification time
files = glob.glob(input_dir + "*.pcap*")
files.sort(key=os.path.getmtime)
# data structure for dedup
dedup_dict = {}
index = 0
# Go through files
for thisf in files:
with open(thisf, 'rb') as f:
try:
pcap_obj = dpkt.pcap.Reader(f)
#pcap_obj = dpkt.pcapng.Reader(f)
except:
pcap_obj = dpkt.pcap.Reader(f)
for ts, buf in pcap_obj:
eth = dpkt.ethernet.Ethernet(buf)
if (eth.type != 2048):
continue
ip = eth.data
protocol = ip.p
                packet_len = len(eth)
packet_processed = False
try:
if (protocol == 17 and ip.data.sport == 53):
ip_header_selected = {
'_v_hl':ip._v_hl,
'tos':ip.tos,
'len':ip.len,
'id':ip.id,
'p':ip.p,
'src':ip.src,
'dst':ip.dst,
'src_port': ip.data.sport,
'dst_port': ip.data.dport
}
# check dup. If yes, skip
if check_dup(dedup_dict, (ip_header_selected,ts)):
                            continue  # duplicate DNS response seen within 0.5 s; skip it
# If DNS, we want the entire IP packet
ethPacketList.append([ts, -1, ip]) # 0 is to indicate DNS response
packet_processed = True
dedup_dict[index%4] = (ip_header_selected,ts)
index += 1
except:
pass
try:
if (packet_processed == False):
# Else, we just want the IP header
ip_header = {
'_v_hl':ip._v_hl,
'tos':ip.tos,
'len':ip.len,
'id':ip.id,
'off':ip.off,
'ttl':ip.ttl,
'p':ip.p,
'sum':ip.sum,
'src':ip.src,
'dst':ip.dst,
'src_port': ip.data.sport,
'dst_port': ip.data.dport
}
ethPacketList.append([ts, packet_len, ip_header])
except Exception as e:
pass
pickle.dump(ethPacketList, outFile)
outFile.close()
```
#### File: IWSpring2020/OldVersions/pcapanalysis.py
```python
from sys import argv
import dpkt
import socket
TOTAL_DNS_RESPONSE_COUNT = 0
NUMBER_DOMAINS_LARGE_PART = 0
cnameCountDict = {}
serverIpPrecedenceDict = {}
serverIpUsed = {}
precedenceResultsByPairing = {}
precedenceResultsByPacket = {}
NUM_CLIENTS = 0
NUM_PACKETS = 0
def parse_dns_response(ip_packet):
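    # Count CNAME records per response, flag domain names with a label longer than 15 characters,
    # and remember the position (precedence) of each A record for every client/server IP pair.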
global TOTAL_DNS_RESPONSE_COUNT
global NUMBER_DOMAINS_LARGE_PART
TOTAL_DNS_RESPONSE_COUNT = TOTAL_DNS_RESPONSE_COUNT + 1
dns = dpkt.dns.DNS(ip_packet.data.data)
answers = dns.an
cname_count = 0
ipPrecedence = 1
for rr in answers:
if (rr.type == 5): #DNS.CNAME
cname_count = cname_count + 1
elif (rr.type == 1): #DNS.A
domain_name = rr.name.split('.')
for part in domain_name:
if (len(part) > 15):
NUMBER_DOMAINS_LARGE_PART = NUMBER_DOMAINS_LARGE_PART + 1
break
clientIP = socket.inet_ntoa(ip_packet.dst)
serverIP = socket.inet_ntoa(rr.rdata)
serverIpPrecedenceDict[clientIP + serverIP] = ipPrecedence
serverIpUsed[clientIP + serverIP] = False
ipPrecedence = ipPrecedence + 1
if cname_count in cnameCountDict:
cnameCountDict[cname_count] = cnameCountDict[cname_count] + 1
else:
cnameCountDict[cname_count] = 1
def parse_tcp(ip_packet):
source = socket.inet_ntoa(ip_packet.src) #client
dest = socket.inet_ntoa(ip_packet.dst) #server
key = source + dest
if (key in serverIpPrecedenceDict):
global NUM_PACKETS
NUM_PACKETS = NUM_PACKETS + 1
ipPrecedence = serverIpPrecedenceDict[key]
if (not serverIpUsed[key]):
global NUM_CLIENTS
NUM_CLIENTS = NUM_CLIENTS + 1
serverIpUsed[key] = True
if (ipPrecedence in precedenceResultsByPairing):
precedenceResultsByPairing[ipPrecedence] = precedenceResultsByPairing[ipPrecedence] + 1
else:
precedenceResultsByPairing[ipPrecedence] = 1
if (ipPrecedence in precedenceResultsByPacket):
precedenceResultsByPacket[ipPrecedence] = precedenceResultsByPacket[ipPrecedence] + 1
else:
precedenceResultsByPacket[ipPrecedence] = 1
# parse the command line argument and open the file specified
if __name__ == '__main__':
if len(argv) != 2:
print('usage: python pcapanalysis.py capture.pcap')
exit(-1)
with open(argv[1], 'rb') as f:
pcap_obj = dpkt.pcap.Reader(f)
for ts, buf in pcap_obj:
eth = dpkt.ethernet.Ethernet(buf)
if (eth.type != 2048): # If not IPV4
continue
ip = eth.data
protocol = ip.p
if (protocol == 17 and ip.data.sport == 53):
parse_dns_response(ip)
else:
parse_tcp(ip)
# Final Stats report
print("Total Number of DNS Response: " + str(TOTAL_DNS_RESPONSE_COUNT))
for x in cnameCountDict.items():
print(str(x[0]) + ' CNAME entries -> ' + str(x[1]) + ' DNS responses')
print("*********************************************************\n")
print("Number of domain names with a part larger than 15 characters: " + str(NUMBER_DOMAINS_LARGE_PART))
print("*********************************************************\n")
print("Total number of individual clients: " + str(NUM_CLIENTS))
for x in precedenceResultsByPairing.items():
print("Number of clients that used IP address in DNS response of precedence: " + str(x[0]) + ": " + str(x[1]))
if (1 in precedenceResultsByPairing):
print("Percentage of clients that used the first IP address from the DNS response: " + str(precedenceResultsByPairing[1] / float(NUM_CLIENTS)))
else:
print("Percentage of clients that used the first IP address from the DNS response: 0")
print("*********************************************************\n")
print("Total number of packets: " + str(NUM_PACKETS))
for x in precedenceResultsByPacket.items():
print("Number of packets from clients that used IP address in DNS response of precedence: " + str(x[0]) + ": " + str(x[1]))
if (1 in precedenceResultsByPacket):
print("Percentage of packets that used the first IP address from the DNS response: " + str(precedenceResultsByPacket[1] / float(NUM_PACKETS)))
else:
print("Percentage of packets that used the first IP address from the DNS response: 0")
print("*********************************************************")
``` |
{
"source": "jkim1881/BDCN",
"score": 2
} |
#### File: BDCN/extra/min_w_slack.py
```python
import numpy as np
def get_column_as_list(arr, slack, pad_value=0):
arr_temp_list = []
for xslack in range(slack*2+1):
real_xslack = xslack-slack
if real_xslack < 0:
arr_temp = np.pad(arr.copy()[:,:real_xslack], ((0,0),(0,-real_xslack)),
mode='constant', constant_values=pad_value)
elif real_xslack > 0:
arr_temp = np.pad(arr.copy()[:,real_xslack:], ((0,0),(real_xslack, 0)),
mode='constant', constant_values=pad_value)
else:
arr_temp = arr.copy()
for yslack in range(slack*2+1):
real_yslack = yslack-slack
if real_yslack < 0:
arr_temp_ = np.pad(arr_temp.copy()[:real_yslack, :], ((0, -real_yslack), (0, 0)),
mode='constant',constant_values=pad_value)
elif real_yslack > 0:
arr_temp_ = np.pad(arr_temp.copy()[real_yslack:, :], ((real_yslack, 0), (0, 0)),
mode='constant',constant_values=pad_value)
else:
arr_temp_ = arr_temp.copy()
arr_temp_list.append(arr_temp_)
return arr_temp_list
def min_w_slack(yhat, y, dist_func, slack, pad_value=0):
y_column = np.array(get_column_as_list(y, slack, pad_value=pad_value))
dist_image = np.min(dist_func(yhat, y_column), axis=0)
return dist_image
if __name__ == '__main__':
collist = get_column_as_list(np.ones((4,4)), slack=2, pad_value=0)
```
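A small usage sketch for the helpers above (assuming `extra/` is on the import path so the module is importable as `min_w_slack`): compare a prediction against a shifted ground-truth map with an L1 distance, so each pixel is scored against the best-matching position inside the slack window.

```python
import numpy as np

from min_w_slack import min_w_slack  # assumes extra/ is on sys.path

yhat = np.zeros((4, 4))
yhat[1, 1] = 1.0
y = np.zeros((4, 4))
y[2, 2] = 1.0                      # ground truth is off by one pixel

l1 = lambda a, b: np.abs(a - b)    # broadcasts (4, 4) against (K, 4, 4)
dist = min_w_slack(yhat, y, l1, slack=1)
print(dist.sum())                  # 0.0 here, versus 2.0 without any slack
```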
#### File: jkim1881/BDCN/test_tiltillusion.py
```python
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
import argparse
import time
import re
import os
import sys
import bdcn, bdcn_decoder
from datasets.dataset import BSDS_crops, Multicue_crops, Tilt_illusion
import cfg
import log
import cv2
def l2_loss_center(out, labels):
_, _, h, w = out.size()
out_center = out[:, :, h//2, w//2]
return out_center, torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')(out_center, labels)
def l2_loss(out, labels):
# ipdb > out.shape
# torch.Size([B, 2, 1, 1])
# ipdb > labels.shape
# torch.Size([B, 1, 2])
labels = labels.permute(0,2,1).unsqueeze(3)
return torch.nn.MSELoss(size_average=None, reduce=None, reduction='mean')(out, labels)
def orientation_diff(array1, array2):
concat = np.concatenate((np.expand_dims(array1, axis=1),
np.expand_dims(array2, axis=1)), axis=1)
diffs = np.concatenate((np.expand_dims(concat[:,0] - concat[:,1], axis=1),
np.expand_dims(concat[:,0] - concat[:,1] - 180, axis=1),
np.expand_dims(concat[:,0] - concat[:,1] + 180, axis=1)), axis=1)
diffs_argmin = np.argmin(np.abs(diffs),axis=1)
return [idiff[argmin] for idiff, argmin in zip(diffs, diffs_argmin)]
def cluster_points(xs, ys, stepsize):
xss = list(xs)
sort_args = np.array(xss).argsort()
xss.sort()
ys_sorted = np.array(ys)[sort_args]
x_accumulator = []
y_mu = []
y_25 = []
y_75 = []
x_perbin = []
y_perbin = []
icut = -90 + stepsize
for ix, iy in zip(xss, ys_sorted):
if ix < icut:
x_perbin.append(ix)
y_perbin.append(iy)
else:
if len(y_perbin) > 0:
x_accumulator.append(icut - stepsize / 2)
y_mu.append(np.median(y_perbin))
y_25.append(np.percentile(y_perbin, 25))
y_75.append(np.percentile(y_perbin, 75))
icut += stepsize
x_perbin = []
y_perbin = []
return x_accumulator, y_mu, y_25, y_75
def collapse_points(cs_diff, out_gt_diff):
cs_diff_collapsed =[]
out_gt_diff_collapsed = []
for ix, iy in zip(cs_diff, out_gt_diff):
if ix < -10:
cs_diff_collapsed.append(-ix)
out_gt_diff_collapsed.append(-iy)
else:
cs_diff_collapsed.append(ix)
out_gt_diff_collapsed.append(iy)
return cs_diff_collapsed, out_gt_diff_collapsed
def screen(r1, lambda1, theta, r1min=None, r1max=None, lambda1min=None, lambda1max=None, thetamin=None, thetamax=None):
if np.array(r1).size > 1:
        cond = np.ones_like(r1).astype(bool)
else:
cond = True
if r1min is not None:
cond = cond * (r1 > r1min)
if r1max is not None:
cond = cond * (r1 < r1max)
if lambda1min is not None:
cond = cond * (lambda1 > lambda1min)
if lambda1max is not None:
cond = cond * (lambda1 < lambda1max)
if thetamin is not None:
cond = cond * ((theta > thetamin) | (theta > thetamin+180))
if thetamax is not None:
cond = cond * (theta < thetamax)
return cond
def train(model, args):
# Configure datasets
# import ipdb;
# ipdb.set_trace()
print(args.dataset)
crop_size = args.crop_size
if 'tiltillusion' in args.dataset:
data_root = '/media/data_cifs/tilt_illusion'
# Construct data loader
test_img = Tilt_illusion(data_root, type='test', test_mode=True,
max_examples=args.max_test_examples, scale=[0.3], crop_size=crop_size)
testloader = torch.utils.data.DataLoader(test_img,
batch_size=args.batch_size, shuffle=True, num_workers=5)
else:
raise ValueError('dataset should be tiltillusion.')
# Configure train
logger = args.logger
start_step = 1
mean_loss = []
cur = 0
pos = 0
data_iter = iter(testloader)
iter_per_epoch = len(testloader)
start_time = time.time()
if args.cuda:
model.cuda()
model.eval() # same as model.train(mode=False)
# EVAL
import matplotlib.pyplot as plt
accumulator = np.zeros((0,7))
    for step in range(start_step, args.max_test_examples // (args.iter_size * args.batch_size) + 1):
batch_loss = 0
        for i in range(args.iter_size):
if cur == iter_per_epoch:
cur = 0
data_iter = iter(testloader)
images, labels, meta = next(data_iter) # [r1, theta1, lambda1, shift1, r2 ....]
# import ipdb;ipdb.set_trace()
if args.cuda:
images, labels = images.cuda(), labels.cuda()
images, labels = Variable(images), Variable(labels)
out = model(images)
out_arr = out.squeeze().cpu().detach().numpy()
out_deg = ((np.arctan2(out_arr[:,0], out_arr[:,1]))*180/np.pi)%180
labels_arr = labels.squeeze().cpu().detach().numpy()
labels_deg = ((np.arctan2(labels_arr[:,0], labels_arr[:,1]))*180/np.pi)%180
meta_arr = meta.cpu().detach().numpy()
results = np.concatenate((np.expand_dims(meta_arr[:, 1], axis=1),
np.expand_dims(meta_arr[:, 5], axis=1),
np.expand_dims(out_deg, axis=1),
np.expand_dims(meta_arr[:, 0], axis=1),
np.expand_dims(meta_arr[:, 2], axis=1),
np.expand_dims(meta_arr[:, 3], axis=1),
np.expand_dims(meta_arr[:, 7], axis=1)),
axis=1)
accumulator = np.concatenate((accumulator, results), axis=0)
loss = l2_loss(out, labels)
# import ipdb;ipdb.set_trace()
batch_loss += loss.item()
cur += 1
# update parameter
if len(mean_loss) < args.average_loss:
mean_loss.append(batch_loss)
else:
mean_loss[pos] = batch_loss
pos = (pos + 1) % args.average_loss
if step % args.display == 0:
tm = time.time() - start_time
logger.info('iter: %d, loss: %f, time using: %f(%fs/batch)' %
(step, np.mean(mean_loss), tm, tm/(args.iter_size*args.display)))
start_time = time.time()
# FIGURE
plt.figure(figsize=(4, 4))
f, axarr = plt.subplots(4, 4) # (4, 4)
for ir, rmin in enumerate([40, 60, 80, 100]):
for ith, thetamin in enumerate([-22.5, 22.5, 67.5, 112.5]):
center_gt = []
surround_gt = []
predictions = []
            for i in range(accumulator.shape[0]):
cond = screen(accumulator[i, 3].astype(np.float), accumulator[i, 4].astype(np.float),
accumulator[i, 0].astype(np.float),
r1min=rmin, r1max=rmin + 20, lambda1min=None, lambda1max=None, thetamin=thetamin,
thetamax=thetamin + 45)
if cond:
center_gt.append(accumulator[i, 0].astype(np.float))
surround_gt.append(accumulator[i, 1].astype(np.float))
predictions.append(accumulator[i, 2])
if len(center_gt) > 0:
# # plot
# print('filtered ' + str(len(predictions)) + ' data')
# import matplotlib.pyplot as plt
# plt.figure(figsize=(16, 4))
# plt.subplot(141)
# plt.scatter(center_gt, np.array(predictions), s=10, vmin=0, vmax=180)
#
import numpy.polynomial.polynomial as poly
# plt.subplot(142)
cs_diff = orientation_diff(center_gt, surround_gt) # center - surround in x axis
out_gt_diff = orientation_diff(predictions, center_gt) # pred - gt in y axis
cs_diff_collapsed, out_gt_diff_collapsed = collapse_points(cs_diff, out_gt_diff)
coefs = poly.polyfit(cs_diff_collapsed, out_gt_diff_collapsed, 5)
ffit = poly.polyval(np.arange(-90, 90, 1), coefs)
axarr[ir, ith].scatter(cs_diff_collapsed, out_gt_diff_collapsed, s=40, alpha=0.25, vmin=0, vmax=180)
# coefs = poly.polyfit(cs_diff, out_gt_diff, 5)
# ffit = poly.polyval(np.arange(-90, 90, 1), coefs)
# axarr[ir, ith].scatter(cs_diff, out_gt_diff, s=15, alpha=0.3, vmin=0, vmax=180)
axarr[ir, ith].plot(np.arange(-90, 90, 1), ffit, linewidth=3, alpha=0.5, color='black')
axarr[ir, ith].plot(np.arange(-90, 90, 1), [0] * np.arange(-90, 90, 1).size, color='black')
axarr[ir, ith].set_xlim(0, 87)
axarr[ir, ith].set_ylim(-20, 40)
axarr[ir, ith].set_title('r in ' + str([rmin, rmin + 20]) + ', tht in ' + str([thetamin, thetamin + 45]))
plt.show()
def main():
args = parse_args()
logger = log.get_logger(args.log)
args.logger = logger
logger.info('*'*80)
logger.info('the args are the below')
logger.info('*'*80)
for x in args.__dict__:
logger.info(x+','+str(args.__dict__[x]))
logger.info(cfg.config[args.dataset])
logger.info('*'*80)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    torch.manual_seed(int(time.time()))
model = bdcn.BDCN_ti(pretrain=None, logger=logger)
# model = bdcn_decoder.Decoder(pretrain=None, logger=logger, in_dim=XXXXX)
model.initialize_ti_weights()
if args.complete_pretrain:
model.load_state_dict(torch.load(args.complete_pretrain))
logger.info(model)
train(model, args)
def parse_args():
parser = argparse.ArgumentParser(description='Train BDCN for different args')
parser.add_argument('-d', '--dataset', type=str, choices=cfg.config.keys(),
default='bsds500', help='The dataset to train')
parser.add_argument('--max-test-examples', type=int, default=None,
help='(jk) max iters to test network, default is None (200 for BSDS)')
parser.add_argument('-c', '--cuda', action='store_true',
help='whether use gpu to train network')
parser.add_argument('-g', '--gpu', type=str, default='0',
help='the gpu id to train net')
parser.add_argument('--iter-size', type=int, default=10,
help='iter size equal to the batch size, default 10')
parser.add_argument('--average-loss', type=int, default=50,
help='smoothed loss, default is 50')
parser.add_argument('--step-size', type=int, default=10000,
help='the number of iters to decrease the learning rate, default is 10000')
parser.add_argument('--display', type=int, default=20,
help='how many iters display one time, default is 20')
parser.add_argument('-l', '--log', type=str, default='log.txt',
help='the file to store log, default is log.txt')
parser.add_argument('--batch-size', type=int, default=1,
help='batch size of one iteration, default 1')
parser.add_argument('--crop-size', type=int, default=None,
help='the size of image to crop, default not crop')
parser.add_argument('--complete-pretrain', type=str, default=None,
help='finetune on the complete_pretrain, default None')
return parser.parse_args()
if __name__ == '__main__':
main()
``` |
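The `orientation_diff` helper above wraps angular differences to the nearest of {d, d−180, d+180}. Here is a standalone numpy restatement of that logic with a worked pair of values, kept separate so it runs without the torch/bdcn dependencies of the script.

```python
import numpy as np

def wrap_orientation_diff(a, b):
    """Signed difference a - b, wrapped to whichever candidate is closest to zero."""
    d = np.asarray(a, dtype=float) - np.asarray(b, dtype=float)
    candidates = np.stack([d, d - 180.0, d + 180.0], axis=-1)
    idx = np.argmin(np.abs(candidates), axis=-1)
    return np.take_along_axis(candidates, idx[..., None], axis=-1).squeeze(-1)

print(wrap_orientation_diff([170.0, 10.0], [10.0, 170.0]))  # -> [-20.  20.]
```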
{
"source": "jkim327/rig_class",
"score": 2
} |
#### File: rig_class/pyQT_file/spine_ui_test.py
```python
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_spine_dialog(object):
def setupUi(self, spine_dialog):
spine_dialog.setObjectName("spine_dialog")
spine_dialog.resize(313, 406)
self.spine_widget = QtWidgets.QWidget(spine_dialog)
self.spine_widget.setGeometry(QtCore.QRect(10, 10, 291, 411))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spine_widget.sizePolicy().hasHeightForWidth())
self.spine_widget.setSizePolicy(sizePolicy)
self.spine_widget.setMinimumSize(QtCore.QSize(291, 411))
self.spine_widget.setMaximumSize(QtCore.QSize(291, 411))
self.spine_widget.setObjectName("spine_widget")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 271, 41))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.spine_num_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.spine_num_layout.setContentsMargins(0, 0, 0, 0)
self.spine_num_layout.setObjectName("spine_num_layout")
self.spine_label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.spine_label.setObjectName("spine_label")
self.spine_num_layout.addWidget(self.spine_label)
self.spine_int = QtWidgets.QSpinBox(self.horizontalLayoutWidget)
self.spine_int.setObjectName("spine_int")
self.spine_num_layout.addWidget(self.spine_int)
spacerItem = QtWidgets.QSpacerItem(15, 0, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.spine_num_layout.addItem(spacerItem)
self.spine_int_slider = QtWidgets.QSlider(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spine_int_slider.sizePolicy().hasHeightForWidth())
self.spine_int_slider.setSizePolicy(sizePolicy)
self.spine_int_slider.setOrientation(QtCore.Qt.Horizontal)
self.spine_int_slider.setObjectName("spine_int_slider")
self.spine_num_layout.addWidget(self.spine_int_slider)
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 90, 271, 191))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.spine_opt_ho_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.spine_opt_ho_layout.setContentsMargins(0, 0, 0, 0)
self.spine_opt_ho_layout.setObjectName("spine_opt_ho_layout")
self.img_field = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(2)
sizePolicy.setHeightForWidth(self.img_field.sizePolicy().hasHeightForWidth())
self.img_field.setSizePolicy(sizePolicy)
self.img_field.setMaximumSize(QtCore.QSize(132, 189))
self.img_field.setText("")
self.img_field.setPixmap(QtGui.QPixmap("images/bell.png"))
self.img_field.setScaledContents(True)
self.img_field.setObjectName("img_field")
self.spine_opt_ho_layout.addWidget(self.img_field)
self.spine_opt_ver_layout = QtWidgets.QVBoxLayout()
self.spine_opt_ver_layout.setObjectName("spine_opt_ver_layout")
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.spine_opt_ver_layout.addItem(spacerItem1)
self.FK_spine_opt = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
self.FK_spine_opt.setObjectName("FK_spine_opt")
self.spine_opt_ver_layout.addWidget(self.FK_spine_opt)
self.IK_spine_opt = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
self.IK_spine_opt.setObjectName("IK_spine_opt")
self.spine_opt_ver_layout.addWidget(self.IK_spine_opt)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.spine_opt_ver_layout.addItem(spacerItem2)
self.spine_opt_ho_layout.addLayout(self.spine_opt_ver_layout)
self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(10, 290, 271, 41))
self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
self.name_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
self.name_layout.setContentsMargins(0, 0, 0, 0)
self.name_layout.setObjectName("name_layout")
self.name_label = QtWidgets.QLabel(self.horizontalLayoutWidget_3)
self.name_label.setObjectName("name_label")
self.name_layout.addWidget(self.name_label)
self.name_field = QtWidgets.QTextEdit(self.horizontalLayoutWidget_3)
self.name_field.setObjectName("name_field")
self.name_layout.addWidget(self.name_field)
self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(10, 330, 271, 51))
self.horizontalLayoutWidget_4.setObjectName("horizontalLayoutWidget_4")
self.button_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_4)
self.button_layout.setContentsMargins(0, 0, 0, 0)
self.button_layout.setObjectName("button_layout")
self.run_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget_4)
self.run_btn.setObjectName("run_btn")
self.button_layout.addWidget(self.run_btn)
self.close_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget_4)
self.close_btn.setObjectName("close_btn")
self.button_layout.addWidget(self.close_btn)
self.sample_btn = QtWidgets.QPushButton(self.spine_widget)
self.sample_btn.setGeometry(QtCore.QRect(210, 60, 75, 23))
self.sample_btn.setObjectName("sample_btn")
self.retranslateUi(spine_dialog)
QtCore.QMetaObject.connectSlotsByName(spine_dialog)
def retranslateUi(self, spine_dialog):
spine_dialog.setWindowTitle(QtWidgets.QApplication.translate("spine_dialog", "Dialog", None, -1))
self.spine_label.setText(QtWidgets.QApplication.translate("spine_dialog", "spine", None, -1))
self.FK_spine_opt.setText(QtWidgets.QApplication.translate("spine_dialog", "FK Spine", None, -1))
self.IK_spine_opt.setText(QtWidgets.QApplication.translate("spine_dialog", "IK Spine", None, -1))
self.name_label.setText(QtWidgets.QApplication.translate("spine_dialog", "Name", None, -1))
self.run_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "Run", None, -1))
self.close_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "Close", None, -1))
self.sample_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "call sample", None, -1))
```
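This is Qt-Designer-style generated code. A minimal sketch of how such a `Ui_*` class is normally attached to a dialog (the module name `spine_ui_test` and its presence on the import path are assumptions):

```python
import sys

from PySide2 import QtWidgets

from spine_ui_test import Ui_spine_dialog  # module/import path is an assumption

app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
ui = Ui_spine_dialog()
ui.setupUi(dialog)      # builds the generated widgets onto the bare dialog
dialog.show()
sys.exit(app.exec_())
```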
#### File: jkim327/rig_class/spine_rig.py
```python
import maya.cmds as cmds
import maya.mel as mel
import logging
logger = logging.getLogger(__name__)
import rig_class.spine_data as sd
class SpineRig(object):
def __init__(self, spine_data):
self.spine_data = sd.SpineData()
def create_sample(self):
'''
Description:
Creates temporary spine joint chain.
Returns:
spine_data updates
'''
logging.info('Before temp_jnt_list Updates :{}'.format(self.spine_data)) #it has correct dic
startTemp = 'spine_0_temp_jnt'
if cmds.objExists(startTemp):
cmds.delete(startTemp)
trsY = 10
self.spine_data.temp_jnt_list = list()
for num in range(self.spine_data.num_jnt):
if num == 0:
new_jnt = cmds.joint(n='spine_{}_temp_jnt'.format(num))
self.spine_data.temp_jnt_list.append(new_jnt)
else:
new_jnt = cmds.joint(n='spine_{}_temp_jnt'.format(num),position = [0, trsY*num, 0])
self.spine_data.temp_jnt_list.append(new_jnt)
logging.info('After temp_jnt_list Updates :{}'.format(self.spine_data))
def create_joint(self):
'''
Description:
Creates final spine joint chain.
Returns:
spine_data updates
a list of final joints.
'''
self.spine_data.final_jnt_list = list()
num = 1
cmds.select(cl=True)
temp = self.spine_data.temp_jnt_list
name = self.spine_data.cha_naming
logging.info('Before final_jnt_list Updates :{}'.format(self.spine_data))
for tempJnt in temp:
transVar = cmds.xform(tempJnt, worldSpace = True, query = True, translation = True)
new_rig_jnt = cmds.joint(n = '{}_spine_{}_jnt'.format(name, num), absolute = True, position = transVar)
self.spine_data.final_jnt_list.append(new_rig_jnt)
num = num + 1
for finalJnt in self.spine_data.final_jnt_list:
cmds.joint(finalJnt, e=True, oj='xyz', secondaryAxisOrient = 'yup', ch=True, zso=True)
#clean the end joint's orientation
endJnt = self.spine_data.final_jnt_list[-1]
cmds.setAttr('{}.jointOrientX'.format(endJnt), 0)
cmds.setAttr('{}.jointOrientY'.format(endJnt), 0)
cmds.setAttr('{}.jointOrientZ'.format(endJnt), 0)
logging.info('After final_jnt_list Updates :{}'.format(self.spine_data))
def create_control(self, target):
'''
Description:
Creates nurbs curve controller and its parent group.
Parameters:
target
Returns:
a list of nurbs curve and its parent group
'''
name = '{}_ctl'.format(target)
ctl_pair = list()
if self.spine_data.fk_rig == True:
ctl = create_circle(name)
elif self.spine_data.ik_rig == True:
ctl = create_box(name)
ctl_grp = create_group(ctl)
# Warning: Cannot parent components or objects in the underworld.
cmds.parent(ctl, ctl_grp)
ctl_pair.append(ctl)
ctl_pair.append(ctl_grp)
return ctl_pair
class FK_rig(SpineRig):
def __init__(self, spine_data):
        super(FK_rig, self).__init__(spine_data)
self.spine_data = spine_data
def create_FK_con(self, target):
'''
Description:
Creates FK controllers.
Parameters:
target = single final joint
Returns:
ctl string
'''
pair = self.create_control(target)#return list of a pair (ctl and ctl grp)
ctl, ctl_grp = pair
cmds.matchTransform(ctl_grp, target)
constraints_objs(ctl, target)
self.spine_data.ctl_list.append(ctl)
return ctl
def organize_fk(self):
'''
Description:
Parent fk controllers in order.
Returns:
None
'''
ctl_grp = self.spine_data.ctl_list
for num in range(len(ctl_grp)):
if num != 0:
currentCtl = ctl_grp[num]#Find the current control
currentGrp = cmds.listRelatives(currentCtl, parent=True)#Find the parent group of the current control.
aboveCtl = ctl_grp[num-1]#Find the control before the current one.
cmds.parent(currentGrp, aboveCtl)#Parent current control's parent group to the above control.
def create_FK(self):
'''
Description:
Creates FK spine.
Returns:
spine_data updates
Final joints
FK controllers
'''
temp_jnts = self.spine_data.temp_jnt_list[0]
#If temporary joints does not exists, stop the process.
if not cmds.objExists(temp_jnts):
return logging.error('Temporary joints not exist.')
#create final joints
self.create_joint()
#clear list
self.spine_data.ctl_list = list()
#create controllers
for jnt in self.spine_data.final_jnt_list:
self.create_FK_con(jnt)
#organize hierarchy
self.organize_fk()
logging.info('fk controls update {}'.format(self.spine_data))
class IK_rig(SpineRig):
def __init__(self, spine_data):
        super(IK_rig, self).__init__(spine_data)
self.spine_data = spine_data
self.ik_product = list()
self.skin_jnt = list()
self.fk_jnt = list()
self.startJ = None
self.endJ = None
def create_ikHandle(self):
'''
Description:
Creates ik Spline Handle
Returns:
self.ik_product updates
ikHandle, curve object
'''
self.startJ = self.spine_data.final_jnt_list[0]
self.endJ = self.spine_data.final_jnt_list[-1]
nameIkh = '{}_spine_ikh'.format(self.spine_data.cha_naming)
nameCuv = '{}_spine_cuv'.format(self.spine_data.cha_naming)
self.ik_product = cmds.ikHandle(solver='ikSplineSolver',
ccv = True,
n = nameIkh,
parentCurve = True,
rootOnCurve = True,
scv = False,
ns = 4,
sj = self.startJ,
ee = self.endJ)
#rename newly created curve
cmds.rename(self.ik_product[2], nameCuv)
self.ik_product[2] = nameCuv
logging.info('ik product: {}'.format(self.ik_product))
def set_twist(self):
'''
Description:
set ikSplineHandle's twist setting.
Returns:
None
'''
ik_Handle = self.ik_product[0]
cmds.setAttr(ik_Handle +'.dTwistControlEnable', True)
cmds.setAttr(ik_Handle +'.dWorldUpType', 4)
cmds.setAttr(ik_Handle +'.dWorldUpAxis', 0)
cmds.connectAttr(self.skin_jnt[0]+'.worldMatrix[0]', ik_Handle +'.dWorldUpMatrix' )
cmds.connectAttr(self.skin_jnt[-1]+'.worldMatrix[0]', ik_Handle +'.dWorldUpMatrixEnd' )
def manage_skin_jnt(self):
'''
Description:
Creates ik Spline Handle
Returns:
self.ik_product updates
ikHandle, curve object
'''
self.skin_jnt = list()
skin_start_j = create_skin_jnt(self.startJ, 'root')
skin_end_j = create_skin_jnt(self.endJ, 'chest')
self.skin_jnt.append(skin_start_j)
self.skin_jnt.append(skin_end_j)
def skin_jnt_to_curve(self):
'''
Description:
Skin newly created self.skin_jnt to ikhandle curve.
Returns:
None
'''
cmds.skinCluster(self.skin_jnt, self.ik_product[2], mi=3)
def create_fk_chain(self):
'''
Description:
Create waist FK joint.
Returns:
self.fk_jnt updates
'''
all_num = len(self.spine_data.final_jnt_list)
if all_num % 2 == 0:
            mid_num = all_num // 2 - 1
else:
            mid_num = all_num // 2
fk_goal_jnts = [self.spine_data.final_jnt_list[0], self.spine_data.final_jnt_list[mid_num], self.spine_data.final_jnt_list[-1]]
fk_part_name = ['root', 'waist', 'chest']
cmds.select(cl=True)
for num in range(len(fk_goal_jnts)):
jnt = fk_goal_jnts[num]
part = fk_part_name[num]
new_jnt = cmds.joint(n='{}_fk_jnt'.format(part))
cmds.matchTransform(new_jnt, jnt)
self.fk_jnt.append(new_jnt)
def create_waist(self):
'''
Description:
Create waist FK setting.
Returns:
None
'''
waist_jnt = self.fk_jnt[1]
ctl_name = '{}_ctl'.format(waist_jnt)
ctl = create_circle(ctl_name)
ctl_grp = create_group(ctl)
cmds.parent(ctl, ctl_grp)
cmds.matchTransform(ctl_grp, waist_jnt, pos=True, rot=True)
self.spine_data.ctl_list.append(ctl)
constraints_objs(ctl, waist_jnt)
def create_IK_con(self, target):
'''
Description:
Creates IK controllers.
Parameters:
target = single final joint
Returns:
ctl string
'''
pair = self.create_control(target)#return list of a pair (ctl and ctl grp)
ctl, ctl_grp = pair
cmds.matchTransform(ctl_grp, target)
# find target joint's parent group
target_parent = cmds.listRelatives(target, parent=True)
constraints_objs(ctl, target)
self.spine_data.ctl_list.append(ctl)
return ctl
def organize_ik(self):
'''
Description:
Constraints IK controllers in order.
Returns:
None
'''
waist_con = self.spine_data.ctl_list[-1]
chest_con = self.spine_data.ctl_list[1]
root_con = self.spine_data.ctl_list[0]
fk_end_jnt = self.fk_jnt[0]
chest_grp = cmds.listRelatives(chest_con, parent=True)
waist_grp = cmds.listRelatives(waist_con, parent=True)
constraints_objs(waist_con, chest_grp)
constraints_objs(root_con, waist_grp)
constraints_objs(root_con, fk_end_jnt)
def create_IK(self):
'''
Description:
Create IK Spine objects.
Returns:
None
'''
self.create_joint()
self.create_ikHandle()
self.manage_skin_jnt()
self.skin_jnt_to_curve()
self.set_twist()
for jnt in self.skin_jnt:
self.create_IK_con(jnt)
self.create_fk_chain()
self.create_waist()
self.organize_ik()
# Outside Class Functions
def create_group(target):
'''
Description:
Creates parent group of the target object.
Parameters:
target
Returns:
newly created parent group
'''
name = '{}_grp'.format(target)
group_product = cmds.group(n = name, em=True)
return group_product
def create_circle(name):
'''
Description:
Creates nurbs circle with name.
Parameters:
name string
Returns:
newly created nurbs circle's name
'''
circle = cmds.circle(n = name, r=5, nr=(1,0,0))
return circle[0]
def create_box(name):
'''
Description:
Creates nurbs cube with name.
Parameters:
name string
Returns:
newly created nurbs cube's name
'''
    box = cmds.curve(n = name, d=1, p=[(2.5, 2.5, 2.5), (2.5, 2.5, -2.5), (-2.5, 2.5, -2.5), (-2.5, -2.5, -2.5), (2.5, -2.5, -2.5), (2.5, 2.5, -2.5), (-2.5, 2.5, -2.5), (-2.5, 2.5, 2.5), (2.5, 2.5, 2.5), (2.5, -2.5, 2.5), (2.5, -2.5, -2.5), (-2.5, -2.5, -2.5), (-2.5, -2.5, 2.5), (2.5, -2.5, 2.5), (-2.5, -2.5, 2.5), (-2.5, 2.5, 2.5)], k=[0,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16])
return box
def constraints_objs(ctl, target):
cmds.parentConstraint(ctl, target, mo=True)
def create_skin_jnt(target, name):#receives self.startJ, self.endJ
'''
Description:
Creates extra joint chain for curve skinning.
Parameters:
target = target joint
name
Returns:
newly created joint's name
'''
cmds.select(cl=True)
new_joint = cmds.joint(n='{}_skin_jnt'.format(name))
new_joint_grp = create_group(new_joint)
cmds.parent(new_joint, new_joint_grp)
cmds.matchTransform(new_joint_grp, target, pos=True, rot=True)
return new_joint
```
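A hedged usage sketch for the classes above. It has to run inside Maya (everything goes through `maya.cmds`), and the `SpineData` attributes set here (`num_jnt`, `cha_naming`, `fk_rig`, `ik_rig`) are inferred from how this module reads them — `spine_data.py` itself is not shown.

```python
# Run from Maya's Script Editor; the rig_class package must be on Maya's PYTHONPATH.
import rig_class.spine_data as sd
import rig_class.spine_rig as st

data = sd.SpineData()
data.num_jnt = 5            # attribute names inferred from spine_rig.py
data.cha_naming = 'hero'
data.fk_rig = True
data.ik_rig = False

rig = st.FK_rig(data)
rig.create_sample()         # lays down the temporary joint chain
rig.create_FK()             # builds final joints and FK controls from it
```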
#### File: jkim327/rig_class/spine_UI.py
```python
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from shiboken2 import wrapInstance
#import logger
import logging
logger = logging.getLogger(__name__)
#import maya modules
import os.path
import maya.OpenMayaUI as OpenMayaUI
import maya.cmds as cmds
#import modules
import rig_class.spine_rig as st
import rig_class.spine_data as sd
#get file directory
base_dir = os.path.dirname(os.path.abspath(st.__file__))
fk_path = '{}\\fk_img.png'.format(base_dir)
ik_path = '{}\\ik_img.png'.format(base_dir)
def _getMayaWindow():
ptr = OpenMayaUI.MQtUtil.mainWindow ()
if ptr is not None:
        return wrapInstance(int(ptr), QMainWindow)
class spine_window(QDialog, object):
def __init__(self, spine_data = None):
super(spine_window, self).__init__(parent=_getMayaWindow())
winName = 'spine_tool_window'
self.spine_data = sd.SpineData()
self.spine_object = st.SpineRig(self.spine_data)
# If the UI already exists, delete the old one.
if cmds.window (winName, exists=True):
cmds.deleteUI (winName, window=True)
# Set the dialog object name, window title and size
self.setObjectName(winName)
self.setWindowTitle('spine_test')
self.setMinimumSize(313, 406)
self.setFixedSize(QSize(313, 406))
self.customUI()
self.show()
def customUI(self):
self.spine_widget = QtWidgets.QWidget(self)
self.spine_widget.setGeometry(QtCore.QRect(10, 10, 291, 411))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spine_widget.sizePolicy().hasHeightForWidth())
self.spine_widget.setSizePolicy(sizePolicy)
self.spine_widget.setMinimumSize(QtCore.QSize(291, 411))
self.spine_widget.setMaximumSize(QtCore.QSize(291, 411))
self.spine_widget.setObjectName("spine_widget")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 271, 41))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.spine_num_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.spine_num_layout.setContentsMargins(0, 0, 0, 0)
self.spine_num_layout.setObjectName("spine_num_layout")
self.spine_label = QtWidgets.QLabel(self.horizontalLayoutWidget)
self.spine_label.setObjectName("spine_label")
self.spine_num_layout.addWidget(self.spine_label)
self.spine_int = QtWidgets.QSpinBox(self.horizontalLayoutWidget)
self.spine_int.setMinimum(3)
self.spine_int.setMaximum(10)
self.spine_int.setObjectName("spine_int")
self.spine_num_layout.addWidget(self.spine_int)
spacerItem = QtWidgets.QSpacerItem(15, 0, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
self.spine_num_layout.addItem(spacerItem)
self.spine_int_slider = QtWidgets.QSlider(self.horizontalLayoutWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spine_int_slider.sizePolicy().hasHeightForWidth())
self.spine_int_slider.setSizePolicy(sizePolicy)
self.spine_int_slider.setMinimum(3)
self.spine_int_slider.setMaximum(10)
self.spine_int_slider.setOrientation(QtCore.Qt.Horizontal)
self.spine_int_slider.setObjectName("spine_int_slider")
self.spine_num_layout.addWidget(self.spine_int_slider)
self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(10, 90, 271, 191))
self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
self.spine_opt_ho_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
self.spine_opt_ho_layout.setContentsMargins(0, 0, 0, 0)
self.spine_opt_ho_layout.setObjectName("spine_opt_ho_layout")
self.img_field = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(2)
sizePolicy.setHeightForWidth(self.img_field.sizePolicy().hasHeightForWidth())
self.img_field.setSizePolicy(sizePolicy)
self.img_field.setMaximumSize(QtCore.QSize(132, 189))
self.img_field.setText("")
self.img_field.setPixmap(QtGui.QPixmap(fk_path))
self.img_field.setScaledContents(True)
self.img_field.setObjectName("img_field")
self.spine_opt_ho_layout.addWidget(self.img_field)
self.spine_opt_ver_layout = QtWidgets.QVBoxLayout()
self.spine_opt_ver_layout.setObjectName("spine_opt_ver_layout")
spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.spine_opt_ver_layout.addItem(spacerItem1)
self.FK_spine_opt = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
self.FK_spine_opt.setObjectName("FK_spine_opt")
self.FK_spine_opt.setChecked(True)
#self.spine_data.fk_rig
self.spine_opt_ver_layout.addWidget(self.FK_spine_opt)
self.IK_spine_opt = QtWidgets.QRadioButton(self.horizontalLayoutWidget_2)
self.IK_spine_opt.setObjectName("IK_spine_opt")
#self.spine_data.ik_rig
self.spine_opt_ver_layout.addWidget(self.IK_spine_opt)
spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.spine_opt_ver_layout.addItem(spacerItem2)
self.spine_opt_ho_layout.addLayout(self.spine_opt_ver_layout)
self.horizontalLayoutWidget_3 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(10, 290, 271, 31))
self.horizontalLayoutWidget_3.setObjectName("horizontalLayoutWidget_3")
self.name_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_3)
self.name_layout.setContentsMargins(0, 0, 0, 0)
self.name_layout.setObjectName("name_layout")
self.name_label = QtWidgets.QLabel(self.horizontalLayoutWidget_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.name_label.sizePolicy().hasHeightForWidth())
self.name_label.setSizePolicy(sizePolicy)
self.name_label.setMaximumSize(QtCore.QSize(34, 29))
self.name_label.setObjectName("name_label")
self.name_layout.addWidget(self.name_label)
self.name_field = QtWidgets.QTextEdit(self.horizontalLayoutWidget_3)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.name_field.sizePolicy().hasHeightForWidth())
self.name_field.setSizePolicy(sizePolicy)
self.name_field.setMaximumSize(QtCore.QSize(229, 29))
self.name_field.setObjectName("name_field")
self.name_layout.addWidget(self.name_field)
self.horizontalLayoutWidget_4 = QtWidgets.QWidget(self.spine_widget)
self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(10, 330, 271, 51))
self.horizontalLayoutWidget_4.setObjectName("horizontalLayoutWidget_4")
self.button_layout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_4)
self.button_layout.setContentsMargins(0, 0, 0, 0)
self.button_layout.setObjectName("button_layout")
self.run_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget_4)
self.run_btn.setObjectName("run_btn")
self.button_layout.addWidget(self.run_btn)
self.close_btn = QtWidgets.QPushButton(self.horizontalLayoutWidget_4)
self.close_btn.setObjectName("close_btn")
self.button_layout.addWidget(self.close_btn)
self.sample_btn = QtWidgets.QPushButton(self.spine_widget)
self.sample_btn.setGeometry(QtCore.QRect(210, 60, 75, 23))
self.sample_btn.setObjectName("sample_btn")
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
#----------------signals-------------------
#change img thumbnails when radio button is clicked.
self.FK_spine_opt.clicked.connect(self.show_fk)
self.IK_spine_opt.clicked.connect(self.show_ik)
#spin box and slider are connected
self.spine_int.valueChanged.connect(self.slider_change)
self.spine_int_slider.valueChanged.connect(self.spine_change)
#the int value affects function
self.sample_btn.clicked.connect(self.call_sample)
#create spine rig
self.run_btn.clicked.connect(self.create_spine)
#close the window
self.close_btn.clicked.connect(self.close_window)
def retranslateUi(self, spine_dialog):
spine_dialog.setWindowTitle(QtWidgets.QApplication.translate("spine_dialog", "Spine", None, -1))
self.spine_label.setText(QtWidgets.QApplication.translate("spine_dialog", "spine", None, -1))
self.FK_spine_opt.setText(QtWidgets.QApplication.translate("spine_dialog", "FK Spine", None, -1))
self.IK_spine_opt.setText(QtWidgets.QApplication.translate("spine_dialog", "IK Spine", None, -1))
self.name_label.setText(QtWidgets.QApplication.translate("spine_dialog", "Name", None, -1))
self.run_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "Run", None, -1))
self.close_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "Close", None, -1))
self.sample_btn.setText(QtWidgets.QApplication.translate("spine_dialog", "call sample", None, -1))
#----------------slots-------------------
def show_fk(self):
self.img_field.setPixmap(QtGui.QPixmap(fk_path))
def show_ik(self):
self.img_field.setPixmap(QtGui.QPixmap(ik_path))
def slider_change(self):
size = self.spine_int.value()
self.spine_int_slider.setValue(size)
def spine_change(self):
size = self.spine_int_slider.value()
self.spine_int.setValue(size)
def call_sample(self):
self.spine_object.spine_data.num_jnt = self.spine_int.value()#update data object
self.spine_object.create_sample()
def close_window(self):
        self.close()
def create_spine(self):
self.spine_object.spine_data.cha_naming = self.name_field.toPlainText()
self.spine_object.spine_data.fk_rig = self.FK_spine_opt.isChecked()
self.spine_object.spine_data.ik_rig = self.IK_spine_opt.isChecked()
if not self.spine_object.spine_data.temp_jnt_list:
return logging.error('Nothing exists. Please set temporary joints first.')
if not self.spine_object.spine_data.cha_naming:
return logging.error('Name is not specified.')
if self.FK_spine_opt.isChecked():
spineJnt = st.FK_rig(self.spine_object.spine_data)
spineJnt.create_FK()
logging.info('FK Creation {}'.format(self.spine_object.spine_data))
elif self.IK_spine_opt.isChecked():
spineJnt = st.IK_rig(self.spine_object.spine_data)
spineJnt.create_IK()
logging.info('IK Creation {}'.format(self.spine_object.spine_data))
def initUI():
spine_window()
initUI()
``` |
{
"source": "Jkim516/Bber",
"score": 3
} |
#### File: Bber/Flask/app.py
```python
from flask import Flask, render_template, request
from time import strftime, time
from datetime import datetime
import pytz
import requests
import pandas as pd
from function import make_map
import csv
import json
from waitress import serve
app = Flask(__name__, static_url_path="/static")
@app.route("/")
def index():
"""Return the main page."""
with open('station_name.csv') as csvfile:
reader = csv.DictReader(csvfile)
# for row in reader:
# print(row)
tz = pytz.timezone('US/Pacific')
LA_now = datetime.now(tz)
time_str = LA_now.strftime("%m/%d/%Y %H:%M")
print(time_str)
# stations = ["3047", "3005", "3023"]
return render_template("index.html", time_info=time_str, reader=reader)
# @app.route("/")
# def index():
# """Return the main page."""
# time_str = strftime("%m/%d/%Y %H:%M")
# print(time_str)
# restaurants = ["Din Tai Fung", "Rocco's", "Chipotle"]
# return render_template("index.html", time_info=time_str, restaurants=restaurants)
# @app.route('/')
# def index():
# folium_map = make_map()
# return folium_map._repr_html_()
# if __name__ == '__main__':
# app.run(debug=True)
@app.route("/get_results", methods=["POST"])
def get_results():
with open('station_name.json') as json_file:
stations = json.load(json_file)
data = request.form
print(data)
station = data["station"]
# stations = {}
# for row in reader:
# kiosk_id= row['kioskId']
# lat = row['latitude']
# lng = row['longitude']
# stations[kiosk_id] = (lat, lng)
lat, lng = stations[station]
# live_station_df = get_live_station("https://bikeshare.metro.net/stations/json/")
# print(live_station_df)
# # answer = should_make_transaction(user_id)
# return render_template("results.html", station=station, lat=lat, lng=lng)
folium_map = make_map(stations[station])
return folium_map._repr_html_()
if __name__ == "__main__":
serve(app, host='0.0.0.0', port=5000)
# def should_make_transaction(user_id):
# return False
# def get_live_station(url):
# response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
# stations = response.json()
# output = []
# for station in stations['features']:
# dict_keys = ['kioskId', 'bikesAvailable', 'docksAvailable', 'name', 'latitude', 'longitude']
# data = {k : station['properties'][k] for k in dict_keys}
# data['time'] = time()
# output.append(data)
# return pd.DataFrame(output)
``` |
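`get_results()` expects `station_name.json` to map a kiosk id to a `(latitude, longitude)` pair. A sketch of how that file could be produced from `station_name.csv`, following the column names used in the commented-out loop above:

```python
import csv
import json

stations = {}
with open('station_name.csv') as csvfile:
    for row in csv.DictReader(csvfile):
        stations[row['kioskId']] = (row['latitude'], row['longitude'])

with open('station_name.json', 'w') as out:
    json.dump(stations, out)   # tuples are stored as JSON arrays, which still unpack fine
```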
{
"source": "JKimani77/grupe-project",
"score": 3
} |
#### File: app/auth/views.py
```python
from flask import render_template,url_for, flash,redirect,request
from . import auth
from flask_login import login_user, logout_user ,login_required
from .forms import RegForm,LoginForm
from ..models import User
from ..email import mail_message
@auth.route('/login', methods = ['GET','POST'])
def login():
    form = LoginForm(csrf_enabled=False)
if form.validate_on_submit():
user = User.query.filter_by(name = form.name.data).first()
if user != None and user.verify_password(form.password.data):
login_user(user,form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid')
return render_template('auth/login.html', form = form)
@auth.route('/signup', methods = ["GET","POST"])
def signup():
    form = RegForm(csrf_enabled=False)
if form.validate_on_submit():
user = User(email = form.email.data, name = form.name.data, password = form.password.data)
user.save_user()
mail_message("Welcome to our Application","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
return render_template('auth/signup.html', reg_form = form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('main.index'))
```
#### File: app/main/views.py
```python
from flask import render_template,redirect,url_for,abort,request, flash
from flask_login import login_required,current_user
from . import main
from .. import db,photos
from ..request import search_user
from ..models import User,Post,Review
from .forms import PostForm, ReviewForm, SearchForm
@main.route('/')
def index():
posts = Post.query.all()
form = SearchForm()
if form.validate_on_submit():
return redirect(url_for('main.search'))
#posts = get_post_by_id()
return render_template('index.html', posts= posts)
@main.route('/search', methods = ['POST','GET'])
@login_required
def search():
'''
function to return github users search results
'''
    # 'username' and 'page' were undefined here; reading them from the query string is an assumption
    username = request.args.get('username', '')
    page = request.args.get('page', 1)
    searched = search_user(username, page)
    return render_template('search.html', searched=searched)
@main.route('/create_new', methods = ['POST','GET'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
title = form.title.data
post_info = form.post_info.data
        image = form.image.data
        user_id = current_user._get_current_object().id
        new_post_object = Post(post_info=post_info, user_id=user_id, image=image, title=title)
new_post_object.save_post()
return redirect(url_for('main.index'))
    # import pdb; pdb.set_trace()  # debug breakpoint left disabled so plain GET requests don't block
return render_template('new_post.html', form = form)
@main.route('/create_review/<int:post_id>', methods = ['POST', 'GET'])
@login_required
def review(post_id):
form = ReviewForm()
post = Post.query.get(post_id)
all_reviews = Review.query.filter_by(post_id = post_id).all()
if form.validate_on_submit():
review = form.review.data
comments= form.comments.data
user_id = current_user._get_current_object().id
post_id = form.post_id.data
new_review = Review(comments = comments,user_id = user_id,post_id = post_id ,review = review)
new_review.save_review()
return redirect(url_for('main.index', post_id = post_id))
return render_template('review.html', form =form, post = post,all_reviews=all_reviews)
``` |
{
"source": "JKimani77/News",
"score": 3
} |
#### File: News/tests/newsmodel_test.py
```python
import unittest
from app.models import Newssourcemodel
#id,name,description,language,url,category,country
class Newssource(unittest.TestCase):
def setUp(self):
        self.news = Newssourcemodel(1,'bloomberg','news source that features latest news','en','www.news.news','business','usa')
def test_instance(self):
self.assertTrue(isinstance(self.news,Newssourcemodel))
``` |
{
"source": "JKimani77/pics",
"score": 3
} |
#### File: pics/apppics/tests.py
```python
from django.test import TestCase
# Create your tests here.
from django.test import TestCase
from .models import Image,Location, Category
class ImageTestClass(TestCase):
'''
Test case for model image
'''
def setUp(self):
self.any_category = Category(name = 'FOOD')
self.any_location = Location(image_location = 'Bahrain')
self.any_image = Image(image_name = 'TOMATOES', image_description = 'a tomato tomattoed', image = '/path.image.png', category =self.any_category)
def test_search_by_category(self):
self.any_category.save_tag()
self.any_image.save_image()
images = self.any_image.search_by_tag('FOOD')
self.assertTrue(len(images)>0)
#
class LocationTestClass(TestCase):
'''
test case for location model
'''
def setUp(self):
self.any_location = Location(image_location = 'Poland')
def test_save_location(self):
self.any_location.save_location()
any_locations = Location.objects.all()
self.assertTrue(len(any_locations)>0)
#
``` |
{
"source": "JKimani77/wun-minute-pitch-app",
"score": 3
} |
#### File: JKimani77/wun-minute-pitch-app/manage.py
```python
from app import make_app,db
from flask_script import Manager,Server
from app.models import User,Pitch
from flask_migrate import Migrate, MigrateCommand
#Creating app instance
app = make_app('production')
manager = Manager(app)
manager.add_command('server',Server)
#init migrate class and pass in app and sqlalchemy instance
migrate = Migrate(app,db)
manager.add_command('db',MigrateCommand)
@manager.command #run the test files
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.shell #create shell context and return app,db instance and user class
def make_shell_context():
'''
flask-script shell for testing
features in app and debugging
'''
return dict(app = app, db = db, User = User, Pitch = Pitch) ##add models
if __name__ == '__main__':
manager.run()
``` |
{
"source": "jkimanzi/daraja",
"score": 3
} |
#### File: jkimanzi/daraja/utils.py
```python
import requests
from requests.auth import HTTPBasicAuth
import base64
from datetime import datetime
import keys
def get_timestamp():
time_now = datetime.now()
time_stamp = time_now.strftime("%Y%m%d%H%M%S")
return time_stamp
def generate_token():
consumer_key = keys.consumer_key
consumer_secret = keys.consumer_secret
api_URL = "https://sandbox.safaricom.co.ke/oauth/v1/generate?grant_type=client_credentials"
response = requests.get(api_URL, auth=HTTPBasicAuth(consumer_key, consumer_secret))
json_response = response.json()
access_token = json_response['access_token']
return access_token
def generate_passwd(time_stamp):
    # time_stamp is supplied by the caller (see get_timestamp above)
#Generate password by base64 encoding BusinessShortcode, Passkey and Timestamp.
data_to_encode = keys.business_code + keys.passkey + time_stamp
encoded_string = base64.b64encode(data_to_encode.encode())
decoded_password = encoded_string.decode('utf-8')
return decoded_password
``` |
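A small usage sketch tying the helpers together. It assumes a `keys.py` with valid Safaricom sandbox credentials next to this module, as the imports above require, and note that `generate_token()` makes a live request to the sandbox URL in the code.

```python
import utils

stamp = utils.get_timestamp()            # e.g. '20240131120000'
password = utils.generate_passwd(stamp)  # base64(shortcode + passkey + timestamp)
token = utils.generate_token()           # OAuth bearer token from the sandbox
print(stamp, password[:12], token[:12])
```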
{
"source": "jkimblad/jabberjay",
"score": 3
} |
#### File: jkimblad/jabberjay/brush_stroke.py
```python
import numpy as np
class BrushStroke:
def __init__(self, color, pos, size, rot):
self.color = color
self.pos = pos
self.size = size
self.rot = rot
def __str__(self):
temp = "color: " + str(self.color) + "\n"
temp += "pos_x: " + str(self.pos[0]) + "\n"
temp += "pos_y: " + str(self.pos[1]) + "\n"
temp += "size_x: " + str(self.size[0]) + "\n"
temp += "size_y: " + str(self.size[1]) + "\n"
temp += "rot: " + str(self.rot) + "\n\n"
return temp
def create_random_brushstroke(width, height, brush_size):
color = np.random.rand(1)[0]
pos = [
np.random.randint(width - brush_size[0], size=1)[0],
np.random.randint(height - brush_size[1], size=1)[0]
]
rot = np.random.randint(-180, 180, size=1)[0]
b_size = (np.random.randint(1, brush_size[0], size=1)[0], np.random.randint(1, brush_size[1], size=1)[0])
return BrushStroke(color, pos, b_size, rot)
```
```
#### File: jkimblad/jabberjay/main.py
```python
import cv2
import numpy as np
import random
from population import Population
# from brush_stroke import BrushStroke
# from stroke_layer import StrokeLayer
DEBUG = 1
def read_brush(size):
img = cv2.imread("./brushes/1.jpg", cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
return img
def show_painting(window_name, img):
# Normalize from 0-255 to 0-1 which openCV likes =)
img = img / 255.0
cv2.imshow(window_name, img)
cv2.waitKey(1)
def paint(canvas, brush_img, brushstroke):
pos = brushstroke.pos
#resize the brush
brush_img = cv2.resize(brush_img, brushstroke.size, interpolation = cv2.INTER_CUBIC)
if pos[0] < 0:
brush_img = brush_img[0:brush_img.shape[0] + pos[0], :]
pos[0] = 0
if pos[1] < 0:
brush_img = brush_img[:, 0:brush_img.shape[1] + pos[1]]
pos[1] = 0
roi = canvas[pos[0]:pos[0] + brush_img.shape[0],
pos[1]:pos[1] + brush_img.shape[1]]
# Crop brush_img to the same size of roi, this occurs if pos is outside of canvas
brush_img = brush_img[:roi.shape[0], :roi.shape[1]]
# rotate, credit to anopara for this code. Not sure how it works exactly
rows, cols = brush_img.shape
M = cv2.getRotationMatrix2D( (cols/2, rows/2), brushstroke.rot, 1)
brush_img = cv2.warpAffine(brush_img, M, (cols, rows))
myClr = np.copy(brush_img)
myClr[:, :] = brushstroke.color * 255
alpha = np.ceil(brush_img / 255.0)
brush_img = cv2.multiply(alpha, myClr.astype(float))
roi = cv2.multiply((1 - alpha), roi)
roi = cv2.add(roi, brush_img)
roi = np.clip(roi, 0.0, 255.0)
canvas[pos[0]:pos[0] + brush_img.shape[0], pos[1] :pos[1] + brush_img.shape[1]] = roi.astype(np.uint8)
return canvas
def main():
np.random.seed(500) # Set seed for easier debugging
width = 500
height = 500
num_brushstrokes = 4
kill_rate = 0.5
mutation_rate = 0.1
# load target image
target = cv2.imread("./photos/mona.jpg", cv2.IMREAD_GRAYSCALE)
target = cv2.resize(target, (width, height), interpolation=cv2.INTER_CUBIC)
# create painting
canvas = np.zeros([width, height])
# load brush
brush_max_size = (80, 50)
brush_img = read_brush(brush_max_size)
# Create and populate population
population = Population(20, num_brushstrokes, width, height, brush_max_size)
# Evolve unto next generation
next_picture = False
# while True:
num_generations = 10000
num_evolves = 3
window_name = '<NAME>'
cv2.namedWindow(window_name)
cv2.resizeWindow(window_name, 500, 500)
cv2.moveWindow(window_name, 600, 100)
cv2.namedWindow("target")
cv2.resizeWindow("target", 500, 500)
cv2.moveWindow("target", 100, 100)
if (DEBUG):
cv2.namedWindow("debug")
cv2.resizeWindow("debug", 500, 500)
cv2.moveWindow("debug", 1100, 100)
show_painting("target", target)
for i in range(num_generations):
for j in range(num_evolves):
population.evolve(
mutation_rate,
kill_rate,
canvas,
brush_img,
target,
paint)
# Chose top-scoring stroke_layer and add it to canvas
for stroke in population.stroke_layers[0].brush_strokes:
canvas = paint(canvas, brush_img, stroke)
debug_canvas = np.array([0])
# Draw each StrokeLayer in the population in a new window after num_generations
if (DEBUG):
debug_canvas = np.zeros([width, height])
for stroke_layer in population.stroke_layers:
for stroke in stroke_layer.brush_strokes:
debug_canvas = paint(debug_canvas, brush_img, stroke)
if i % 1 == 0:
if(DEBUG):
print("0:", population.stroke_layers[0])
print("1:", population.stroke_layers[1])
print("2:", population.stroke_layers[2])
show_painting("debug", debug_canvas)
show_painting(window_name, canvas)
# Save image
cv2.imwrite("./photos/painted.png", canvas)
if __name__ == '__main__':
main()
```
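The core of `paint()` above is an alpha blend that uses the grayscale brush image itself as the mask. A standalone sketch of that arithmetic on a tiny array (no rotation or cropping) to make the numbers visible:

```python
import numpy as np

canvas = np.full((3, 3), 100.0)          # existing paint
brush = np.array([[0.0, 255.0, 0.0],
                  [0.0, 255.0, 0.0],
                  [0.0, 255.0, 0.0]])    # grayscale brush acts as the mask
color = 0.8                              # stroke color in [0, 1]

alpha = np.ceil(brush / 255.0)           # 1 where the brush covers, 0 elsewhere
stroke = alpha * (color * 255.0)         # solid-colour stroke under the mask
blended = np.clip((1 - alpha) * canvas + stroke, 0.0, 255.0)
print(blended)                           # middle column becomes 204.0, the rest stays 100.0
```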
#### File: jkimblad/jabberjay/population.py
```python
import numpy as np
import sys
from brush_stroke import BrushStroke, create_random_brushstroke
from stroke_layer import StrokeLayer
from enum import Enum
class CrossOverMethods(Enum):
RANDOM = 1
AVERAGE = 2
class Population:
def __init__(self, size, num_brushstrokes, width, height, brush_max_size):
self.stroke_layers = []
self.size = size
self.crossover_method = CrossOverMethods.RANDOM # TODO: pass this in parameter
# TODO pass this as parameters in evolve instead of storing in class.. maybe
self.width = width
self.height = height
self.brush_size = brush_max_size
# Populate the population
for i in range(size):
sl = create_random_strokelayer(
num_brushstrokes, width, height, brush_max_size)
self.__populate(sl)
# Evolve into next generation
# TODO: keep brush_img (and maybe target) out of population
def evolve(self, mutation_rate, kill_rate, canvas, brush_img, target, paint):
self.__score_strokelayers(canvas, target, brush_img, paint)
# Selection phase
# TODO: check if we should do Tournament or Roulette instead of Rank
self.__rank(kill_rate)
# Add offspring
i = 0
while len(self.stroke_layers) < self.size:
offspring = self.__crossover(
self.stroke_layers[i],
self.stroke_layers[i + 1]
)
# Check for mutation
rand = np.random.rand(1)[0]
if rand <= mutation_rate:
offspring.mutate(canvas.shape)
self.__populate(offspring)
i += 1
def __populate(self, ls):
self.stroke_layers.append(ls)
def __score_strokelayers(self, canvas, target, brush_img, paint):
max_score = 255 * target.shape[0] * target.shape[1]
diff = np.subtract(target, canvas)
diff = np.abs(diff)
diff = np.sum(diff)
canvas_score = max_score - diff
for stroke_layer in self.stroke_layers:
tmp_canvas = np.copy(canvas)
# apply stroke_layer
for brush_stroke in stroke_layer.brush_strokes:
tmp_canvas = paint(tmp_canvas, brush_img, brush_stroke)
# check diff from target
diff = np.subtract(target, tmp_canvas)
diff = np.abs(diff)
diff = np.sum(diff)
stroke_layer.score = max_score - diff
stroke_layer.dscore = stroke_layer.score - canvas_score
def get_score(ls):
return ls.score
self.stroke_layers.sort(key=get_score, reverse=True)
def __crossover(self, strokelayer_1, strokelayer_2):
# Combine bushstrokes randomly and make children with 5 strokes each
brush_strokes_1 = strokelayer_1.brush_strokes
brush_strokes_2 = strokelayer_2.brush_strokes
brush_stroke_offspring = []
# "I suspect that this method is not so good, easily get stuck in local minima" - JH
if self.crossover_method == CrossOverMethods.AVERAGE:
for i in range(len(brush_strokes_1)):
# Take average all from first and second
new_color = (brush_strokes_1[i].color + brush_strokes_2[i].color) / 2
new_x_pos = (brush_strokes_1[i].pos[0] + brush_strokes_2[i].pos[0]) / 2
new_y_pos = (brush_strokes_1[i].pos[1] + brush_strokes_2[i].pos[1]) / 2
# new_size = (brush_strokes_1[i].size + brush_strokes_2[i].size) / 2
new_size = ((brush_strokes_1[i].size[0] + brush_strokes_2[i].size[0]) / 2, (brush_strokes_1[i].size[1] + brush_strokes_2[i].size[1]) / 2)
new_rot = (brush_strokes_1[i].rot + brush_strokes_2[i].rot) / 2
brush_stroke_offspring.append(BrushStroke(new_color, [int(round(new_x_pos)), int(round(new_y_pos))], new_size, new_rot))
elif self.crossover_method == CrossOverMethods.RANDOM:
for i in range(len(brush_strokes_1)):
brush_stroke_offspring.append(create_random_brushstroke(self.width, self.height, self.brush_size))
else:
sys.exit("Invalid crossover_method, expecting average|random")
return StrokeLayer(brush_stroke_offspring)
# Selection methods
def __rank(self, kill_rate):
pop_size = len(self.stroke_layers)
# Check that the kill_rate will leave at least 2 pop
new_pop_size = int(pop_size * (1 - kill_rate))
if new_pop_size <= 2:
raise Exception("Kill Ratio is too agressive")
self.stroke_layers = self.stroke_layers[:new_pop_size]
# TODO: refactor into population
def create_random_strokelayer(num_brushstrokes, width, height, brush_size):
brushstrokes = []
for i in range(num_brushstrokes):
brushstrokes.append(create_random_brushstroke(width, height, brush_size))
return StrokeLayer(brushstrokes)
``` |
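A standalone restatement of the fitness used in `__score_strokelayers` above: the maximum achievable score minus the summed absolute pixel difference to the target (inputs are cast to float here so the sketch is dtype-agnostic).

```python
import numpy as np

def layer_score(canvas, target):
    max_score = 255 * target.shape[0] * target.shape[1]
    diff = np.abs(target.astype(float) - canvas.astype(float)).sum()
    return max_score - diff

target = np.full((4, 4), 255.0)
print(layer_score(np.zeros((4, 4)), target))  # 0.0    -> worst possible canvas
print(layer_score(target.copy(), target))     # 4080.0 -> perfect match
```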
{
"source": "jkimbo/phishtray",
"score": 2
} |
#### File: phishtray/exercise/views.py
```python
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from exercise.models import Exercise
from participant.models import Participant, ParticipantProfile
from utils import helpers
from rest_framework import serializers, viewsets
def index(request, link):
e_id = helpers.hasher.decode(link)
exercise = get_object_or_404(Exercise, pk=e_id[0])
context = {'exercise': exercise}
return render(request, 'index.html', context)
def profile(request, link):
e_id = helpers.hasher.decode(link)
exercise = get_object_or_404(Exercise, pk=e_id[0])
profile_keys = exercise.exercisekey_set.all()
if request.method == 'POST':
# try:
participant = Participant(
exercise=exercise,
)
participant.save()
for key in profile_keys:
ParticipantProfile(
participant=participant,
key=key,
value=request.POST[key.key]
).save()
p_id = participant.id
return HttpResponseRedirect(reverse('exercise:start', args=(link, p_id)))
else:
context = {'exercise': exercise, 'exercise_keys': profile_keys}
return render(request, 'profile.html', context)
def start(request, link, p_id):
e_id = helpers.hasher.decode(link)
exercise = get_object_or_404(Exercise, pk=e_id[0])
context = {'exercise': exercise, 'exercise_keys': exercise.exercisekey_set.all()}
return render(request, 'start.html', context)
class ExerciseSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Exercise
fields = ('id', 'title', 'description', 'introduction', 'afterword', 'length_minutes',
'created_date', 'modified_date')
class ExerciseViewSet(viewsets.ModelViewSet):
queryset = Exercise.objects.all()
serializer_class = ExerciseSerializer
```
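The views above decode `link` with `helpers.hasher`. Assuming that is a `hashids.Hashids` instance (the `utils` module is not shown here, so this is an assumption), the round trip looks like this:

```python
from hashids import Hashids

hasher = Hashids(min_length=8)     # salt/alphabet settings are assumptions
link = hasher.encode(42)           # the kind of token an exercise URL would carry
print(link, hasher.decode(link))   # decode returns a tuple, e.g. (42,)
```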
#### File: phishtray/participant/models.py
```python
from django.db import models
from exercise.models import Exercise, ExerciseKey
import django.utils
import json
STARTED_EXPERIMENT = 0
COMPLETED_EXPERIMENT = 1
OTHER = 2
EVENT_TYPES = (
(STARTED_EXPERIMENT, 'started'),
(COMPLETED_EXPERIMENT, 'completed'),
(OTHER, 'opened'),
)
class Participant(models.Model):
def __str__(self):
return "Participant: {} For: {}".format(self.id, self.exercise)
id = models.AutoField(primary_key=True)
exercise = models.ForeignKey(Exercise, on_delete=models.CASCADE)
created_date = models.DateTimeField(auto_now_add=True, blank=True)
modified_date = models.DateTimeField(auto_now=True, blank=True)
class ParticipantProfile(models.Model):
def __str__(self):
return "{} {}:{}".format(self.participant, self.key, self.value)
id = models.AutoField(primary_key=True)
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
key = models.ForeignKey(ExerciseKey, on_delete=models.CASCADE)
value = models.CharField(max_length=180, blank=True, null=True)
created_date = models.DateTimeField(auto_now_add=True, blank=True)
modified_date = models.DateTimeField(auto_now=True, blank=True)
class ParticipantAction(models.Model):
def __str__(self):
        return str(self.id)
id = models.AutoField(primary_key=True)
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
experiment = models.ForeignKey(Exercise, on_delete=models.CASCADE)
type = models.IntegerField(choices=EVENT_TYPES)
created_date = models.DateTimeField(auto_now_add=True, blank=True)
modified_date = models.DateTimeField(auto_now=True, blank=True)
``` |
{
"source": "JK-Incorporated/EYN-DOS",
"score": 3
} |
#### File: JK-Incorporated/EYN-DOS/A.py
```python
import os
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
filesys = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
def get_dir_size(path=dir_path):
    # Recursively sum the sizes of all files under 'path' and return the total in kilobytes.
    total = 0
    with os.scandir(path) as it:
        for entry in it:
            if entry.is_file():
                total += entry.stat().st_size
            elif entry.is_dir():
                total += get_dir_size(entry.path)
    return total/1024
size=0
for path, dirs, files in os.walk(dir_path):
for f in files:
fp = os.path.join(path, f)
size += os.path.getsize(fp)
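# Drive-prompt loop: typing another drive letter (B:, C:, D:, E:) launches that drive's script in a child process.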
while True:
command_lineA=input("A:\> ")
if command_lineA==("B:"):
print("")
os.system("python3 B.py")
print("")
if command_lineA==("C:"):
print("")
os.system("python3 C.py")
print("")
if command_lineA==("D:"):
print("")
os.system("python3 D.py")
print("")
if command_lineA==("E:"):
print("")
os.system("python3 E.py")
print("")
if command_lineA==("dir"):
print("")
print("ERROR EYN_A1")
print("")
if command_lineA==("listdir"):
print("")
print("ERROR EYN_A1")
print("")
if command_lineA==("end"):
print("")
exit()
```
#### File: JK-Incorporated/EYN-DOS/C.py
```python
import os
from os import listdir
from os.path import isfile, join
# File system
dir_path = os.path.dirname(os.path.realpath(__file__))
filesys = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
def get_dir_size(path=dir_path):
    # Recursively sum the sizes of all files under 'path' and return the total in kilobytes.
    total = 0
    with os.scandir(path) as it:
        for entry in it:
            if entry.is_file():
                total += entry.stat().st_size
            elif entry.is_dir():
                total += get_dir_size(entry.path)
    return total/1024
size=0
for path, dirs, files in os.walk(dir_path):
for f in files:
fp = os.path.join(path, f)
size += os.path.getsize(fp)
while True:
command_line=input("C:\> ")
if command_line=="help":
print("")
print("help = Prints commands currently usable, ")
print("listdir = Prints a list of available directories, ")
print("dir = Prints a list of all available files in the current directory, ")
print("run (file) = Executes the file entered, ")
print("end = Closes and resets the EYN-DOS terminal, ")
print("browser = Takes you to the EYN-DOS browser terminal, ")
print("ver = Prints the EYN-DOS version that is running, ")
print("credits = Prints a list of all the people who worked on EYN-DOS, ")
print("cd (File/Folder location) = Takes you to the directory entered, ")
print("cdate = Prints the current date and time, ")
print("read = Prints the contents of the file entered, ")
print("find = Prints the directory path of the file entered, ")
print("write = Writes 1 line of custom text to the file entered (creates new file), ")
print("del = Deletes any newly writtten file entered, ")
print("size = Prints the size of the file entered, ")
print("clear = Clears the screen of all previously printed lines, ")
print("errorlist = Prints all error codes and their meanings.")
print("A: = Takes you to the A drive (Floppy disk drive 1)")
print("B: = Takes you to the B drive (Floppy disk drive 2)")
print("C: = Takes you to the C drive (Hard drive)")
print("D: = Takes you to the D drive (Recovery drive)")
print("E: = Takes you to the E drive (Compact Disc drive)")
print("")
print("Misc:")
print("")
print(" insert(1-9).py = You can add a custom Python file into the EYN-DOS folder and execute it by typing 'run insert(Number in the filename (1-9)).py, ")
print("")
if command_line=="listdir":
print("")
print("DIR1 - ", float(size)/1024, " Kilobytes")
print("")
print("DIR2 - ", "0.0", " Kilobytes")
print("")
print("DIR3 - ", "0.0", " Kilobytes")
print("")
if command_line=="dir":
print("")
print(filesys)
print("")
print(get_dir_size('data/src'))
print(" | Kilobytes")
print("")
if command_line=="run eyndos.py":
print("")
print("This is already running!")
print("")
if command_line=="end":
print("")
exit()
if command_line=="run calculator.py":
print("")
os.system('python3 calculator.py')
print("")
if command_line==("run minesweeper.py"):
print("")
os.system('python3 minesweeper.py')
print("")
if command_line==("run notebook.py"):
print("")
os.system("python3 notebook.py")
print("")
if command_line==("lgr"):
print("")
print("Hey, that's a good YouTube channel!")
print("")
if command_line==("fdisk"):
print("")
print("ERROR EYN_C3-FNI")
print("")
if command_line==("win"):
print("")
print("No.")
print("")
if command_line==("run solitaire.py"):
"Credit to 'shomikj' on GitHub for this code!"
print("")
os.system('python3 solitaire.py')
print("")
if command_line==("run weight_converter.py"):
print("")
os.system("python3 weight_converter.py")
print("")
if command_line==("run gui_calculator.py"):
print("")
os.system('python3 gui_calculator.py')
print("")
if command_line==("run clock.py"):
print("")
os.system('python3 clock.py')
print("")
if command_line==("count"):
print("")
count_1=input("WARNING: THIS WILL MAKE EYN-DOS UNUSABLE FOR THE REST OF THE SESSION. CONTINUE? (y/n) ")
print("")
if count_1==("y"):
print("")
os.system('python3 counter.py')
print("")
if count_1==("n"):
print("")
print("Command disbanded")
print("")
if command_line==("run insert1.py"):
print("")
os.system('python3 insert1.py')
print("")
if command_line==("troll"):
print("")
print("░░░░░░▄▄▄▄▀▀▀▀▀▀▀▀▄▄▄▄▄▄▄")
print("░░░░░█░░░░░░░░░░░░░░░░░░▀▀▄")
print("░░░░█░░░░░░░░░░░░░░░░░░░░░░█")
print("░░░█░░░░░░▄██▀▄▄░░░░░▄▄▄░░░░█")
print("░▄▀░▄▄▄░░█▀▀▀▀▄▄█░░░██▄▄█░░░░█")
print("█░░█░▄░▀▄▄▄▀░░░░░░░░█░░░░░░░░░█")
print("█░░█░█▀▄▄░░░░░█▀░░░░▀▄░░▄▀▀▀▄░█")
print("░█░▀▄░█▄░█▀▄▄░▀░▀▀░▄▄▀░░░░█░░█")
print("░░█░░░▀▄▀█▄▄░█▀▀▀▄▄▄▄▀▀█▀██░█")
print("░░░█░░░░██░░▀█▄▄▄█▄▄█▄▄██▄░░█")
print("░░░░█░░░░▀▀▄░█░░░█░█▀█▀█▀██░█")
print("░░░░░▀▄░░░░░▀▀▄▄▄█▄█▄█▄█▄▀░░█")
print("░░░░░░░▀▄▄░░░░░░░░░░░░░░░░░░░█")
print("░░░░░░░░░░▀▀▄▄░░░░░░░░░░░░░░░█")
print("░░░░░░░░░░░░░░▀▄▄▄▄▄░░░░░░░░█")
print("░░░░░░░░░░░░░░░░░░█▄▄▄▄▄▄▄▄▀")
print("")
if command_line==("run oregon_trail.py"):
print("")
os.system('python3 oregon_trail.py')
print("")
if command_line==("run snake.py"):
print("")
os.system('python3 snake.py')
print("")
if command_line==("run pong.py"):
print("")
os.system('python3 pong.py')
print("")
if command_line==("run tetris.py"):
print("")
print("Use A to go left, D to go right and spacebar to rotate.")
os.system('python3 tetris.py')
print("")
if command_line==('run invaders.py'):
print("")
print("Use the left arrow to go left, the right arrow to go right, and spacebar to shoot.")
os.system('python3 invaders.py')
print("")
if command_line==("run paintbrush.py"):
print("")
os.system('python3 paintbrush.py')
print("")
if command_line==("!devdebug1!"):
print("")
dev_ver=input("THIS OPTION IS FOR DEVELOPERS AND TESTERS ONLY. IF YOU ARE NOT A DEVELOPER OR TESTER, YOU WILL BE REPORTED TO A HR. CONTINUE? (y/n) ")
print("")
if dev_ver==("n"):
print("")
print("Command disbanded")
print("")
if dev_ver==("y"):
print("")
dev_ver1=input("Enter your provided username: ")
if dev_ver1==("kg2"):
print("")
print("Welcome back, Kian.")
print("")
dev_ver2=input("Enter your provided password: ")
if dev_ver2==("celerysticksfiddlebottom20"):
print("")
print("Welcome to the EYN-DOS development terminal, Kian!")
print("")
if dev_ver2!=("celerysticksfiddlebottom20"):
exit()
if dev_ver1==("cj9"):
print("")
print("Welcome back, Cayden.")
print("")
dev_ver3=input("Enter your provided password: ")
if dev_ver3==("carrotfarmmule90"):
print("")
print("Welcome to the EYN=DOS development terminal, Cayden!")
print("")
if dev_ver3!=("carrotfarmmule90"):
exit()
if dev_ver1==("ig1"):
print("")
print("Welcome back, Ian.")
print("")
dev_ver4=input("Enter your provided password: ")
if dev_ver4==("isaacboatorange30"):
print("")
print("Welcome to the EYN-DOS development terminal, Ian!")
print("")
if dev_ver4!=("isaacboatorange30"):
exit()
if dev_ver1==(""):
exit()
while True:
command_line1=input("C:\DEVDEBUG1\> ")
if command_line1==("debug"):
print("")
print("Coming soon...")
print("")
if command_line1==("end"):
exit()
if command_line1==("eyn_os"):
print("")
print("Welcome to...")
print(" (Built on EYN-DOS)")
print(" ██████████████████████████")
print(" ███░█████░██░░░██░██░░░█░██")
print("██ ██ ██ ██░░█░░░░░░░███░░░██░░░█░░██")
print(" ██ ██ ██░░█████░░░░█░░░░█░█░░█░░██")
print("██ ██ ██ ██░░█░░░░░░░░█░░░░█░░█░█░░██")
print(" ██ ██ ███░█████░░░░█░░░░█░░░██░░██")
print("██ ██ ██ ████████████████████████████")
print(" ██ ██ ███░░░█████░░░░░░█████░░░░██")
print(" ██ ██ ██░░░█░░░░░█░░░░█░░░░░░░░░██")
print(" ██ ██ ██░░░█░░░░░█░░░░░█████░░░░██")
print(" ██ ██░░░█░░░░░█░░░░░░░░░░█░░░██")
print(" ██ ███░░░█████░░░░░░█████░░░██")
print(" ██████████████████████████")
print(" A nostalgic, yet modern")
print(" O.S...")
print("")
os.system('python3 eyn_os_0_1.py')
print("")
if command_line1==("calculate"):
print("")
gc1=input("GUI based or CLI based? (g/c) ")
if gc1==("g"):
print("")
os.system('python3 gui_calculator.py')
print("")
if gc1==("c"):
print("")
os.system('python3 calculator.py')
print("")
if command_line1==("time"):
print("")
os.system('python3 clock.py')
print("")
if command_line1==("coder"):
print("")
print("Coming soon...")
print("")
if command_line1==("count"):
print("")
countperm=input("WARNING: counter.py WILL LOCK YOUR PC UNTIL RESTARTED PHYSICALLY. CONTINUE? (y/n) ")
if countperm==("n"):
print("")
print("Command disbanded")
print("")
if countperm==("y"):
print("")
os.system('python3 counter.py')
print("")
if command_line1==("eynos01 files"):
print("")
print(" - - - - EYNOS1 - - - - ")
print("")
print(" eyn_os_0_1.py - 3kb")
print(" user.folder - 0kb")
print("")
print(" TOTAL: 3kb")
print("")
if command_line1==("dir1 files"):
print("")
print(" - - - - DIR1 - - - - ")
print("")
print(" eyndos.py - 29kb")
print(" calculator.py - 1kb")
print(" minesweeper.py - 9kb")
print(" notebook.py - 1kb")
print(" solitaire.py - 12kb")
print(" test1.py - 1kb")
print(" weight_converter.py - 1kb")
print(" gui_calculator.py - 4kb")
print(" clock.py - 1kb")
print(" oregon_trail.py - 8kb")
print(" snake.py - 4kb")
print(" pong.py - 3kb")
print(" tetris.py - 7kb")
print(" paintbrush.py - 3kb")
print(" test3.py - 15kb")
print(" mouse_detection.py - 1kb")
print("")
print(" TOTAL: 100kb - LEFT: 900kb - 16 Files")
print("")
if command_line1==("return"):
print("")
print("Returning to main terminal...")
print("")
break
if command_line1==("help"):
print("")
print("help = Prints a list of available commands, end = Ends the current EYN-DOS session, eyn_os = Runs the latest version of EYN-OS, calculate = Runs a calculator program, time = Runs a clock program, count = Counts infinitely (locks current EYN-DOS session), (directory) files = Prints files and information about the entered directory, return = Returns you to the main EYN-DOS terminal. Attempting to type an unknown command results in a blank response.")
print("")
if command_line1==("run mouse_detection.py"):
print("")
os.system('python3 mouse_detection.py')
print("")
if command_line==("ver"):
print("")
print("█████████ ███ ███ ███ ███ ██████ ██████ ██████")
print("███ ███ ███ ██████ ███ ███ ███ ███ ███ ███")
print("█████████ ███ ███ ███ ███ ██████ ███ ███ ███ ███ ██████")
print("███ ███ ███ █████ ███ ███ ███ ███ ███")
print("█████████ ███ ███ ███ ██████ ██████ ██████")
print("")
print(" ████ ████████")
print(" ███ ███ ███")
print(" ███ ███")
print(" ███ ███")
print(" ███ ███")
print(" █████████ ██ ███")
print("")
print("EYN-DOS 1.7 (2022)")
print("")
if command_line==("credits"):
print("")
print("The EYN-DOS Team:")
print("")
print(" Primary coder: <NAME> (Founder and CEO of J.K Incorporated)")
print(" Secondary coder: <NAME> (Musician and Lead Artist of J.K Incorporated.")
print(" Logo designer: <NAME>.")
print(" Staff commander: <NAME>")
print(" Everyone involved: <NAME>, <NAME>, <NAME>. and other J.K Incorporated employees.")
print("")
print("-----------------------------------------------------------------------------------------")
print("")
print(" Honorable mentions:")
print("")
print(" <NAME>: Coder of the 'Snake' game included with EYN-DOS.")
print(" shomikj: Coder of the command line version of 'Solitaire' for EYN-DOS.")
print(" <NAME>: Supporter.")
print(" <NAME>: Supporter and artist.")
print(" Github, StackOverflow & GeeksForGeeks: Saver of countless hours of research.")
print(" You: For using EYN-DOS.")
print(" Linux: Just awesome")
print("")
print(" Thank you for using EYN-DOS!")
print("")
if command_line==("run insert2.py"):
print("")
os.system("python3 insert2.py")
print("")
if command_line==("run insert3.py"):
print("")
os.system("python3 insert3.py")
print("")
if command_line==("run insert4.py"):
print("")
os.system("python3 insert4.py")
print("")
if command_line==("run insert5.py"):
print("")
os.system("python3 insert5.py")
print("")
if command_line==("run insert6.py"):
print("")
os.system("python3 insert6.py")
print("")
if command_line==("run insert7.py"):
print("")
os.system("python3 insert7.py")
print("")
if command_line==("run insert8.py"):
print("")
os.system("python3 insert8.py")
print("")
if command_line==("run insert9.py"):
print("")
os.system("python3 insert9.py")
print("")
if command_line==("browser"):
print("")
os.system("python3 browser.py")
print("")
if command_line==("run cli_notebook.py"):
print("")
print("Loading the EYN-DOS notebook terminal...")
print("")
os.system('python3 cli_notebook.py')
print("")
if command_line==("cdate"):
print("")
os.system("python3 c-date.py")
print("")
if command_line==("read"):
print("")
txt_name=input("Enter the name of the file you want to read. (Including extension): ")
print("")
with open(txt_name) as f:
contents = f.read()
print(contents)
f.close
print("")
if command_line==("find"):
print("")
file_find=input("What file do you want to find? (Including extension): ")
print("")
print(os.path.abspath(file_find))
print("")
if command_line==("write"):
print("")
wri_name=input("What do you want to call your new file? (Extension included): ")
print("")
print("Type what you want your file to contain! (1 line): ")
print("")
wri_con=input("> ")
print("")
print("Saving...")
print("")
with open((wri_name), 'w') as f:
f.write("")
f.write(wri_con)
f.write("")
print("Saved.")
print("")
if command_line==("del"):
print("")
del_file=input("What file do you want to delete? (Including extension): ")
print("")
if del_file==(wri_name):
print("")
print("Deleting file...")
os.remove(wri_name)
print("")
print("File deleted.")
print("")
else:
print("")
print("The file entered is invalid.")
print("")
print("No action will be taken.")
print("")
if command_line==("size"):
print("")
size_cl=input("What file do you want the size to? (Including extension): ")
print("")
print(os.path.getsize(size_cl)/1024)
print(" | Kilobytes")
print("")
if command_line==("cd DIR2"):
print("")
while True:
dir2_line=input("C:\DIR2\> ")
if dir2_line==("cd"):
print("")
print("Returning to the EYN-DOS main terminal...")
print("")
break
if dir2_line==("dir"):
print("")
print("No files found!")
print("")
if dir2_line=="listdir":
print("")
print("DIR1 - ", float(size)/1024, " Kilobytes")
print("")
print("DIR2 - ", "0.0", " Kilobytes")
print("")
print("DIR3 - ", "0.0", " Kilobytes")
print("")
if dir2_line==("write"):
print("")
print("Unsupported command for DIR2 in EYN-DOS 1.62.")
print("")
if dir2_line==("del"):
print("")
print("No files to delete.")
print("")
if dir2_line==("read"):
print("")
print("No files to read.")
print("")
if command_line==("cd DIR3"):
print("")
while True:
dir3_line=input("C:\DIR3\>")
if dir3_line==("cd"):
print("")
print("Returning to the EYN-DOS main terminal...")
print("")
break
if dir3_line==("dir"):
print("")
print("No files found!")
print("")
if dir3_line=="listdir":
print("")
print("DIR1 - ", float(size)/1024, " Kilobytes")
print("")
print("DIR2 - ", "0.0", " Kilobytes")
print("")
print("DIR3 - ", "0.0", " Kilobytes")
print("")
if dir3_line==("write"):
print("")
print("Unsupported command for DIR3 in EYN-DOS 1.62.")
print("")
if dir3_line==("del"):
print("")
print("No files to delete.")
print("")
if dir3_line==("read"):
print("")
print("No files to read.")
print("")
if command_line==("clear"):
print("")
os.system("clear")
if command_line==("errorlist"):
print("")
print(" --A ERRORS--")
print("")
print("EYN_A1 = No floppy drive detected.")
print(" | EYN_A1-NDI = No floppy diskette inserted.")
print("EYN_A2 = Corrupted/unreadable floppy diskette.")
print("EYN_A3 = Invalid diskette format/invalid diskette type.")
print(' | Additional info = Only 3.5" floppy diskettes are supported.')
print("")
print(" --B ERRORS--")
print("")
print("All A Errors apply to the B drive.")
print("")
print(" --C ERRORS--")
print("")
print("EYN_C1 = Unable to boot EYN-DOS due to a C drive error.")
print(" | EYN_C1-1 = Unable to read a critical system file (Possible cause of EYN_C1).")
print("EYN_C2 = Unable to boot EYN-DOS due to a permission issue.")
print("EYN_C3 = Feature not supported in EYN-DOS.")
print("EYN_C3_FNI = Feature not (yet) included in EYN-DOS.")
print("EYN_C4 = Invalid command or file name.")
print("")
print(" --D ERRORS--")
print("")
print("EYN_D1 = Recovery not created.")
print(" | EYN_D2 = Recovery unable to be created.")
print(" | EYN_D2-LNS = Recovery unable to be created due to low/nul storage.")
print("")
print("All C Errors apply to the D drive.")
print("")
print(" --E ERRORS--")
print("")
print("EYN_E1 = Unable to read the E drive due to an E drive error.")
print(" | EYN_E1-NDI = No disc inserted.")
print("EYN_E2 = Corrupted/unreadable disc.")
print("EYN_E3 = Unable to read the E drive due to a permission issue.")
print("EYN_E4 = No disc drive detected.")
print("")
print(" --ADDITIONAL ERRORS--")
print("")
print("EYN_AD1 = Unable to read the current date/time.")
print(" | EYN_AD1-CM = Unable to read the current date/time due to CMOS battery error.")
print("EYN_AD2 = Unable to read/run software.")
print(" | EYN_AD2-NMD = Unable to read/run software due to no essential module detected.")
print("")
if command_line==("A:"):
print("")
os.system("python3 A.py")
print("")
if command_line==("B:"):
print("")
os.system("python3 B.py")
print("")
if command_line==("D:"):
print("")
os.system("python3 D.py")
print("")
if command_line==("E:"):
print("")
os.system("python3 E.py")
print("")
```
#### File: JK-Incorporated/EYN-DOS/minesweeper.py
```python
import tkinter
import configparser
import random
import os
import tkinter.messagebox
import tkinter.simpledialog
window = tkinter.Tk()
window.title("minesweeper")
rows = 10
cols = 10
mines = 10
field = []
buttons = []
colors = ['#FFFFFF', '#0000FF', '#008200', '#FF0000', '#000084', '#840000', '#008284', '#840084', '#000000']
gameover = False
customsizes = []
def createMenu():
menubar = tkinter.Menu(window)
menusize = tkinter.Menu(window, tearoff=0)
menusize.add_command(label="small (10x10 with 10 mines)", command=lambda: setSize(10, 10, 10))
menusize.add_command(label="medium (20x20 with 40 mines)", command=lambda: setSize(20, 20, 40))
menusize.add_command(label="big (35x35 with 120 mines)", command=lambda: setSize(35, 35, 120))
menusize.add_command(label="custom", command=setCustomSize)
menusize.add_separator()
for x in range(0, len(customsizes)):
menusize.add_command(label=str(customsizes[x][0])+"x"+str(customsizes[x][1])+" with "+str(customsizes[x][2])+" mines", command=lambda customsizes=customsizes: setSize(customsizes[x][0], customsizes[x][1], customsizes[x][2]))
menubar.add_cascade(label="size", menu=menusize)
menubar.add_command(label="exit", command=lambda: window.destroy())
window.config(menu=menubar)
def setCustomSize():
global customsizes
r = tkinter.simpledialog.askinteger("Custom size", "Enter amount of rows")
c = tkinter.simpledialog.askinteger("Custom size", "Enter amount of columns")
m = tkinter.simpledialog.askinteger("Custom size", "Enter amount of mines")
while m > r*c:
m = tkinter.simpledialog.askinteger("Custom size", "Maximum mines for this dimension is: " + str(r*c) + "\nEnter amount of mines")
customsizes.insert(0, (r,c,m))
customsizes = customsizes[0:5]
setSize(r,c,m)
createMenu()
def setSize(r,c,m):
global rows, cols, mines
rows = r
cols = c
mines = m
saveConfig()
restartGame()
def saveConfig():
global rows, cols, mines
    config = configparser.ConfigParser()
config.add_section("game")
config.set("game", "rows", str(rows))
config.set("game", "cols", str(cols))
config.set("game", "mines", str(mines))
config.add_section("sizes")
config.set("sizes", "amount", str(min(5,len(customsizes))))
for x in range(0,min(5,len(customsizes))):
config.set("sizes", "row"+str(x), str(customsizes[x][0]))
config.set("sizes", "cols"+str(x), str(customsizes[x][1]))
config.set("sizes", "mines"+str(x), str(customsizes[x][2]))
with open("config.ini", "w") as file:
config.write(file)
def loadConfig():
global rows, cols, mines, customsizes
config = configparser.ConfigParser()
config.read("config.ini")
rows = config.getint("game", "rows")
cols = config.getint("game", "cols")
mines = config.getint("game", "mines")
amountofsizes = config.getint("sizes", "amount")
for x in range(0, amountofsizes):
customsizes.append((config.getint("sizes", "row"+str(x)), config.getint("sizes", "cols"+str(x)), config.getint("sizes", "mines"+str(x))))
def prepareGame():
global rows, cols, mines, field
field = []
for x in range(0, rows):
field.append([])
for y in range(0, cols):
field[x].append(0)
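    # Randomly place the mines (-1), then increment the neighbour count of each surrounding non-mine cell.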
for _ in range(0, mines):
x = random.randint(0, rows-1)
y = random.randint(0, cols-1)
while field[x][y] == -1:
x = random.randint(0, rows-1)
y = random.randint(0, cols-1)
field[x][y] = -1
if x != 0:
if y != 0:
if field[x-1][y-1] != -1:
field[x-1][y-1] = int(field[x-1][y-1]) + 1
if field[x-1][y] != -1:
field[x-1][y] = int(field[x-1][y]) + 1
if y != cols-1:
if field[x-1][y+1] != -1:
field[x-1][y+1] = int(field[x-1][y+1]) + 1
if y != 0:
if field[x][y-1] != -1:
field[x][y-1] = int(field[x][y-1]) + 1
if y != cols-1:
if field[x][y+1] != -1:
field[x][y+1] = int(field[x][y+1]) + 1
if x != rows-1:
if y != 0:
if field[x+1][y-1] != -1:
field[x+1][y-1] = int(field[x+1][y-1]) + 1
if field[x+1][y] != -1:
field[x+1][y] = int(field[x+1][y]) + 1
if y != cols-1:
if field[x+1][y+1] != -1:
field[x+1][y+1] = int(field[x+1][y+1]) + 1
def prepareWindow():
global rows, cols, buttons
tkinter.Button(window, text="Restart", command=restartGame).grid(row=0, column=0, columnspan=cols, sticky=tkinter.N+tkinter.W+tkinter.S+tkinter.E)
buttons = []
for x in range(0, rows):
buttons.append([])
for y in range(0, cols):
b = tkinter.Button(window, text=" ", width=2, command=lambda x=x,y=y: clickOn(x,y))
b.bind("<Button-3>", lambda e, x=x, y=y:onRightClick(x, y))
b.grid(row=x+1, column=y, sticky=tkinter.N+tkinter.W+tkinter.S+tkinter.E)
buttons[x].append(b)
def restartGame():
global gameover
gameover = False
for x in window.winfo_children():
if type(x) != tkinter.Menu:
x.destroy()
prepareWindow()
prepareGame()
def clickOn(x,y):
global field, buttons, colors, gameover, rows, cols
if gameover:
return
buttons[x][y]["text"] = str(field[x][y])
if field[x][y] == -1:
buttons[x][y]["text"] = "*"
buttons[x][y].config(background='red', disabledforeground='black')
gameover = True
tkinter.messagebox.showinfo("Game Over", "You have lost.")
for _x in range(0, rows):
for _y in range(cols):
if field[_x][_y] == -1:
buttons[_x][_y]["text"] = "*"
else:
buttons[x][y].config(disabledforeground=colors[field[x][y]])
if field[x][y] == 0:
buttons[x][y]["text"] = " "
autoClickOn(x,y)
buttons[x][y]['state'] = 'disabled'
buttons[x][y].config(relief=tkinter.SUNKEN)
checkWin()
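# Flood fill: recursively reveal neighbouring cells, expanding further only through cells with no adjacent mines.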
def autoClickOn(x,y):
global field, buttons, colors, rows, cols
if buttons[x][y]["state"] == "disabled":
return
if field[x][y] != 0:
buttons[x][y]["text"] = str(field[x][y])
else:
buttons[x][y]["text"] = " "
buttons[x][y].config(disabledforeground=colors[field[x][y]])
buttons[x][y].config(relief=tkinter.SUNKEN)
buttons[x][y]['state'] = 'disabled'
if field[x][y] == 0:
if x != 0 and y != 0:
autoClickOn(x-1,y-1)
if x != 0:
autoClickOn(x-1,y)
if x != 0 and y != cols-1:
autoClickOn(x-1,y+1)
if y != 0:
autoClickOn(x,y-1)
if y != cols-1:
autoClickOn(x,y+1)
if x != rows-1 and y != 0:
autoClickOn(x+1,y-1)
if x != rows-1:
autoClickOn(x+1,y)
if x != rows-1 and y != cols-1:
autoClickOn(x+1,y+1)
def onRightClick(x,y):
global buttons
if gameover:
return
if buttons[x][y]["text"] == "?":
buttons[x][y]["text"] = " "
buttons[x][y]["state"] = "normal"
elif buttons[x][y]["text"] == " " and buttons[x][y]["state"] == "normal":
buttons[x][y]["text"] = "?"
buttons[x][y]["state"] = "disabled"
def checkWin():
global buttons, field, rows, cols
win = True
for x in range(0, rows):
for y in range(0, cols):
if field[x][y] != -1 and buttons[x][y]["state"] == "normal":
win = False
if win:
tkinter.messagebox.showinfo("Gave Over", "You have won.")
if os.path.exists("config.ini"):
loadConfig()
else:
saveConfig()
createMenu()
prepareWindow()
prepareGame()
window.mainloop()
```
#### File: JK-Incorporated/EYN-DOS/solitaire.py
```python
"Credit to 'shomikj' on GitHub for this code!"
class Card:
def __init__(self, param_rank):
self.rank = param_rank
self.str_rank = ['A ', '2 ', '3 ', '4 ', '5 ', '6 ', '7 ', '8 ', '9 ', '10 ', 'J ', 'Q ', 'K ']
self.visible = False
def __str__(self):
if self.visible:
return self.str_rank[self.rank-1]
else:
return '- '
def hide(self):
self.visible = False
def show(self):
self.visible = True
class Card_Stack:
def __init__(self):
self.cards = []
def __len__(self):
return len(self.cards)
def top(self):
if not self.empty():
return self.cards[-1]
else:
return None
def empty(self):
return len(self) == 0
class Tableau(Card_Stack):
def __init__(self):
Card_Stack.__init__(self)
def valid(self, new):
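        # A move is valid only if every moved card is face-up and the run descends by exactly one rank,
        # starting one rank below the destination's top card (a King must start an empty tableau).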
threshold = 14
if self.top():
threshold = self.top().rank
for c in new:
if (c.visible) and (c.rank == threshold-1):
threshold = c.rank
else:
return False
return True
def add(self, new):
for c in new:
self.cards.append(c)
def view_cards(self, i):
if (i >= 0) and (i < len(self)):
return self.cards[i:]
else:
return []
def remove_cards(self, i):
if (i >= 0) and (i < len(self)):
for c in range(i, len(self)):
if not self.cards[c].visible:
return []
answer = self.cards[i:]
self.cards = self.cards[:i]
if not self.empty():
self.top().show()
return answer
else:
return []
def next_spot(self, i):
return (i == len(self))
def last_spot(self, i):
return (i == len(self)-1)
def get_str(self, i):
if i < len(self.cards):
return str(self.cards[i])
else:
return ' '
class Foundation(Card_Stack):
def __init__(self):
Card_Stack.__init__(self)
def valid(self, c):
threshold = 0
if self.top():
threshold = self.top().rank
return (c.rank == threshold+1)
def add(self, c):
if not self.empty():
self.cards[-1].hide()
self.cards.append(c)
def __str__(self):
if self.empty():
return '- '
else:
return str(self.top())
def full(self):
if not (len(self) == 13):
return False
        for i in range(1, 14):
            # cards are zero-indexed, so rank i sits at index i-1
            if not (self.cards[i-1].rank == i):
                return False
return True
from random import randint
class Deck(Card_Stack):
def __init__(self):
Card_Stack.__init__(self)
for i in range(0, 4):
for r in range(1, 14):
c = Card(r)
self.cards.append(c)
self.shuffle()
self.pointer = 0
def shuffle(self):
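        # Fisher-Yates shuffle: walk from the last card down, swapping each card with a randomly chosen card at or below it.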
for i in range(len(self.cards)-1, 0, -1):
j = randint(0, i)
self.cards[i],self.cards[j] = self.cards[j],self.cards[i]
def top(self):
return self.cards[self.pointer]
def increment(self):
self.cards[self.pointer].hide()
self.pointer += 1
if (self.pointer >= len(self)):
self.pointer = 0
self.cards[self.pointer].show()
def pop(self):
answer = self.cards[self.pointer]
del self.cards[self.pointer]
self.cards[self.pointer].show()
return answer
class Game:
def __init__(self):
self.tableaus = []
self.foundations = []
self.deck = Deck()
for i in range(0, 7):
self.tableaus.append(Tableau())
for i in range(7, 0, -1):
for j in range(0, i):
self.tableaus[j].add([self.deck.pop()])
self.tableaus[j].top().hide()
self.deck.top().show()
for t in self.tableaus:
t.top().show()
for i in range(0, 4):
self.foundations.append(Foundation())
def game_over(self):
for f in self.foundations:
if not f.full():
return False
return True
def valid_row(self, str):
if (len(str) == 2) or (len(str) == 3):
if (str[0] == 'R'):
return True
return False
def valid_col(self, str):
return str in ['T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'F1', 'F2', 'F3', 'F4', 'D0']
def valid_tableau(self, i):
return (i >= 0) and (i < len(self.tableaus))
def valid_foundation(self, i):
return (i >= 0) and (i < len(self.foundations))
def move(self, command):
sequence = command.split()
if (len(sequence) != 4):
print("Invalid Command: format error")
return False
from_row = sequence[0]
from_col = sequence[1]
to_row = sequence[2]
to_col = sequence[3]
if not (self.valid_col(from_col) and self.valid_col(to_col) and self.valid_row(from_row) and self.valid_row(to_row)):
print("Invalid Command: format error")
return False
if (from_row == 'R0') and (from_col == 'D0') and (to_row == 'R0') and (to_col == 'D0'):
self.deck.increment()
return True
if (from_row == 'R0') and (from_col == 'D0') and ('T' == to_col[0]):
to_row = int(to_row[1:]) - 1
to_col = int(to_col[1:]) - 1
if not self.valid_tableau(to_col):
print("Invalid Command: tableau column error")
return False
if not self.tableaus[to_col].next_spot(to_row):
print("Invalid Command: tableau row error")
return False
move_card = [self.deck.top()]
if self.tableaus[to_col].valid(move_card):
self.tableaus[to_col].add([self.deck.pop()])
return True
else:
print("Invalid Command: can't move selected cards")
return False
if ('T' == from_col[0]) and ('T' == to_col[0]):
from_col = int(from_col[1:]) - 1
to_col = int(to_col[1:]) - 1
from_row = int(from_row[1:]) - 1
to_row = int(to_row[1:]) - 1
if not self.valid_tableau(from_col) or not self.valid_tableau(to_col):
print("Invalid Command: tableau column error")
return False
if not self.tableaus[to_col].next_spot(to_row):
print("Invalid Command: destination tableau row error")
return False
if (self.tableaus[from_col].empty()) or (from_row < 0) or (from_row >= len(self.tableaus[from_col])):
print("Invalid Command: source tableau row error")
return False
move_cards = self.tableaus[from_col].view_cards(from_row)
if self.tableaus[to_col].valid(move_cards):
self.tableaus[to_col].add(self.tableaus[from_col].remove_cards(from_row))
return True
else:
print("Invalid Command: can't move selected cards")
return False
if (from_row == 'R0') and (from_col == 'D0') and (to_row == 'R0') and ('F' == to_col[0]):
to_col = int(to_col[1:]) - 1
if not self.valid_foundation(to_col):
print("Invalid Command: foundation column error")
return False
move_card = self.deck.top()
if (self.foundations[to_col].valid(move_card)):
self.foundations[to_col].add(self.deck.pop())
return True
else:
print("Invalid Command: can't move selected cards")
return False
if (from_col[0] == 'T') and (to_row == 'R0') and ('F' == to_col[0]):
from_col = int(from_col[1:]) - 1
to_col = int(to_col[1:]) - 1
from_row = int(from_row[1:]) - 1
if not self.valid_tableau(from_col):
print("Invalid Command: source tableau column error")
return False
if not self.tableaus[from_col].last_spot(from_row):
print("Invalid Command: source tableau row error")
return False
if not self.valid_foundation(to_col):
print("Invalid Command: destination foundation column error")
return False
move_card = self.tableaus[from_col].top()
if (self.foundations[to_col].valid(move_card)):
move_card = self.tableaus[from_col].remove_cards(from_row)
move_card = move_card[0]
self.foundations[to_col].add(move_card)
return True
else:
print("Invalid Command: can't move selected cards")
return False
def __str__(self):
spot = ' '
header = spot + 'D0 ' + spot + spot + 'F1 ' + 'F2 ' + 'F3 ' + 'F4 ' + '\n'
header_cards = 'R0 ' + str(self.deck.top()) + spot + spot
for f in self.foundations:
header_cards += str(f)
header_cards += '\n' + '\n'
tableau_header = spot
for i in range(0, 7):
tableau_header += 'T' + str(i+1) + ' '
tableau_header += '\n'
tableau_str = ''
max_len = max([len(i) for i in self.tableaus])
for r in range(0, max_len+1):
tableau_str += 'R' + str(r+1)
if r < 9:
tableau_str += ' '
for t in self.tableaus:
tableau_str += t.get_str(r)
tableau_str += '\n'
return header + header_cards + tableau_header + tableau_str
def main():
game = Game()
print()
print("Welcome to Solitaire: Good Luck!")
print()
print("Game Instructions")
print("Move Command Format: [Source Row] [Source Column] [Destination Row] [Destination Column]")
print()
print("Move Types and Examples")
print("(1) New Deck Card: R0 D0 R0 D0")
print("(2) Deck to Tableau: R0 D0 R8 T1")
print("(3) Tableau to Tableau: R7 T1 R7 T2 (supports multiple cards)")
print("(4) Deck to Foundation: R0 D0 R0 F1")
print("(5) Tableau to Foundation: R7 T1 R0 F1 (supports 1 card only)")
print("(6) Return to EYN-DOS main terminal: return")
print()
print(game)
while not game.game_over():
print()
command = input("What is your move?: ")
if (command == 'return'):
print()
print("Returning to the EYN-DOS main terminal...")
break
result = False
try:
result = game.move(command)
except:
print()
print("Invalid Command")
print()
if result:
print(game)
if __name__ == "__main__":
main()
"Credit to 'shomikj' on GitHub for this code!"
```
#### File: JK-Incorporated/EYN-DOS/tetris.py
```python
import turtle
import time
import random
wn = turtle.Screen()
wn.title("tetris")
wn.bgcolor("white")
wn.setup(width=400, height=600)
wn.tracer(0)
delay = 0.1
class Shape():
def __init__(self):
self.x = 5
self.y = 0
self.color = random.randint(1, 7)
# Block Shape
square = [[1,1],
[1,1]]
horizontal_line = [[1,1,1,1]]
vertical_line = [[1],
[1],
[1],
[1]]
left_l = [[1,0,0,0],
[1,1,1,1]]
right_l = [[0,0,0,1],
[1,1,1,1]]
left_s = [[1,1,0],
[0,1,1]]
right_s = [[0,1,1],
[1,1,0]]
t = [[0,1,0],
[1,1,1]]
shapes = [square, horizontal_line, vertical_line, left_l, right_l, left_s, right_s, t]
# Choose a random shape each time
self.shape = random.choice(shapes)
self.height = len(self.shape)
self.width = len(self.shape[0])
# print(self.height, self.width)
def move_left(self, grid):
if self.x > 0:
if grid[self.y][self.x - 1] == 0:
self.erase_shape(grid)
self.x -= 1
def move_right(self, grid):
if self.x < 12 - self.width:
if grid[self.y][self.x + self.width] == 0:
self.erase_shape(grid)
self.x += 1
def draw_shape(self, grid):
for y in range(self.height):
for x in range(self.width):
if(self.shape[y][x]==1):
grid[self.y + y][self.x + x] = self.color
def erase_shape(self, grid):
for y in range(self.height):
for x in range(self.width):
if(self.shape[y][x]==1):
grid[self.y + y][self.x + x] = 0
def can_move(self, grid):
result = True
for x in range(self.width):
# Check if bottom is a 1
if(self.shape[self.height-1][x] == 1):
if(grid[self.y + self.height][self.x + x] != 0):
result = False
return result
def rotate(self, grid):
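        # Rotate 90 degrees clockwise by reading each column bottom-to-top into a new row;
        # the rotation is only kept if the rotated piece still fits horizontally on the grid.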
# First erase_shape
self.erase_shape(grid)
rotated_shape = []
for x in range(len(self.shape[0])):
new_row = []
for y in range(len(self.shape)-1, -1, -1):
new_row.append(self.shape[y][x])
rotated_shape.append(new_row)
right_side = self.x + len(rotated_shape[0])
if right_side < len(grid[0]):
self.shape = rotated_shape
# Update the height and width
self.height = len(self.shape)
self.width = len(self.shape[0])
grid = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
pen = turtle.Turtle()
pen.penup()
pen.speed(0)
pen.shape("square")
pen.setundobuffer(None)
def draw_grid(pen, grid):
pen.clear()
top = 230
left = -110
colors = ["black", "lightblue", "blue", "orange", "yellow", "green", "purple", "red"]
for y in range(len(grid)):
for x in range(len(grid[0])):
screen_x = left + (x * 20)
screen_y = top - (y * 20)
color_number = grid[y][x]
color = colors[color_number]
pen.color(color)
pen.goto(screen_x, screen_y)
pen.stamp()
def check_grid(grid):
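    # Scan rows from the bottom; each completely filled row scores 10 points and is removed by copying every row above it down one step.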
y = 23
while y > 0:
is_full = True
for x in range(0, 12):
if grid[y][x] == 0:
is_full = False
y -= 1
break
if is_full:
global score
score += 10
draw_score(pen, score)
for copy_y in range(y, 0, -1):
for copy_x in range(0, 12):
grid[copy_y][copy_x] = grid[copy_y-1][copy_x]
def draw_score(pen, score):
pen.color("blue")
pen.hideturtle()
pen.goto(-75, 350)
pen.write("Score: {}".format(score), move=False, align="left", font=("Arial", 24, "normal"))
shape = Shape()
wn.listen()
wn.onkeypress(lambda: shape.move_left(grid), "a")
wn.onkeypress(lambda: shape.move_right(grid), "d")
wn.onkeypress(lambda: shape.rotate(grid), "space")
score = 0
draw_score(pen, score)
while True:
wn.update()
if shape.y == 23 - shape.height + 1:
shape = Shape()
check_grid(grid)
elif shape.can_move(grid):
shape.erase_shape(grid)
shape.y +=1
shape.draw_shape(grid)
else:
shape = Shape()
check_grid(grid)
draw_grid(pen, grid)
draw_score(pen, score)
time.sleep(delay)
wn.mainloop()
``` |
{
"source": "jkingben/minos",
"score": 2
} |
#### File: minos/build/build_utils.py
```python
import ConfigParser
import os
import subprocess
from string import Template
from minos_config import Log
MINOS_ROOT = os.getenv("MINOS_ROOT")
ENV_PIP = os.getenv("ENV_PIP")
BUILD_INFO_FILE = os.getenv("BUILD_INFO_FILE")
BUILD_OFFLINE_REQUIREMENTS_FILE = os.getenv("BUILD_OFFLINE_REQUIREMENTS_FILE")
def execute_command(cmd, log_message="", error_message=""):
if log_message:
Log.print_info(log_message)
try:
subprocess.check_call(cmd)
except BaseException, e:
Log.print_critical('ERROR: %s' % error_message if error_message else str(e))
def check_command_output(cmd, error_message="", skip_error=False):
try:
out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except BaseException, e:
if skip_error:
return 0
else:
Log.print_critical('ERROR: %s' % error_message if error_message else str(e))
return 1
def get_command_variable(cmd):
child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
out = child.communicate()
return out[0].strip()
def get_process_running_pid(pid_file):
try:
with open(pid_file) as fp:
return int(fp.readline())
except ValueError, e:
Log.print_critical("Error: Pid file %s is null" % pid_file)
def check_process_is_running(pid_file):
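  # os.kill(pid, 0) sends no signal but raises OSError if the pid does not exist, so it works as a liveness check.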
if not os.path.exists(pid_file):
return False
process_pid = get_process_running_pid(pid_file)
try:
os.kill(process_pid, 0)
return True
except OSError:
return False
def exec_daemon_script(dest_path, script, *extra_para):
os.chdir(dest_path)
cmd = ["%s" % script]
cmd.extend(list(extra_para))
execute_command(cmd)
os.chdir(MINOS_ROOT)
def start_daemon_process(process_name, pid_file, dest_path, script, *extra_para):
if check_process_is_running(pid_file):
Log.print_warning("%s is running, please check" % process_name)
return
Log.print_info("Starting %s" % process_name)
exec_daemon_script(dest_path, script, *extra_para)
Log.print_success("Start %s success" % process_name)
def stop_daemon_process(process_name, pid_file, dest_path, script):
if not check_process_is_running(pid_file):
Log.print_warning("%s is not running" % process_name)
return
Log.print_info("Stopping %s" % process_name)
exec_daemon_script(dest_path, script, str(get_process_running_pid(pid_file)))
Log.print_success("Stop %s success" % process_name)
def generate_config_file(template_file, dest_file, config_dict):
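  # Render the $placeholder template with string.Template.safe_substitute and write the result to dest_file.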
config_template = Template(open(template_file).read())
config_file = config_template.safe_substitute(config_dict)
with open(dest_file, 'w') as output:
output.write(config_file)
def output_build_info(component, info_key, info_val):
build_info_parser = ConfigParser.SafeConfigParser()
build_info_parser.read([BUILD_INFO_FILE])
if not build_info_parser.has_section(component):
build_info_parser.add_section(component)
build_info_parser.set(component, info_key, str(info_val))
with open(BUILD_INFO_FILE, 'wb') as build_info:
build_info_parser.write(build_info)
def get_build_info_option(component, option):
build_info_parser = ConfigParser.SafeConfigParser()
build_info_parser.read([BUILD_INFO_FILE])
if build_info_parser.has_option(component, option):
return build_info_parser.get(component, option)
return None
def check_module_installed(module):
try:
__import__(module)
except ImportError:
return 0
return 1
def pip_install_offline(offline_package_dir):
cmd = [ENV_PIP, "install", "--no-index", "--find-links",
offline_package_dir, "-r", BUILD_OFFLINE_REQUIREMENTS_FILE]
execute_command(cmd)
def pip_install(module, module_version):
log_message = "Installing %s" % module
cmd = [ENV_PIP, "install", "%s>=%s" % (module, module_version)]
execute_command(cmd, log_message=log_message)
def pip_install_url(module, module_version):
log_message = "Installing %s" % module
cmd = [ENV_PIP, "install", "%s" % (module_version)]
execute_command(cmd, log_message=log_message)
def check_and_install_modules(modules_list):
for module_key, module_val, module_version in modules_list:
if not check_module_installed(module_key):
pip_install(module_val, module_version)
def check_and_install_modules_url(modules_list):
for module_key, module_val, module_version in modules_list:
if not check_module_installed(module_key):
pip_install_url(module_val, module_version)
``` |
{
"source": "JKingKong/mmdetection",
"score": 2
} |
#### File: models/detectors/test_mixins.py
```python
import logging
import sys
import torch
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, merge_aug_proposals, multiclass_nms)
logger = logging.getLogger(__name__)
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class RPNTestMixin(object):
if sys.version_info >= (3, 7):
async def async_test_rpn(self, x, img_metas, rpn_test_cfg):
sleep_interval = rpn_test_cfg.pop('async_sleep_interval', 0.025)
async with completed(
__name__, 'rpn_head_forward',
sleep_interval=sleep_interval):
rpn_outs = self.rpn_head(x)
proposal_inputs = rpn_outs + (img_metas, rpn_test_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
return proposal_list
def simple_test_rpn(self, x, img_metas, rpn_test_cfg):
rpn_outs = self.rpn_head(x)
proposal_inputs = rpn_outs + (img_metas, rpn_test_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
return proposal_list
def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
imgs_per_gpu = len(img_metas[0])
aug_proposals = [[] for _ in range(imgs_per_gpu)]
for x, img_meta in zip(feats, img_metas):
proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
for i, proposals in enumerate(proposal_list):
aug_proposals[i].append(proposals)
# reorganize the order of 'img_metas' to match the dimensions
# of 'aug_proposals'
aug_img_metas = []
for i in range(imgs_per_gpu):
aug_img_meta = []
for j in range(len(img_metas)):
aug_img_meta.append(img_metas[j][i])
aug_img_metas.append(aug_img_meta)
# after merging, proposals will be rescaled to the original image size
merged_proposals = [
merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
]
return merged_proposals
class BBoxTestMixin(object):
if sys.version_info >= (3, 7):
async def async_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False,
bbox_semaphore=None,
global_lock=None):
"""Async test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
async with completed(
__name__, 'bbox_head_forward',
sleep_interval=sleep_interval):
cls_score, bbox_pred = self.bbox_head(roi_feats)
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False,
mode_name=None,
save_mode=False,
):
####====================================================================================
"""Test only det bboxes without augmentation."""
        '''
        Other models' rois and roi_feats could be loaded here for feature fusion.
        '''
rois = bbox2roi(proposals)
        # Calls mmdet/models/roi_extractors/single_level.py to
        # 1. obtain roi_feats
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
        # Use roi_feats to obtain cls_score and bbox_pred,
        # the return values of the forward method in mmdet/models/bbox_heads/convfc_bbox_head.py:
        # classification scores and regressed bounding-box parameters.
        # cls_score shape: num_boxes * 2 (column 0 is the background score and is ignored)
        # bbox_pred shape: num_boxes * 8
        '''
        Other models' roi_feats could be loaded here for feature fusion.
        '''
cls_score, bbox_pred = self.bbox_head(roi_feats)
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
        # Load a saved roi_feats tensor (already filtered by low score and suppressed by NMS)
# root_path = "/content/mmdetection/"
# picture_name = "Z108"
# save_path = root_path + picture_name + "_filter_final_roi_feats.pt"
# roi_feats = torch.load(save_path)
# save_path = root_path + picture_name + "_filter_final_rois.pt"
# rois = torch.load(save_path)
# save_path = root_path + picture_name + "_filter_final_bbox_pred.pt"
# bbox_pred = torch.load(save_path)
# save_path = root_path + picture_name + "_filter_final_cls_score.pt"
# cls_score = torch.load(save_path)
        # det_bboxes shape: (number of boxes kept after NMS) * 5
'''
# print(img_metas)
        img_metas can be passed in here:
[{
'filename': '/content/mmdetection/data/coco/val2017/Z107.jpg',
'ori_shape': (720, 1280, 3),
'img_shape': (750, 1333, 3),
'pad_shape': (768, 1344, 3),
'scale_factor': 1.04140625,
'flip': False,
'img_norm_cfg': {
'mean': array([123.675, 116.28, 103.53], dtype = float32),
'std': array([58.395, 57.12, 57.375], dtype = float32),
'to_rgb': True
}
}]
'''
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
            rois,  # original argument; a loaded .pt file must match this shape, so it also has to be saved and reloaded here
            cls_score,  # original argument; a loaded .pt file must match this shape, so it also has to be saved and reloaded here
            bbox_pred,  # original argument; a loaded .pt file must match this shape, so it also has to be saved and reloaded here
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg,
save_mode=save_mode,
            roi_feats=roi_feats,  # newly added argument, used to obtain the feature maps of the predicted boxes
img_metas=img_metas,
mode_name=mode_name
)
# print()
# print("===================****************=====================")
# print("--- current function from ", sys._getframe().f_code.co_filename)
# print("--- current function is ", sys._getframe().f_code.co_name)
# print()
# print("--- called from file ", sys._getframe().f_back.f_code.co_filename)
# print("--- called by function ", sys._getframe().f_back.f_code.co_name)
# print("--- called at line ", sys._getframe().f_back.f_lineno)
# print("===================****************=====================")
# print()
# print()
# print("--------------------------------test_mixins.py------------------------------------------------------")
# print("===roi_feats:",roi_feats.shape)
# print()
# print("===cls_score:",cls_score.shape)
# print("===bbox_pred:",bbox_pred.shape)
# print()
# print("===det_bboxes:",det_bboxes.shape)
# print("===det_labels:",det_labels.shape)
# print("--------------------------------------------------------------------------------------")
# print()
return det_bboxes, det_labels
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip)
rois = bbox2roi([proposals])
# recompute feature maps to save GPU memory
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
cls_score, bbox_pred = self.bbox_head(roi_feats)
bboxes, scores = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin(object):
if sys.version_info >= (3, 7):
async def async_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False,
mask_test_cfg=None):
# image shape of the first image in the batch (only one)
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[]
for _ in range(self.mask_head.num_classes - 1)]
else:
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
sleep_interval = mask_test_cfg['async_sleep_interval']
else:
sleep_interval = 0.035
async with completed(
__name__,
'mask_head_forward',
sleep_interval=sleep_interval):
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, self.test_cfg.rcnn,
ori_shape, scale_factor, rescale)
return segm_result
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
# image shape of the first image in the batch (only one)
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factor, float):
scale_factor = torch.from_numpy(scale_factor).to(
det_bboxes.device)
_bboxes = (
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape, scale_factor,
rescale)
return segm_result
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
# convert to numpy array to save memory
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas,
self.test_cfg.rcnn)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
```
#### File: JKingKong/mmdetection/TestPythonAPI.py
```python
a={'a':'Ass','b':'We','c':'Can'}
b = (1,2,3,4)
c = [1,2,3,4]
print(*a)
print(*b)
print(*c)
def fu(a=None,b=None,c=None):
pass
``` |
{
"source": "jking-r7/scanman",
"score": 3
} |
#### File: scanman/utils/nmapper.py
```python
import subprocess
import logging
class Nmapper:
''' Nmap base class wrapper '''
# Nmap version cmd.
version_cmd = 'nmap -version'
def __init__(self, nsescript, ports, inputlist, xmlfile):
        ''' Init arg(s): nsescript:str, ports:lst/str, inputlist:str, xmlfile:str '''
self.nsescript = nsescript
self.ports = self.scrub_ports(ports)
self.inputlist = inputlist
self.xmlfile = xmlfile
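        # Full nmap command: skip host discovery (-Pn), run the NSE script against the given ports and input list, and write XML output (-oX).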
self.cmd = \
f"nmap -Pn --script {self.nsescript} -p {self.ports} -iL {self.inputlist} -oX {self.xmlfile}"
@classmethod
def get_version(cls):
''' Return Nmap version:str'''
# Nmap Version cmd.
cmdlst = cls.version_cmd.split(' ')
try:
proc = subprocess.run(cmdlst,
shell=False,
check=True,
capture_output=True,
text=True)
except Exception as e:
# Set check=True for the exception to catch.
logging.exception(e)
raise e
else:
# Debug print only.
logging.info(f'STDOUT:\n{proc.stdout}')
logging.debug(f'STDERR:\n{proc.stderr}')
return proc.stdout.split(' ')[2]
def scrub_ports(self, ports):
        '''
        Scrub ports: convert lst to str (if needed) and remove any whitespace.
        arg(s): ports:lst/str '''
# Convert lst to str.
portsstr = ''.join(ports)
# Remove white-space between ports and convert lst to str.
scrubbed_ports = str(portsstr.replace(' ','') )
return scrubbed_ports
def run_scan(self):
''' Launch Nmap scan via subprocess wrapper.'''
# Nmap command.
cmdlst = self.cmd.split(' ')
try:
proc = subprocess.run(cmdlst,
shell=False,
check=False,
capture_output=True,
text=True)
except Exception as e:
# Set check=True for the exception to catch.
logging.exception(e)
pass
else:
# Debug print only.
logging.info(f'STDOUT:\n{proc.stdout}')
logging.debug(f'STDERR:\n{proc.stderr}')
``` |
{
"source": "jkingsman/mockmail.io",
"score": 2
} |
#### File: jkingsman/mockmail.io/Mailbox.py
```python
import smtpd
import random
import pprint
import asyncore
from email.parser import Parser
from twisted.internet import task
from Config import bindingPort, bindingIP, dropSize
staged = []
class MailboxHandler():
def __init__(self, queue):
self.binding = (bindingIP, bindingPort)
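        # Every 2 seconds, drain messages staged by the SMTP handler into the shared queue.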
def stagedToQueue():
while len(staged) > 0:
queue.put(staged.pop())
lc = task.LoopingCall(stagedToQueue)
lc.start(2)
server = CustomSMTPServer(self.binding, None)
print 'SMTP starting on', self.binding[1]
asyncore.loop(timeout=1)
class CustomSMTPServer(smtpd.SMTPServer):
def process_message(self, peer, mailFrom, mailTo, data):
# handle drop conditions
if len(data) > dropSize:
# too big; drop
print 'Dropping message to', mailTo, ': too big'
return
# begin assembling email object
parser = Parser()
print 'Receiving message from:', mailFrom, peer, 'to', mailTo
email = parser.parsestr(data)
emailObj = {}
emailObj['raw'] = data
emailObj['from'] = email.get('From')
emailObj['fromIP'] = peer
emailObj['to'] = email.get('To')
emailObj['subject'] = email.get('Subject')
emailObj['transferEncoding'] = email.get('Content-Transfer-Encoding')
emailObj['attachments'] = []
if email.is_multipart():
# loop through each chunk of the body
for index, part in enumerate(email.get_payload()):
if index == 0:
# first object of multipart is probably body
emailObj['body'] = part.get_payload()
else:
attachment = {}
attachment['name'] = part.get_filename()
attachment['type'] = part.get_content_type()
attachment['data'] = part.get_payload()
attachment['transferEncoding'] = part.get('Content-Transfer-Encoding')
emailObj['attachments'].append(attachment)
else:
# not multipart; grab the body and run
emailObj['body'] = email.get_payload(decode=True)
staged.append(emailObj)
return
``` |
{
"source": "jkinkead/snowflake-connector-python",
"score": 2
} |
#### File: snowflake/connector/s3_storage_client.py
```python
from __future__ import division
import base64
import xml.etree.cElementTree as ET
from datetime import datetime
from io import IOBase
from logging import getLogger
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Tuple, Union
from cryptography.hazmat.primitives import hashes, hmac
from .compat import quote
from .constants import (
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_VALUE_OCTET_STREAM,
FileHeader,
ResultStatus,
)
from .encryption_util import EncryptionMetadata
from .storage_client import SnowflakeStorageClient
from .vendored import requests
if TYPE_CHECKING: # pragma: no cover
from .file_transfer_agent import SnowflakeFileMeta, StorageCredential
logger = getLogger(__name__)
META_PREFIX = "x-amz-meta-"
SFC_DIGEST = "sfc-digest"
AMZ_MATDESC = "x-amz-matdesc"
AMZ_KEY = "x-amz-key"
AMZ_IV = "x-amz-iv"
ERRORNO_WSAECONNABORTED = 10053 # network connection was aborted
EXPIRED_TOKEN = "ExpiredToken"
ADDRESSING_STYLE = "virtual" # explicit force to use virtual addressing style
class S3Location(NamedTuple):
bucket_name: str
path: str
class SnowflakeS3RestClient(SnowflakeStorageClient):
def __init__(
self,
meta: "SnowflakeFileMeta",
credentials: "StorageCredential",
stage_info: Dict[str, Any],
chunk_size: int,
use_accelerate_endpoint: bool = False,
use_s3_regional_url=False,
):
"""Rest client for S3 storage.
Args:
stage_info:
use_accelerate_endpoint:
"""
super().__init__(meta, stage_info, chunk_size, credentials=credentials)
# Signature version V4
# Addressing style Virtual Host
self.region_name: str = stage_info["region"]
# Multipart upload only
self.upload_id: Optional[str] = None
self.etags: Optional[List[str]] = None
self.s3location: "S3Location" = (
SnowflakeS3RestClient._extract_bucket_name_and_path(
self.stage_info["location"]
)
)
self.use_s3_regional_url = use_s3_regional_url
# if GS sends us an endpoint, it's likely for FIPS. Use it.
if stage_info["endPoint"]:
self.endpoint = (
f"https://{self.s3location.bucket_name}." + stage_info["endPoint"]
)
elif use_accelerate_endpoint:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3-accelerate.amazonaws.com"
)
else:
if self.use_s3_regional_url:
self.endpoint = f"https://{self.s3location.bucket_name}.s3.{self.region_name}.amazonaws.com"
else:
self.endpoint = (
f"https://{self.s3location.bucket_name}.s3.amazonaws.com"
)
@staticmethod
def sign(secret_key, msg):
h = hmac.HMAC(secret_key, hashes.SHA1())
h.update(msg)
return base64.encodebytes(h.finalize()).strip()
@staticmethod
def _construct_canonicalized_element(
bucket_name: str = None,
request_uri: str = "",
subresource: Dict[str, Union[str, int, None]] = None,
) -> str:
if not subresource:
subresource = {}
res = ""
if bucket_name:
res += f"/{bucket_name}"
if request_uri:
res += "/" + request_uri
else:
# for GET operations without a bucket name
res += "/"
if subresource:
res += "?"
keys = sorted(subresource.keys())
res += (
keys[0]
if subresource[keys[0]] is None
else f"{keys[0]}={subresource[keys[0]]}"
)
for k in keys[1:]:
query_str = k if subresource[k] is None else f"{k}={subresource[k]}"
res += f"&{query_str}"
return res
@staticmethod
def construct_canonicalized_headers(
headers: Dict[str, Union[str, List[str]]]
) -> str:
_res = sorted([[k.lower(), v] for k, v in headers.items()])
res = []
for i in range(len(_res)):
k, v = _res[i]
# if value is a list, convert to string delimited by comma
if isinstance(v, list):
v = ",".join(v)
            # if multiline header, replace newlines with spaces
k = k.replace("\n", " ")
res.append(k.rstrip() + ":" + v.lstrip())
ans = "\n".join(res)
if ans:
ans = ans + "\n"
return ans
@staticmethod
def _construct_string_to_sign(
verb: str,
canonicalized_element: str,
canonicalized_headers: str,
amzdate: str,
content_md5: str = "",
content_type: str = "",
) -> bytes:
res = verb + "\n" + content_md5 + "\n" + content_type + "\n"
res += amzdate + "\n" + canonicalized_headers + canonicalized_element
return res.encode("UTF-8")
@staticmethod
def _has_expired_token(response: requests.Response) -> bool:
"""Extract error code and error message from the S3's error response.
Expected format:
https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
Args:
response: Rest error response in XML format
Returns: True if the error response is caused by token expiration
"""
if response.status_code != 400:
return False
message = response.text
if not message or message.isspace():
return False
err = ET.fromstring(message)
return err.find("Code").text == EXPIRED_TOKEN
@staticmethod
def _extract_bucket_name_and_path(stage_location) -> "S3Location":
# split stage location as bucket name and path
bucket_name, _, path = stage_location.partition("/")
if path and not path.endswith("/"):
path += "/"
return S3Location(bucket_name=bucket_name, path=path)
def _send_request_with_authentication_and_retry(
self,
url: str,
verb: str,
resources: str,
retry_id: Union[int, str],
x_amz_headers: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
content_type: str = "",
data: Union[bytes, bytearray, IOBase, None] = None,
) -> requests.Response:
if not x_amz_headers:
x_amz_headers = {}
if not headers:
headers = {}
def generate_authenticated_url_and_args() -> Tuple[bytes, Dict[str, bytes]]:
t = datetime.utcnow()
amzdate = t.strftime("%Y%m%dT%H%M%SZ")
if "AWS_TOKEN" in self.credentials.creds:
x_amz_headers["x-amz-security-token"] = self.credentials.creds.get(
"AWS_TOKEN"
)
_x_amz_headers = self.construct_canonicalized_headers(x_amz_headers)
string_to_sign = self._construct_string_to_sign(
verb, resources, _x_amz_headers, amzdate, content_type=content_type
)
signature = self.sign(
self.credentials.creds["AWS_SECRET_KEY"].encode("UTF-8"), string_to_sign
)
authorization_header = ( # TODO
"AWS " + self.credentials.creds["AWS_KEY_ID"] + ":" + signature.decode()
)
headers.update(x_amz_headers)
headers["Date"] = amzdate
headers["Authorization"] = authorization_header
rest_args = {"headers": headers}
if data:
rest_args["data"] = data
return url, rest_args
return self._send_request_with_retry(
verb, generate_authenticated_url_and_args, retry_id
)
def get_file_header(self, filename: str) -> Union[FileHeader, None]:
"""Gets the metadata of file in specified location.
Args:
filename: Name of remote file.
Returns:
None if HEAD returns 404, otherwise a FileHeader instance populated with metadata
"""
path = quote(self.s3location.path + filename.lstrip("/"))
url = self.endpoint + f"/{path}"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
retry_id = "HEAD"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url, "HEAD", _resource, retry_id
)
if response.status_code == 200:
self.meta.result_status = ResultStatus.UPLOADED
metadata = response.headers
encryption_metadata = (
EncryptionMetadata(
key=metadata.get(META_PREFIX + AMZ_KEY),
iv=metadata.get(META_PREFIX + AMZ_IV),
matdesc=metadata.get(META_PREFIX + AMZ_MATDESC),
)
if metadata.get(META_PREFIX + AMZ_KEY)
else None
)
return FileHeader(
digest=metadata.get(META_PREFIX + SFC_DIGEST),
content_length=int(metadata.get("Content-Length")),
encryption_metadata=encryption_metadata,
)
elif response.status_code == 404:
logger.debug(
f"not found. bucket: {self.s3location.bucket_name}, path: {path}"
)
self.meta.result_status = ResultStatus.NOT_FOUND_FILE
return None
else:
response.raise_for_status()
def _prepare_file_metadata(self) -> Dict[str, Any]:
"""Construct metadata for a file to be uploaded.
Returns: File metadata in a dict.
"""
s3_metadata = {
META_PREFIX + SFC_DIGEST: self.meta.sha256_digest,
}
if self.encryption_metadata:
s3_metadata.update(
{
META_PREFIX + AMZ_IV: self.encryption_metadata.iv,
META_PREFIX + AMZ_KEY: self.encryption_metadata.key,
META_PREFIX + AMZ_MATDESC: self.encryption_metadata.matdesc,
}
)
return s3_metadata
def _initiate_multipart_upload(self) -> None:
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploads"
s3_metadata = self._prepare_file_metadata()
# initiate multipart upload
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploads": None},
)
retry_id = "Initiate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url,
"POST",
_resource,
retry_id,
x_amz_headers=s3_metadata,
content_type=HTTP_HEADER_VALUE_OCTET_STREAM,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
)
if response.status_code == 200:
self.upload_id = ET.fromstring(response.content)[2].text
self.etags = [None] * self.num_of_chunks
else:
response.raise_for_status()
def _upload_chunk(self, chunk_id: int, chunk: bytes):
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
if self.num_of_chunks == 1: # single request
s3_metadata = self._prepare_file_metadata()
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
response = self._send_request_with_authentication_and_retry(
url,
"PUT",
_resource,
chunk_id,
data=chunk,
x_amz_headers=s3_metadata,
headers={HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM},
content_type=HTTP_HEADER_VALUE_OCTET_STREAM,
)
response.raise_for_status()
else:
# multipart PUT
chunk_url = url + f"?partNumber={chunk_id+1}&uploadId={self.upload_id}"
query_params = {"partNumber": chunk_id + 1, "uploadId": self.upload_id}
chunk_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource=query_params,
)
response = self._send_request_with_authentication_and_retry(
chunk_url, "PUT", chunk_resource, chunk_id, data=chunk
)
if response.status_code == 200:
self.etags[chunk_id] = response.headers["ETag"]
response.raise_for_status()
def _complete_multipart_upload(self) -> None:
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploadId={self.upload_id}"
logger.debug("Initiating multipart upload complete")
# Complete multipart upload
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploadId": self.upload_id},
)
root = ET.Element("CompleteMultipartUpload")
for idx, etag_str in enumerate(self.etags):
part = ET.Element("Part")
etag = ET.Element("ETag")
etag.text = etag_str
part.append(etag)
part_number = ET.Element("PartNumber")
part_number.text = str(idx + 1)
part.append(part_number)
root.append(part)
retry_id = "Complete"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url,
"POST",
_resource,
retry_id,
data=ET.tostring(root),
)
response.raise_for_status()
def _abort_multipart_upload(self) -> None:
if self.upload_id is None:
return
path = quote(self.s3location.path + self.meta.dst_file_name.lstrip("/"))
url = self.endpoint + f"/{path}?uploadId={self.upload_id}"
retry_id = "Abort"
self.retry_count[retry_id] = 0
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name,
request_uri=path,
subresource={"uploadId": self.upload_id},
)
response = self._send_request_with_authentication_and_retry(
url, "DELETE", _resource, retry_id
)
response.raise_for_status()
def download_chunk(self, chunk_id: int) -> None:
logger.debug(f"Downloading chunk {chunk_id}")
path = quote(self.s3location.path + self.meta.src_file_name.lstrip("/"))
url = self.endpoint + f"/{path}"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, request_uri=path
)
if self.num_of_chunks == 1:
response = self._send_request_with_authentication_and_retry(
url, "GET", _resource, chunk_id
)
if response.status_code == 200:
self.write_downloaded_chunk(0, response.content)
self.meta.result_status = ResultStatus.DOWNLOADED
response.raise_for_status()
else:
chunk_size = self.chunk_size
if chunk_id < self.num_of_chunks - 1:
_range = f"{chunk_id * chunk_size}-{(chunk_id+1)*chunk_size-1}"
else:
_range = f"{chunk_id * chunk_size}-"
response = self._send_request_with_authentication_and_retry(
url,
"GET",
_resource,
chunk_id,
headers={"Range": f"bytes={_range}"},
)
if response.status_code in (200, 206):
self.write_downloaded_chunk(chunk_id, response.content)
response.raise_for_status()
def transfer_accelerate_config(self) -> bool:
url = self.endpoint + "/?accelerate"
_resource = self._construct_canonicalized_element(
bucket_name=self.s3location.bucket_name, subresource={"accelerate": None}
)
retry_id = "accelerate"
self.retry_count[retry_id] = 0
response = self._send_request_with_authentication_and_retry(
url, "GET", _resource, retry_id
)
if response.status_code == 200:
config = ET.fromstring(response.text)
use_accelerate_endpoint = (
config.find("Status") and config.find("Status").text == "Enabled"
)
logger.debug(f"use_accelerate_endpoint: {use_accelerate_endpoint}")
return use_accelerate_endpoint
return False
```
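The client above signs every request by assembling a canonicalized string-to-sign and HMAC-SHA1 signing it (see `_construct_string_to_sign` and `sign`). The sketch below reproduces that construction with only the standard library so it can run in isolation; the key, token, bucket and path are made-up example values, and the real client uses the `cryptography` package rather than `hashlib`/`hmac`.
```python
# Illustrative sketch only: reproduces the string-to-sign / HMAC-SHA1 signing
# pattern used by SnowflakeS3RestClient above, with made-up example values.
import base64
import hashlib
import hmac
from datetime import datetime, timezone


def canonicalize_headers(headers: dict) -> str:
    # lowercase keys, sort, one "key:value" per line, trailing newline (simplified)
    lines = [f"{k.lower().rstrip()}:{v.lstrip()}" for k, v in sorted(headers.items())]
    return "\n".join(lines) + "\n" if lines else ""


def string_to_sign(verb, resource, amz_headers, amzdate, content_md5="", content_type=""):
    return (
        f"{verb}\n{content_md5}\n{content_type}\n{amzdate}\n"
        f"{canonicalize_headers(amz_headers)}{resource}"
    ).encode("UTF-8")


def sign(secret_key: bytes, msg: bytes) -> bytes:
    return base64.encodebytes(hmac.new(secret_key, msg, hashlib.sha1).digest()).strip()


if __name__ == "__main__":
    amzdate = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    headers = {"x-amz-security-token": "EXAMPLE-TOKEN"}  # made-up value
    msg = string_to_sign("HEAD", "/my-bucket/stage/file.csv.gz", headers, amzdate)
    print("AWS " + "EXAMPLE-KEY-ID" + ":" + sign(b"EXAMPLE-SECRET", msg).decode())
```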
#### File: snowflake/connector/s3_util_sdk.py
```python
from __future__ import division
import logging
import os
from collections import namedtuple
from logging import getLogger
from typing import TYPE_CHECKING, Any, Dict
import boto3
import botocore.exceptions
import OpenSSL
from boto3.exceptions import RetriesExceededError, S3UploadFailedError
from boto3.s3.transfer import TransferConfig
from boto3.session import Session
from botocore.client import Config
from .constants import (
DEFAULT_S3_CONNECTION_POOL_SIZE,
FileHeader,
HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_VALUE_OCTET_STREAM,
MAX_S3_CONNECTION_POOL_SIZE,
ResultStatus,
)
from .encryption_util import EncryptionMetadata
if TYPE_CHECKING: # pragma: no cover
from .file_transfer_agent_sdk import SnowflakeFileMeta
logger = getLogger(__name__)
SFC_DIGEST = "sfc-digest"
AMZ_MATDESC = "x-amz-matdesc"
AMZ_KEY = "x-amz-key"
AMZ_IV = "x-amz-iv"
ERRORNO_WSAECONNABORTED = 10053 # network connection was aborted
EXPIRED_TOKEN = "ExpiredToken"
ADDRESSING_STYLE = "virtual" # explicit force to use virtual addressing style
"""
S3 Location: S3 bucket name + path
"""
S3Location = namedtuple(
"S3Location", ["bucket_name", "s3path"] # S3 bucket name # S3 path name
)
class SnowflakeS3Util:
"""S3 Utility class."""
@staticmethod
def create_client(
stage_info,
use_accelerate_endpoint=False,
use_s3_regional_url=False,
s3_connection_pool_size: int = DEFAULT_S3_CONNECTION_POOL_SIZE,
) -> Session.resource:
"""Creates a client object with a stage credential.
Args:
stage_info: Information about the stage.
use_accelerate_endpoint: Whether or not to use accelerated endpoint (Default value = False).
use_s3_regional_url: Whether or not to use regional url in aws deployments (Default value = False).
Returns:
The client to communicate with S3.
"""
stage_credentials = stage_info["creds"]
security_token = stage_credentials.get("AWS_TOKEN", None)
# if GS sends us an endpoint, it's likely for FIPS. Use it.
end_point = (
("https://" + stage_info["endPoint"]) if stage_info["endPoint"] else None
)
# If FIPS endpoint is in us-east-1 this will still work as we are testing for stage_info['endPoint'] which
# is populated only in the FIPS gov deployments.
if use_s3_regional_url and not stage_info["endPoint"]:
end_point = "https://" + "s3." + stage_info["region"] + ".amazonaws.com"
config = Config(
signature_version="s3v4",
s3={
"use_accelerate_endpoint": use_accelerate_endpoint,
"addressing_style": ADDRESSING_STYLE,
},
max_pool_connections=s3_connection_pool_size,
)
session = boto3.Session(
region_name=stage_info["region"],
aws_access_key_id=stage_credentials["AWS_KEY_ID"],
aws_secret_access_key=stage_credentials["AWS_SECRET_KEY"],
aws_session_token=security_token,
)
client = session.resource(
"s3",
endpoint_url=end_point,
config=config,
)
return client
@staticmethod
def extract_bucket_name_and_path(stage_location):
stage_location = os.path.expanduser(stage_location)
bucket_name = stage_location
s3path = ""
# split stage location as bucket name and path
if "/" in stage_location:
bucket_name = stage_location[0 : stage_location.index("/")]
s3path = stage_location[stage_location.index("/") + 1 :]
if s3path and not s3path.endswith("/"):
s3path += "/"
return S3Location(bucket_name=bucket_name, s3path=s3path)
@staticmethod
def _get_s3_object(meta: "SnowflakeFileMeta", filename):
client = meta.client_meta.cloud_client
s3location = SnowflakeS3Util.extract_bucket_name_and_path(
meta.client_meta.stage_info["location"]
)
s3path = s3location.s3path + filename.lstrip("/")
if logger.getEffectiveLevel() == logging.DEBUG:
tmp_meta = {}
log_black_list = ("stage_credentials", "creds", "encryption_material")
for k, v in meta.__dict__.items():
if k not in log_black_list:
tmp_meta[k] = v
# avoid logging tmp_meta - SNOW-372131
logger.debug(
f"s3location.bucket_name: {s3location.bucket_name}, "
f"s3location.s3path: {s3location.s3path}, "
f"s3full_path: {s3path}"
)
return client.Object(s3location.bucket_name, s3path)
@staticmethod
def get_file_header(meta: "SnowflakeFileMeta", filename):
"""Gets the remote file's metadata.
Args:
meta: Remote file's metadata info.
filename: Name of remote file.
Returns:
The file header, with expected properties populated or None, based on how the request goes with the
storage provider.
"""
akey = SnowflakeS3Util._get_s3_object(meta, filename)
try:
# HTTP HEAD request
akey.load()
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == EXPIRED_TOKEN:
logger.debug("AWS Token expired. Renew and retry")
meta.result_status = ResultStatus.RENEW_TOKEN
return None
elif e.response["Error"]["Code"] == "404":
logger.debug(f"not found. bucket: {akey.bucket_name}, path: {akey.key}")
meta.result_status = ResultStatus.NOT_FOUND_FILE
return FileHeader(
digest=None,
content_length=None,
encryption_metadata=None,
)
elif e.response["Error"]["Code"] == "400":
logger.debug(
f'Bad request, token needs to be renewed: {e.response["Error"]["Message"]}. '
f"bucket: {akey.bucket_name}, path: {akey.key}"
)
meta.result_status = ResultStatus.RENEW_TOKEN
return None
logger.debug(
f"Failed to get metadata for {akey.bucket_name}, {akey.key}: {e}"
)
meta.result_status = ResultStatus.ERROR
return None
meta.result_status = ResultStatus.UPLOADED
encryption_metadata = (
EncryptionMetadata(
key=akey.metadata.get(AMZ_KEY),
iv=akey.metadata.get(AMZ_IV),
matdesc=akey.metadata.get(AMZ_MATDESC),
)
if akey.metadata.get(AMZ_KEY)
else None
)
return FileHeader(
digest=akey.metadata.get(SFC_DIGEST),
content_length=akey.content_length,
encryption_metadata=encryption_metadata,
)
@staticmethod
def upload_file(
data_file: str,
meta: "SnowflakeFileMeta",
encryption_metadata: "EncryptionMetadata",
max_concurrency: int,
multipart_threshold: int,
):
"""Uploads the local file to S3.
Args:
data_file: File path on local system.
meta: The File meta object (contains credentials and remote location).
encryption_metadata: Encryption metadata to be set on object.
            max_concurrency: The maximum number of threads used to upload.
            multipart_threshold: The file size in bytes above which the file is uploaded concurrently in chunks.
Raises:
HTTPError if some http errors occurred.
Returns:
None.
"""
try:
s3_metadata = {
HTTP_HEADER_CONTENT_TYPE: HTTP_HEADER_VALUE_OCTET_STREAM,
SFC_DIGEST: meta.sha256_digest,
}
if encryption_metadata:
s3_metadata.update(
{
AMZ_IV: encryption_metadata.iv,
AMZ_KEY: encryption_metadata.key,
AMZ_MATDESC: encryption_metadata.matdesc,
}
)
s3location = SnowflakeS3Util.extract_bucket_name_and_path(
meta.client_meta.stage_info["location"]
)
s3path = s3location.s3path + meta.dst_file_name.lstrip("/")
akey = meta.client_meta.cloud_client.Object(s3location.bucket_name, s3path)
extra_args = {"Metadata": s3_metadata}
config = TransferConfig(
multipart_threshold=multipart_threshold,
max_concurrency=max_concurrency,
num_download_attempts=10,
)
if meta.src_stream is None:
akey.upload_file(
data_file,
Callback=meta.put_callback(
data_file,
os.path.getsize(data_file),
output_stream=meta.put_callback_output_stream,
show_progress_bar=meta.show_progress_bar,
)
if meta.put_callback
else None,
ExtraArgs=extra_args,
Config=config,
)
else:
upload_stream = meta.real_src_stream or meta.src_stream
upload_size = upload_stream.seek(0, os.SEEK_END)
upload_stream.seek(0)
akey.upload_fileobj(
upload_stream,
Callback=meta.put_callback(
data_file,
upload_size,
output_stream=meta.put_callback_output_stream,
show_progress_bar=meta.show_progress_bar,
)
if meta.put_callback
else None,
ExtraArgs=extra_args,
Config=config,
)
logger.debug("DONE putting a file")
meta.dst_file_size = meta.upload_size
meta.result_status = ResultStatus.UPLOADED
except botocore.exceptions.ClientError as err:
if err.response["Error"]["Code"] == EXPIRED_TOKEN:
logger.debug("AWS Token expired. Renew and retry")
meta.result_status = ResultStatus.RENEW_TOKEN
return
logger.debug(
f"Failed to upload a file: {data_file}, err: {err}", exc_info=True
)
raise err
except S3UploadFailedError as err:
if EXPIRED_TOKEN in str(err):
# Since AWS token expiration error can be encapsulated in
# S3UploadFailedError, the text match is required to
# identify the case.
logger.debug(
f"Failed to upload a file: {data_file}, err: {err}. Renewing AWS Token and Retrying"
)
meta.result_status = ResultStatus.RENEW_TOKEN
return
meta.last_error = err
meta.result_status = ResultStatus.NEED_RETRY
except OpenSSL.SSL.SysCallError as err:
meta.last_error = err
if err.args[0] == ERRORNO_WSAECONNABORTED:
# connection was disconnected by S3
# because of too many connections. retry with
# less concurrency to mitigate it
meta.result_status = ResultStatus.NEED_RETRY_WITH_LOWER_CONCURRENCY
else:
meta.result_status = ResultStatus.NEED_RETRY
@staticmethod
def _native_download_file(
meta: "SnowflakeFileMeta", full_dst_file_name, max_concurrency
):
try:
akey = SnowflakeS3Util._get_s3_object(meta, meta.src_file_name)
akey.download_file(
full_dst_file_name,
Callback=meta.get_callback(
meta.src_file_name,
meta.src_file_size,
output_stream=meta.get_callback_output_stream,
show_progress_bar=meta.show_progress_bar,
)
if meta.get_callback
else None,
Config=TransferConfig(
multipart_threshold=meta.multipart_threshold,
max_concurrency=max_concurrency,
num_download_attempts=10,
),
)
meta.result_status = ResultStatus.DOWNLOADED
except botocore.exceptions.ClientError as err:
if err.response["Error"]["Code"] == EXPIRED_TOKEN:
meta.result_status = ResultStatus.RENEW_TOKEN
else:
logger.debug(
f"Failed to download a file: {full_dst_file_name}, err: {err}",
exc_info=True,
)
raise err
except RetriesExceededError as err:
meta.result_status = ResultStatus.NEED_RETRY
meta.last_error = err
except OpenSSL.SSL.SysCallError as err:
meta.last_error = err
if err.args[0] == ERRORNO_WSAECONNABORTED:
# connection was disconnected by S3
# because of too many connections. retry with
# less concurrency to mitigate it
meta.result_status = ResultStatus.NEED_RETRY_WITH_LOWER_CONCURRENCY
else:
meta.result_status = ResultStatus.NEED_RETRY
@staticmethod
def transfer_accelerate_config(
client: "Session.resource", stage_info: Dict[str, Any]
) -> bool:
s3location = SnowflakeS3Util.extract_bucket_name_and_path(
stage_info["location"]
)
try:
ret = client.meta.client.get_bucket_accelerate_configuration(
Bucket=s3location.bucket_name
)
use_accelerate_endpoint = (
ret and "Status" in ret and ret["Status"] == "Enabled"
)
logger.debug(f"use_accelerate_endpoint: {use_accelerate_endpoint}")
return use_accelerate_endpoint
except botocore.exceptions.ClientError as e:
if e.response["Error"].get("Code", "Unknown") == "AccessDenied":
logger.debug(e)
else:
# unknown error
logger.debug(e, exc_info=True)
return False
``` |
{
"source": "JKinx/awr",
"score": 2
} |
#### File: awr/tests/test_fqe.py
```python
import sys
sys.path.append("../")
import awr_configs
import learning.awr_agent as awr_agent
import gym
import tensorflow as tf
import numpy as np
import torch
import util.rl_path as rl_path
from tqdm import tqdm as tqdm
import random
# from sklearn.ensemble import ExtraTreesRegressor
import torch
import torch.nn as nn
import warnings
import pickle
from matplotlib import pyplot as plt
import argparse
warnings.filterwarnings("ignore")
def sample_action(agent, s, action_std):
n = len(s.shape)
s = np.reshape(s, [-1, agent.get_state_size()])
feed = {
agent._s_tf: s
}
run_tfs = [agent._norm_a_pd_tf.parameters["loc"]]
out = agent._sess.run(run_tfs, feed_dict=feed)
loc = torch.tensor(out[0])
a = np.array(torch.distributions.Normal(loc, scale=action_std).sample().tolist())
if n == 1:
a = a[0]
return a
def rollout_path(agent, action_std):
path = rl_path.RLPath()
s = agent._env.reset()
s = np.array(s)
path.states.append(s)
done = False
while not done:
a = sample_action(agent, s, action_std)
s, r, done, info = agent._step_env(a)
s = np.array(s)
path.states.append(s)
path.actions.append(a)
path.rewards.append(r)
path.logps.append(0)
path.terminate = agent._check_env_termination()
print('HERE')
return rl_path.RLPath2(path) # in order to compute constraints
class Policy(nn.Module):
"""Policy class with an epsilon-greedy dqn model"""
def __init__(self, agent, action_std):
super().__init__()
self.agent = agent
self.action_std = action_std
def forward(self, states):
return sample_action(self.agent, states, self.action_std)
class Q(nn.Module):
"""Q-network using a NN"""
def __init__(self, state_dim, action_dim, lr):
super().__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.fitted = False
self.model = nn.Sequential(
nn.Linear(self.state_dim + self.action_dim, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 1)
)
self.criterion = nn.MSELoss()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
def forward(self, state):
"""Forward"""
state = torch.tensor(state).cuda().float()
return self.model(state)
def predict(self, state):
"""Forward without gradients (used for predictions)"""
state = torch.tensor(state).cuda().float()
with torch.no_grad():
return self.model(state).squeeze().cpu().numpy()
def fit(self, state, true_value):
"""Fit NN with a single backward step"""
self.fitted = True
state = torch.tensor(state).cuda().float()
true_value = torch.tensor(true_value).cuda().float()
self.optimizer.zero_grad()
out = self(state).squeeze()
loss = self.criterion(out, true_value)
loss.backward()
self.optimizer.step()
def is_fitted(sklearn_regressor):
"""Helper function to determine if a regression model from scikit-learn
has ever been `fit`"""
return hasattr(sklearn_regressor, 'n_outputs_')
class FittedQEvaluation(object):
def __init__(self, regressor=None):
self.regressor = regressor
self.tree_regressor = regressor is None
def regressor_fitted(self):
if self.tree_regressor:
return is_fitted(self.regressor)
else:
return self.regressor.fitted
def Q(self, state_actions):
"""Return the Q function estimate of `states` for each action"""
if not self.regressor_fitted():
return np.zeros(state_actions.shape[0])
return self.regressor.predict(state_actions)
def fit_Q(self, eval_policy, episodes, num_iters=100, discount=0.95):
batches = []
batch_len = len(episodes) // 10
for i in range(10):
Is = []
S2s = []
Rs = []
Ts = []
for I, R, S2, T in episodes[i * batch_len: (i + 1) * batch_len]:
Is.append(I)
Rs.append(R)
S2s.append(S2)
Ts.append(T)
batches.append((np.concatenate(Is, 0), np.concatenate(Rs, 0),
np.concatenate(S2s, 0), np.concatenate(Ts, 0)))
for i in tqdm(range(num_iters)):
ins = []
outs = []
for (Is, Rs, S2s, Ts) in batches:
ins.append(Is)
pi_S2s = eval_policy(S2s)
S2pi_S2s = np.hstack([S2s, pi_S2s])
Os = Rs + discount * (Ts * self.Q(S2pi_S2s))
outs.append(Os)
for (Is, Os) in zip(ins, outs):
self.regressor.fit(Is, Os)
def get_data(paths, constraint):
episodes = []
if constraint:
c_paths = []
for path in paths:
c_paths.append(rl_path.RLPath2(path))
paths = c_paths
# import pdb; pdb.set_trace()
for path in paths:
I = np.hstack([np.array(path.states)[:-1], np.array(path.actions)])
if constraint:
R = path.g[:, 0] # for constraints
else:
R = path.rewards
S2 = np.array(path.states)[1:]
episodes.append((I, R, S2))
return episodes
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data_std', type=float, default=0.2,
help='std of batch data')
parser.add_argument('--eval_std', type=float, default=0.2,
help='std in eval')
parser.add_argument('--n_epochs', type=int, default=100,
help='number of epochs in FQE fit')
parser.add_argument('--constraint', type=bool, default=False,
                        help='whether or not FQE should be evaluated for the constraint')
args = parser.parse_args()
data_std = args.data_std
configs = awr_configs.AWR_CONFIGS['Reacher-v2']
datas = get_data(pickle.load(open("../output/Reacher-v2_{}_offline/Reacher-v2_{}_offline_paths.pickle".format(data_std, data_std), "rb")),
args.constraint)
terminals = []
for el in datas:
terminal = np.ones(len(el[0]))
terminal[-1] = 0
terminals.append(terminal)
datas2 = []
for el,terminal in zip(datas, terminals):
datas2.append((el[0],el[1],el[2],terminal))
env = gym.make("Reacher-v2")
graph = tf.Graph()
sess = tf.Session(graph=graph)
agent = awr_agent.AWRAgent(env=env, sess=sess, **configs)
agent.load_model("../output/Reacher-v2_{}_offline/model.ckpt".format(data_std))
qnn = Q(agent.get_state_size(), agent.get_action_size(), 0.001).cuda()
eval_std = args.eval_std # For each, try different eval_std
num_epochs = args.n_epochs
FQE = FittedQEvaluation(qnn)
policy = Policy(agent, eval_std)
    FQE.fit_Q(policy, datas2, num_epochs, agent._discount)
vals0 = []
for _ in tqdm(range(100)):
path = rollout_path(agent, eval_std)
true = sum([r * (agent._discount ** i) for i,r in enumerate(path.rewards)])
pred = FQE.regressor.predict(np.hstack([path.states[0], path.actions[0]]).reshape(1,-1))
vals0.append([true, pred])
# val[0] true, val[1] prediction
# TODO:
# 1) Draw line of best fit and y=x
# 2) Calculate MSE loss or something (maybe R^2 too?)
xs = [val[0] for val in vals0]
ys = [val[1] for val in vals0]
# Plot linear regression line
plt.plot(np.unique(xs), np.poly1d(np.polyfit(xs, ys, 1))(np.unique(xs)))
# Plot y=x
plt.plot(np.unique(xs), np.unique(xs), 'k--')
# Mean-squared Error
mse = np.mean((np.array(ys) - np.array(xs)) ** 2)
print('MSE: {}'.format(mse))
plt.scatter(xs, ys, color='r')
plt.xlabel("True Value")
plt.ylabel("Prediction")
plt.title("FQE: {}, {}, MSE: {}".format(data_std, eval_std, mse))
plt.savefig("../output/Reacher-v2_FQE_{}_{}_{}.jpg".format(data_std, eval_std, num_epochs))
plt.show()
```
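The core of `fit_Q` above is the fitted-Q-evaluation bootstrap target O = R + discount * T * Q(S', pi(S')), where T masks out terminal transitions. A minimal numpy-only illustration of that single regression target, with made-up transition data and dummy Q and policy functions, is:
```python
# Minimal numpy-only illustration of the FQE regression target used in fit_Q:
# O = R + discount * T * Q(S', pi(S')). All data below is made up.
import numpy as np

rng = np.random.default_rng(0)
S2 = rng.normal(size=(5, 3))          # next states
R = rng.normal(size=5)                # rewards
T = np.array([1., 1., 1., 1., 0.])    # 0 marks a terminal transition
discount = 0.95

def pi(states):                        # dummy deterministic policy
    return np.tanh(states[:, :2])

def Q(state_actions):                  # dummy Q estimate (zeros, like an unfitted regressor)
    return np.zeros(state_actions.shape[0])

S2pi = np.hstack([S2, pi(S2)])
targets = R + discount * (T * Q(S2pi))
print(targets)                         # the regressor is then fit on (state, action) -> targets
```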
#### File: awr/util/best_response.py
```python
import argparse
import gym
import numpy as np
import os
import sys
import tensorflow as tf
import pickle
from tqdm import tqdm
from copy import deepcopy as dc
from util.policy import Policy
import awr_configs
import learning.awr_agent as awr_agent
def enable_gpus(gpu_str):
    if (gpu_str != ""):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
return
class BestResponse(object):
def __init__(self, args):
self.args = args
def run(self, dataset):
enable_gpus(self.args.gpu)
self.env = self.build_env(self.args.env)
self.agent = self.build_agent(self.env)
self.agent.visualize = self.args.visualize
self.agent._replay_buffer = dc(dataset)
self.agent.op_train(max_iter=self.args.max_iter,
test_episodes=self.args.test_episodes,
output_dir=self.args.output_dir,
output_iters=self.args.output_iters)
return Policy(self.agent)
def build_agent(self, env):
env_id = self.args.env
agent_configs = {}
if (env_id in awr_configs.AWR_CONFIGS):
agent_configs = awr_configs.AWR_CONFIGS[env_id]
graph = tf.Graph()
sess = tf.Session(graph=graph)
agent = awr_agent.AWRAgent(env=env, sess=sess, **agent_configs)
return agent
def enable_gpus(self, gpu_str):
        if (gpu_str != ""):
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_str
return
def build_env(self, env_id):
        assert(env_id != ""), "Unspecified environment."
env = gym.make(env_id)
return env
```
#### File: awr/util/rl_path.py
```python
import enum
import numpy as np
import time
class Terminate(enum.Enum):
Null = 0
Fail = 1
class RLPath(object):
def __init__(self):
self.states = []
self.actions = []
self.logps = []
self.rewards = []
self.terminate = Terminate.Null
self.clear()
return
def pathlength(self):
return len(self.actions)
def is_valid(self):
valid = True
l = self.pathlength()
valid &= len(self.states) == l + 1
valid &= len(self.actions) == l
valid &= len(self.logps) == l
valid &= len(self.rewards) == l
valid |= (l == 0)
return valid
def check_vals(self):
for key, vals in vars(self).items():
if type(vals) is list and len(vals) > 0:
for v in vals:
if not np.isfinite(v).all():
return False
return True
def clear(self):
for key, vals in vars(self).items():
if type(vals) is list:
vals.clear()
self.terminate = Terminate.Null
return
def discounted_sum(self, discount):
factors = np.empty(len(self.rewards))
factors[0] = 1
factors[1:] = discount
factors = np.cumprod(factors)
return np.array(self.rewards) @ factors
def calc_return(self):
return sum(self.rewards)
def terminated(self):
return self.terminate == Terminate.Null
def compute_g(path):
g0 = (np.array(path.actions)[:,0] > 0.5).astype(np.float)
return g0.reshape(-1,1)
class RLPath2(object):
def __init__(self, path, compute_g):
self.states = np.array(path.states)
self.actions = np.array(path.actions)
self.rewards = np.array(path.rewards)
self.costs = - self.rewards
self.c = - self.rewards
self.g = compute_g(path)
self.terminate = Terminate.Null
self.clear()
return
def calculate_cost(self, scale, lamb):
self.costs = (self.c + np.dot(lamb[:-1], self.g.T)) / scale
self.rewards = -self.costs
def set_cost(self, scale, key, idx=None):
if key == 'c':
self.costs = self.c / scale
self.rewards = -self.costs
elif key == 'g':
assert idx is not None
# Pick the idx'th constraint
self.costs = self.g[:,idx] / scale
self.rewards = -self.costs
else:
raise
def compute_g(self, path):
g0g = (np.array(path.actions)[:,0] > 0.4).astype(np.float)
g0l = (np.array(path.actions)[:,0] < -0.4).astype(np.float)
g1g = (np.array(path.actions)[:,1] > 0.4).astype(np.float)
g1l = (np.array(path.actions)[:,1] < -0.4).astype(np.float)
g0 = g0g + g0l + g1g + g1l
return g0.reshape(-1,1)
def pathlength(self):
return len(self.actions)
def is_valid(self):
valid = True
l = self.pathlength()
valid &= len(self.states) == l + 1
valid &= len(self.actions) == l
valid &= len(self.rewards) == l
valid &= len(self.costs) == l
valid &= len(self.c) == l
valid &= len(self.g) == l
valid |= (l == 0)
return valid
def check_vals(self):
for key, vals in vars(self).items():
if type(vals) is list and len(vals) > 0:
for v in vals:
if not np.isfinite(v).all():
return False
return True
def clear(self):
for key, vals in vars(self).items():
if type(vals) is list:
vals.clear()
self.terminate = Terminate.Null
return
def discounted_sum(self, discount, which="costs"):
factors = np.empty(len(self.rewards))
factors[0] = 1
factors[1:] = discount
factors = np.cumprod(factors)
if which == "rewards":
main = self.rewards
elif which == "costs":
main = self.costs
else:
raise NotImplementedError
return main @ factors
def calc_return(self):
return sum(self.rewards)
def terminated(self):
return self.terminate == Terminate.Null
``` |
{
"source": "jkirkby3/fypy",
"score": 3
} |
#### File: fypy/fit/Calibrator.py
```python
from fypy.fit.Calibratable import Calibratable
from fypy.fit.Minimizer import Minimizer, LeastSquares, OptResult
from fypy.fit.Objective import Objective
from typing import Dict, Union, List, Tuple, Optional
import numpy as np
class Calibrator(object):
def __init__(self,
model: Calibratable,
minimizer: Minimizer = LeastSquares()):
"""
A generic calibration engine. The idea is to supply a minimizer of some kind (least squares), as well
as a calibratable object of some kind, e.g. a full model or a component of the model, and to calibrate
that model based on the set of objectives/penalties that are added to this calibrator. At the least, you
should add some set of targets. e.g., to calibrate a stochastic volatility (SV) model to a set of market prices,
add a Targets objective, containing the market prices, and supply the SV model to calibrate. Upon completion
of the calibration, the model parameters will be set to their calibrated values, and your model is ready
for pricing/risk.
:param model: Calibratable, some calibratable object/model (e.g. Levy model)
:param minimizer: Minimizer, some minimizer (e.g Levenberg-Marquardt least squares)
"""
self._model = model
self._minimizer = minimizer
self._objectives: Dict[str, Objective] = {}
# Initialize the guess and bounds, using model defaults. These can be overridden
self._guess: Optional[np.ndarray] = model.default_params()
self._bounds: Union[Tuple, List[Tuple]] = model.param_bounds()
def add_objective(self,
name: str,
objective: Objective):
"""
Add a new objective function to the set of objectives. You can calibrate with as many objectives as you want,
        some representing targets, others representing regularization penalties, arbitrage penalties, etc.
Each objective is named. Adding an objective with an existing name overwrites that objective
:param name: str, the name of this objective
:param objective: Objective, an objective that will guide the calibration process
:return: self
"""
self._objectives[name] = objective
return self
def set_bounds(self, bounds: Union[Tuple, List[Tuple]]):
"""
Set the bounds on parameters
:param bounds: the bounds per parameter, list of (lower,upper) bounds per parameter
:return: self
"""
self._bounds = bounds
return self
def set_initial_guess(self, params: np.ndarray):
"""
Set the initial guess used to start the calibration
:param params: np.ndarray, initial guess for parameters
:return: self
"""
self._guess = params
return self
def calibrate(self) -> OptResult:
""" Run the calibration, fits the model in place, returns the optimization summary """
if len(self._objectives) == 0:
raise RuntimeError("You never set any objectives ")
result = self._minimizer.minimize(self._objective_value,
bounds=self._bounds, guess=self._guess)
# Set the final parameters in the model
self._model.set_params(result.params)
return result
# ========================
# Private
# ========================
def _objective_value(self, params: np.ndarray) -> np.ndarray:
if len(self._objectives) == 0:
raise RuntimeError("You never set any objectives ")
# Set the parameters into model
self._model.set_params(params)
# Evaluate the residuals for all objectives
return np.concatenate([objective.value() for _, objective in self._objectives.items()])
```
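The calibration loop above boils down to: set candidate parameters on the model, stack the residual vectors of every objective, and let a least-squares minimizer drive them to zero. A stripped-down sketch of that pattern written against `scipy.optimize.least_squares` directly (not the fypy classes), with a made-up two-parameter toy model and made-up market targets:
```python
# Stripped-down illustration of the residual-stacking pattern used by Calibrator,
# written against scipy directly. The toy "model" and target prices are made up.
import numpy as np
from scipy.optimize import least_squares

strikes = np.array([90., 100., 110.])
market_prices = np.array([12.1, 6.3, 2.9])       # made-up targets
weights = np.ones_like(market_prices) / len(market_prices)

def model_prices(params):
    a, b = params                                # toy two-parameter pricing rule
    return a * np.exp(-b * (strikes - 100.) / 100.) + 1.0

def residuals(params):
    # one "Targets"-style objective: sqrt(weight) * (model - market)
    return np.sqrt(weights) * (model_prices(params) - market_prices)

result = least_squares(residuals, x0=np.array([5.0, 1.0]),
                       bounds=([0., 0.], [np.inf, np.inf]))
print(result.x, result.cost)
```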
#### File: fypy/fit/Targets.py
```python
from typing import Callable, Optional
import numpy as np
from fypy.fit.Objective import Objective
class Targets(Objective):
def __init__(self,
targets: np.ndarray,
function: Callable[..., np.ndarray],
weights: Optional[np.ndarray] = None,
strength: float = 1.0):
"""
An objective representing a set of targets (e.g. market prices, volatilities, etc)
:param weights: np.ndarray, the weight to apply per target
:param targets: np.ndarray, the targets themselves (e.g. market prices)
:param function: function, evaluated at each of the targets, determines the residual
:param strength: float, the strength of this particular objective
"""
super().__init__(strength=strength)
# take sqrt since they are applied to residual before squaring.
        # The strength is in the space of sqrt(sum(squares)), so let it be squared with the residuals
if weights is None:
weights = np.ones_like(targets) / len(targets)
self._weights = self._strength * np.sqrt(weights)
self._targets = targets
self._function = function
def value(self) -> np.ndarray:
""" Evaluate the targets objective, returns residuals per target """
return self._weights * (self._function() - self._targets)
```
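`Targets` multiplies residuals by `sqrt(weights)` because the least-squares minimizer squares each residual, so the squared objective recovers the intended weighted sum of squares. A one-line check of that identity on made-up numbers:
```python
# Quick check (made-up numbers): applying sqrt(w) before squaring is the same
# as weighting the squared residuals by w.
import numpy as np

w = np.array([0.2, 0.3, 0.5])
r = np.array([1.5, -0.4, 0.9])                 # residuals f(x) - target
assert np.isclose(np.sum((np.sqrt(w) * r) ** 2), np.sum(w * r ** 2))
```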
#### File: fypy/instrument/Instrument.py
```python
from abc import ABC, abstractmethod
import numpy as np
from fypy.date.Date import Date
from typing import Union
class Instrument(ABC):
"""
Base instrument class.
"""
def __init__(self):
pass
# TODO: add exercise class, vanilla option will take an exercise
class VanillaOption(Instrument):
def __init__(self,
strike: float,
expiry: Date,
is_call: bool):
""" Vanilla Option """
super().__init__()
self._strike = strike
self._expiry = expiry
self._is_call = is_call
@property
def strike(self) -> float:
return self._strike
@property
def expiry(self) -> Date:
return self._expiry
@property
def is_call(self) -> bool:
return self._is_call
def payoff(self, underlying: Union[float, np.array]) -> Union[float, np.array]:
"""
Calculate the payoff (cashflow) that would occur given a particular value of the underlying
:param underlying: float or array of underlying value
:return: float or array, matches type that was supplied
"""
return np.maximum(0, underlying - self._strike) if self._is_call \
else np.maximum(0, self._strike - underlying)
```
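The `payoff` method above is the usual hockey-stick max(S - K, 0) for a call and max(K - S, 0) for a put, vectorised over the underlying. A self-contained numpy illustration with made-up numbers (constructing a `Date` and the option object itself is skipped here):
```python
# Standalone illustration of the VanillaOption.payoff formulas with made-up numbers.
import numpy as np

strike = 100.0
underlying = np.array([80., 95., 100., 110., 130.])

call_payoff = np.maximum(0, underlying - strike)
put_payoff = np.maximum(0, strike - underlying)
print(call_payoff)   # [ 0.  0.  0. 10. 30.]
print(put_payoff)    # [20.  5.  0.  0.  0.]
```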
#### File: fypy/market/MarketSurface.py
```python
from typing import Dict
from fypy.market.MarketSlice import MarketSlice
from fypy.volatility.implied import ImpliedVolCalculator
class MarketSurface(object):
def __init__(self, slices: Dict[float, MarketSlice] = None):
"""
Container class for an option price surface, composed of individual market slices, one per tenor
:param slices: dict: {float, MarketSlice}, contains all slices (you can add more later)
"""
self._slices = slices or {}
def add_slice(self, ttm: float, market_slice: MarketSlice):
"""
Add a new market slice (overwrites if same ttm already exists in surface)
:param ttm: float, time to maturity of the slice (tenor)
:param market_slice: MarketSlice, the market slice prices object
:return: self
"""
self._slices[ttm] = market_slice
return self
@property
def slices(self) -> Dict[float, MarketSlice]:
""" Access all slices """
return self._slices
@property
def ttms(self):
""" Get the ttms in the surface """
return self._slices.keys()
@property
def num_slices(self) -> int:
""" Get number of slice in surface """
return len(self._slices)
def fill_implied_vols(self, calculator: ImpliedVolCalculator):
"""
Fill the implied vols given a calculator. Fills in for each of bid,mid,ask, but only those that have
corresponding prices
:param calculator: ImpliedVolCalculator, a calculator used to fill in the vols from prices
:return: None
"""
for slice_ in self.slices.values():
slice_.fill_implied_vols(calculator)
```
#### File: model/levy/CGMY.py
```python
from fypy.model.levy.LevyModel import LevyModel
from fypy.model.FourierModel import Cumulants
from fypy.termstructures.ForwardCurve import ForwardCurve
from fypy.termstructures.DiscountCurve import DiscountCurve
import numpy as np
from scipy.special import gamma
from typing import List, Tuple, Optional, Union
class CGMY(LevyModel):
def __init__(self,
forwardCurve: ForwardCurve,
discountCurve: DiscountCurve,
C: float = 0.02,
G: float = 5.,
M: float = 15.,
Y: float = 1.2):
"""
Carr-Geman-Madan-Yor (CGMY) model. When Y=0, this model reduces to VG
:param forwardCurve: ForwardCurve term structure
:param C: float, viewed as a measure of the overall level of activity, and influences kurtosis
        :param G: float, rate of exponential decay on the left tail (negative jumps)
        :param M: float, rate of exponential decay on the right tail (positive jumps). Typically for equities G < M,
            ie the left tail is then heavier than the right (more downside risk)
:param Y: float, controls the "fine structure" of the process
"""
super().__init__(forwardCurve=forwardCurve, discountCurve=discountCurve,
params=np.asarray([C, G, M, Y]))
# ================
# Model Parameters
# ================
@property
def C(self) -> float:
""" Model Parameter """
return self._params[0]
@property
def G(self) -> float:
""" Model Parameter """
return self._params[1]
@property
def M(self) -> float:
""" Model Parameter """
return self._params[2]
@property
def Y(self) -> float:
""" Model Parameter """
return self._params[3]
# =============================
# Fourier Interface Implementation
# =============================
def cumulants(self, T: float) -> Cumulants:
"""
Evaluate the cumulants of the model at a given time. This is useful e.g. to figure out integration bounds etc
during pricing
:param T: float, time to maturity (time at which cumulants are evaluated)
:return: Cumulants object
"""
C, G, M, Y = self.C, self.G, self.M, self.Y
rn_drift = self.risk_neutral_log_drift()
return Cumulants(T=T,
rn_drift=rn_drift,
c1=T * (rn_drift + C * gamma(1 - Y) * (M ** (Y - 1) - G ** (Y - 1))),
c2=T * C * gamma(2 - Y) * (M ** (Y - 2) + G ** (Y - 2)),
c4=T * C * gamma(4 - Y) * (M ** (Y - 4) + G ** (Y - 4))
)
def symbol(self, xi: Union[float, np.ndarray]):
"""
Levy symbol, uniquely defines Characteristic Function via: chf(T,xi) = exp(T*symbol(xi)), for all T>=0
:param xi: np.ndarray or float, points in frequency domain
:return: np.ndarray or float, symbol evaluated at input points in frequency domain
"""
C, G, M, Y = self.C, self.G, self.M, self.Y
rn_drift = self.risk_neutral_log_drift()
return 1j * xi * rn_drift + C * gamma(-Y) * ((M - 1j * xi) ** Y - M ** Y + (G + 1j * xi) ** Y - G ** Y)
def convexity_correction(self) -> float:
"""
Computes the convexity correction for the Levy model, added to log process drift to ensure
risk neutrality
"""
C, G, M, Y = self.C, self.G, self.M, self.Y
return -C * gamma(-Y) * ((M - 1) ** Y - M ** Y + (G + 1) ** Y - G ** Y) # convexity correction
# =============================
# Calibration Interface Implementation
# =============================
def num_params(self) -> int:
return 4
def param_bounds(self) -> Optional[List[Tuple]]:
return [(0, np.inf), (0, np.inf), (0, np.inf), (-np.inf, 2)]
def default_params(self) -> Optional[np.ndarray]:
return np.asarray([0.02, 5, 15, 1.2])
```
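The cumulants above follow the standard CGMY closed forms, e.g. c2 = T * C * Gamma(2 - Y) * (M^(Y-2) + G^(Y-2)). A quick standalone evaluation with the class defaults (C, G, M, Y) = (0.02, 5, 15, 1.2) and T = 1, leaving out the risk-neutral drift term since it requires the forward and discount curves:
```python
# Standalone evaluation of the CGMY cumulant formulas used above
# (drift term omitted; parameters are the model defaults, T = 1).
from scipy.special import gamma

C, G, M, Y = 0.02, 5.0, 15.0, 1.2
T = 1.0

c1_ex_drift = T * C * gamma(1 - Y) * (M ** (Y - 1) - G ** (Y - 1))
c2 = T * C * gamma(2 - Y) * (M ** (Y - 2) + G ** (Y - 2))
c4 = T * C * gamma(4 - Y) * (M ** (Y - 4) + G ** (Y - 4))
print(c1_ex_drift, c2, c4)
```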
#### File: pricing/engine/EuropeanEngine.py
```python
from fypy.pricing.engine.Engine import Engine
from fypy.pricing.StrikesPricer import StrikesPricer
from fypy.instrument.Instrument import VanillaOption
from fypy.date.Date import Date
from fypy.date.DayCounter import DayCounter, DayCounter_365
import numpy as np
class EuropeanEngine(Engine):
def __init__(self,
strikesPricer: StrikesPricer,
val_date: Date,
dc: DayCounter = DayCounter_365()):
"""
European pricing Engine, which can price European options.
:param strikesPricer: an instance of a StrikesPricer, e.g. a Fourier based pricer
:param val_date: Date, the date of valuation
:param dc: DayCounter, used to count the time until contracts expire, from given val_date
"""
self._strikesPricer = strikesPricer
self._val_date = val_date
self._dc = dc
def price_instrument(self, inst: VanillaOption) -> float:
"""
Price a vanilla instrument (implements the Engine interface)
:param inst: Instrument object, the instrument to price
:return: price of instrument
"""
T = self._dc.year_fraction(start=self._val_date, end=inst.expiry)
return self._strikesPricer.price(T=T, K=inst.strike, is_call=inst.is_call)
def price_strikes(self,
T: float,
K: np.ndarray,
is_calls: np.ndarray) -> np.ndarray:
"""
        Price a set of strikes (at the same time to maturity, ie one slice of a surface)
Override this method if given a more efficient implementation for multiple strikes
:param T: float, time to maturity of options
:param K: np.array, strikes of options
:param is_calls: np.array[bool], indicators of if strikes are calls (true) or puts (false)
:return: np.array, prices of strikes
"""
return self._strikesPricer.price_strikes(T=T, K=K, is_calls=is_calls)
def price(self, T: float, K: float, is_call: bool):
"""
Price a single strike of European option.
:param T: float, time to maturity
:param K: float, strike of option
:param is_call: bool, indicator of if strike is call (true) or put (false)
:return: float, price of option
"""
return self._strikesPricer.price(T=T, K=K, is_call=is_call)
```
#### File: fypy/pricing/StrikesPricer.py
```python
from abc import ABC, abstractmethod
import numpy as np
class StrikesPricer(ABC):
"""
Abstract class for pricing a homogeneous type of instrument (e.g. European options),
which are distinguished according to Time, Strike, and call or put.
For example, we can price multiple strikes efficiently under a "Fourier" model such as Levy or Heston using
    a Fast Fourier Transform pricer. These typically gain efficiency when pricing a set of strikes with a common maturity.
    Note: implementations of the strikes pricer will target a single type of option, e.g. European, American, etc.
"""
def price_strikes(self,
T: float,
K: np.ndarray,
is_calls: np.ndarray) -> np.ndarray:
"""
        Price a set of strikes (at the same time to maturity, ie one slice of a surface)
Override this method if given a more efficient implementation for multiple strikes
:param T: float, time to maturity of options
:param K: np.array, strikes of options
:param is_calls: np.array[bool], indicators of if strikes are calls (true) or puts (false)
:return: np.array, prices of strikes
"""
return np.asarray([self.price(T, strike, is_call) for strike, is_call in zip(K, is_calls)])
@abstractmethod
def price(self, T: float, K: float, is_call: bool) -> float:
"""
Price a single strike (of whatever type of instrument the strikes pricer can price)
:param T: float, time to maturity
:param K: float, strike of option
:param is_call: bool, indicator of if strike is call (true) or put (false)
:return: float, price of option
"""
raise NotImplementedError
```
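As a concrete illustration of the interface, here is a toy Black-Scholes pricer implementing `price`; it is not part of fypy, and it assumes the package is importable under the module path shown above (`fypy.pricing.StrikesPricer`). The inherited `price_strikes` then works unchanged.
```python
# Toy Black-Scholes implementation of the StrikesPricer interface (not part of fypy).
# Assumes fypy is importable under the module path shown above.
import numpy as np
from scipy.stats import norm
from fypy.pricing.StrikesPricer import StrikesPricer

class BlackScholesStrikesPricer(StrikesPricer):
    def __init__(self, spot: float, rate: float, vol: float):
        self._S0, self._r, self._vol = spot, rate, vol

    def price(self, T: float, K: float, is_call: bool) -> float:
        S0, r, sig = self._S0, self._r, self._vol
        d1 = (np.log(S0 / K) + (r + 0.5 * sig ** 2) * T) / (sig * np.sqrt(T))
        d2 = d1 - sig * np.sqrt(T)
        call = S0 * norm.cdf(d1) - K * np.exp(-r * T) * norm.cdf(d2)
        if is_call:
            return call
        # put via put-call parity
        return call - S0 + K * np.exp(-r * T)

pricer = BlackScholesStrikesPricer(spot=100., rate=0.01, vol=0.2)
print(pricer.price_strikes(T=1.0, K=np.array([90., 100., 110.]),
                           is_calls=np.array([True, True, False])))
```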
#### File: fypy/termstructures/ForwardCurve.py
```python
from abc import ABC, abstractmethod
from typing import Union
import numpy as np
class ForwardCurve(ABC):
"""
Abstract base class for deterministic forward curves.
Examples:
Equity: F(T) = S_0 * Div(T) / Disc(T) (more generally includes dividends, borrow cost, etc.)
FX: F(T) = FX_0 * Div_f(T) / Div_d(T)
Rates: F(T) = IBOR(T), the forward rate for some IBOR curve, e.g. LIBOR 3M
Commodity: F(T) = Futures(T), ie. some interpolation of the futures curve
"""
@abstractmethod
def spot(self) -> float:
""" Spot price. In some cases this is the actual spot (e.g. Equity/FX), otherwise it is F(0) """
raise NotImplementedError
@abstractmethod
def fwd_T(self, T: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Forward at time T in the future
:param T: float or np.ndarray, time(s) in the future
:return: float or np.ndarray, forward(s) at time(s) in the future
"""
raise NotImplementedError
def __call__(self, T: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Forward at time T in the future. Ability to call term structure using ()
:param T: float or np.ndarray, time(s) in the future
:return: float or np.ndarray, forward(s) at time(s) in the future
"""
return self.fwd_T(T)
def drift(self, t: float, T: float) -> float:
"""
Drift implied by the forward curve, implied over a time interval [t,T]
:param t: float, start time
:param T: float, end time
:return: float, drift implied over [t,T]
"""
return np.log(self.fwd_T(T)/self.fwd_T(t)) / (T - t)
```
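The `drift` formula above is the continuously-compounded growth rate implied between two forward points, log(F(T)/F(t)) / (T - t). For a flat-rate equity curve F(T) = S0 * exp(r*T) it simply returns r, which the standalone check below confirms (no fypy import needed; the numbers are made up):
```python
# Standalone check of the implied-drift formula for a flat-rate forward curve.
import numpy as np

S0, r = 100.0, 0.03
fwd = lambda T: S0 * np.exp(r * T)

t, T = 0.5, 2.0
drift = np.log(fwd(T) / fwd(t)) / (T - t)
print(drift)   # 0.03, the flat rate, as expected
```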
#### File: pricing/fourier/suite.py
```python
import unittest
from tests.pricing.fourier.Test_Proj_European import Test_Proj_European
from tests.pricing.fourier.Test_Lewis_European import Test_Lewis_European
from tests.pricing.fourier.Test_GilPeleaz_European import Test_GilPeleaz_European
def test_suite():
suite = unittest.TestSuite()
for test in (Test_Proj_European, Test_Lewis_European, Test_GilPeleaz_European):
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(test))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(test_suite())
``` |
{
"source": "jkirkish/DojoAssignments",
"score": 4
} |
#### File: jkirkish/DojoAssignments/functions.py
```python
def odd_even():
for count in range(1,2000):
if count % 2 == 0:
print("Even:", count)
else:
print("Odd:", count)
odd_even()
'''Create a function called 'multiply' that iterates
through each value in a list (e.g. a = [2, 4, 10, 16])
and returns a list where each value has been multiplied
by 5.'''
def multiply(a, n):
    a2 = []
    for x in range(len(a)):
        a2.insert(x, a[x] * n)
    print(a)
    print("A new list multiplied by 5")
    print(a2)
    return a2
a = [2, 4, 10, 16]
multiply(a,5)
``` |
{
"source": "jkisk/pulumi-aws-iam",
"score": 2
} |
#### File: python/jkisk_pulumi_aws_iam/iam_role_for_service_account.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_aws
__all__ = ['IamRoleForServiceAccountArgs', 'IamRoleForServiceAccount']
@pulumi.input_type
class IamRoleForServiceAccountArgs:
def __init__(__self__, *,
namespace: pulumi.Input[str],
provider_arn: pulumi.Input[str],
service_account: pulumi.Input[str],
role_path: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a IamRoleForServiceAccount resource.
:param pulumi.Input[str] namespace: The EKS namespace that will use the role for IAM authentication.
:param pulumi.Input[str] provider_arn: The OIDC provider ARN.
:param pulumi.Input[str] service_account: The EKS service account that will use the role for IAM authentication.
:param pulumi.Input[str] role_path: The IAM path where the role exists.
"""
pulumi.set(__self__, "namespace", namespace)
pulumi.set(__self__, "provider_arn", provider_arn)
pulumi.set(__self__, "service_account", service_account)
if role_path is not None:
pulumi.set(__self__, "role_path", role_path)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
"""
The EKS namespace that will use the role for IAM authentication.
"""
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="providerArn")
def provider_arn(self) -> pulumi.Input[str]:
"""
The OIDC provider ARN.
"""
return pulumi.get(self, "provider_arn")
@provider_arn.setter
def provider_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "provider_arn", value)
@property
@pulumi.getter(name="serviceAccount")
def service_account(self) -> pulumi.Input[str]:
"""
The EKS service account that will use the role for IAM authentication.
"""
return pulumi.get(self, "service_account")
@service_account.setter
def service_account(self, value: pulumi.Input[str]):
pulumi.set(self, "service_account", value)
@property
@pulumi.getter(name="rolePath")
def role_path(self) -> Optional[pulumi.Input[str]]:
"""
The IAM path where the role exists.
"""
return pulumi.get(self, "role_path")
@role_path.setter
def role_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_path", value)
class IamRoleForServiceAccount(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
namespace: Optional[pulumi.Input[str]] = None,
provider_arn: Optional[pulumi.Input[str]] = None,
role_path: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a IamRoleForServiceAccount resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] namespace: The EKS namespace that will use the role for IAM authentication.
:param pulumi.Input[str] provider_arn: The OIDC provider ARN.
:param pulumi.Input[str] role_path: The IAM path where the role exists.
:param pulumi.Input[str] service_account: The EKS service account that will use the role for IAM authentication.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IamRoleForServiceAccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a IamRoleForServiceAccount resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param IamRoleForServiceAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IamRoleForServiceAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
namespace: Optional[pulumi.Input[str]] = None,
provider_arn: Optional[pulumi.Input[str]] = None,
role_path: Optional[pulumi.Input[str]] = None,
service_account: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IamRoleForServiceAccountArgs.__new__(IamRoleForServiceAccountArgs)
if namespace is None and not opts.urn:
raise TypeError("Missing required property 'namespace'")
__props__.__dict__["namespace"] = namespace
if provider_arn is None and not opts.urn:
raise TypeError("Missing required property 'provider_arn'")
__props__.__dict__["provider_arn"] = provider_arn
__props__.__dict__["role_path"] = role_path
if service_account is None and not opts.urn:
raise TypeError("Missing required property 'service_account'")
__props__.__dict__["service_account"] = service_account
__props__.__dict__["role"] = None
super(IamRoleForServiceAccount, __self__).__init__(
'awsIam:index:IamRoleForServiceAccount',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter
def role(self) -> pulumi.Output['pulumi_aws.iam.Role']:
"""
The IAM role created
"""
return pulumi.get(self, "role")
```
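Both overloads above funnel into `_internal_init`, which enforces the three required inputs unless an existing URN is being rehydrated, and exposes the provisioned role as an output. A minimal usage sketch follows; the package import path, account ARN, and names are illustrative assumptions, not values taken from this repository.
```python
import pulumi
from jkisk_pulumi_aws_iam import IamRoleForServiceAccount  # assumed package import

# Hypothetical inputs; a real provider ARN comes from the EKS cluster's OIDC provider.
irsa = IamRoleForServiceAccount(
    "app-irsa",
    namespace="default",
    provider_arn="arn:aws:iam::123456789012:oidc-provider/oidc.eks.example",
    service_account="app-service-account",
    role_path="/eks/",
)

# `role` is an Output wrapping the created aws.iam.Role.
pulumi.export("role_arn", irsa.role.apply(lambda r: r.arn))
```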
#### File: python/jkisk_pulumi_aws_iam/user.py
```python
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
import pulumi_aws
__all__ = ['UserArgs', 'User']
@pulumi.input_type
class UserArgs:
def __init__(__self__, *,
create_iam_access_key: pulumi.Input[bool],
create_user_login_profile: pulumi.Input[bool],
force_destroy: Optional[pulumi.Input[bool]] = None,
password_length: Optional[pulumi.Input[float]] = None,
password_reset_required: Optional[pulumi.Input[bool]] = None,
path: Optional[pulumi.Input[str]] = None,
permissions_boundary: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
ssh_key_encoding: Optional[pulumi.Input[str]] = None,
ssh_public_key: Optional[pulumi.Input[str]] = None,
upload_iam_user_ssh_key: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a User resource.
:param pulumi.Input[bool] create_iam_access_key: Boolean to determine whether to create an IAM access key
:param pulumi.Input[bool] create_user_login_profile: Boolean to determine whether to create an IAM user login profile
"""
pulumi.set(__self__, "create_iam_access_key", create_iam_access_key)
pulumi.set(__self__, "create_user_login_profile", create_user_login_profile)
if force_destroy is not None:
pulumi.set(__self__, "force_destroy", force_destroy)
if password_length is not None:
pulumi.set(__self__, "password_length", password_length)
if password_reset_required is not None:
pulumi.set(__self__, "password_reset_required", password_reset_required)
if path is not None:
pulumi.set(__self__, "path", path)
if permissions_boundary is not None:
pulumi.set(__self__, "permissions_boundary", permissions_boundary)
if pgp_key is not None:
pulumi.set(__self__, "pgp_key", pgp_key)
if ssh_key_encoding is not None:
pulumi.set(__self__, "ssh_key_encoding", ssh_key_encoding)
if ssh_public_key is not None:
pulumi.set(__self__, "ssh_public_key", ssh_public_key)
if upload_iam_user_ssh_key is not None:
pulumi.set(__self__, "upload_iam_user_ssh_key", upload_iam_user_ssh_key)
@property
@pulumi.getter(name="createIamAccessKey")
def create_iam_access_key(self) -> pulumi.Input[bool]:
"""
Boolean to determine whether to create an IAM access key
"""
return pulumi.get(self, "create_iam_access_key")
@create_iam_access_key.setter
def create_iam_access_key(self, value: pulumi.Input[bool]):
pulumi.set(self, "create_iam_access_key", value)
@property
@pulumi.getter(name="createUserLoginProfile")
def create_user_login_profile(self) -> pulumi.Input[bool]:
"""
Boolean to determine whether to create an IAM user login profile
"""
return pulumi.get(self, "create_user_login_profile")
@create_user_login_profile.setter
def create_user_login_profile(self, value: pulumi.Input[bool]):
pulumi.set(self, "create_user_login_profile", value)
@property
@pulumi.getter(name="forceDestroy")
def force_destroy(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "force_destroy")
@force_destroy.setter
def force_destroy(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_destroy", value)
@property
@pulumi.getter(name="passwordLength")
def password_length(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "password_length")
@password_length.setter
def password_length(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "password_length", value)
@property
@pulumi.getter(name="passwordResetRequired")
def password_reset_required(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "password_reset_required")
@password_reset_required.setter
def password_reset_required(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "password_reset_required", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter(name="permissionsBoundary")
def permissions_boundary(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "permissions_boundary")
@permissions_boundary.setter
def permissions_boundary(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "permissions_boundary", value)
@property
@pulumi.getter(name="pgpKey")
def pgp_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "pgp_key")
@pgp_key.setter
def pgp_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pgp_key", value)
@property
@pulumi.getter(name="sshKeyEncoding")
def ssh_key_encoding(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ssh_key_encoding")
@ssh_key_encoding.setter
def ssh_key_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssh_key_encoding", value)
@property
@pulumi.getter(name="sshPublicKey")
def ssh_public_key(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ssh_public_key")
@ssh_public_key.setter
def ssh_public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ssh_public_key", value)
@property
@pulumi.getter(name="uploadIamUserSshKey")
def upload_iam_user_ssh_key(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "upload_iam_user_ssh_key")
@upload_iam_user_ssh_key.setter
def upload_iam_user_ssh_key(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "upload_iam_user_ssh_key", value)
class User(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
create_iam_access_key: Optional[pulumi.Input[bool]] = None,
create_user_login_profile: Optional[pulumi.Input[bool]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
password_length: Optional[pulumi.Input[float]] = None,
password_reset_required: Optional[pulumi.Input[bool]] = None,
path: Optional[pulumi.Input[str]] = None,
permissions_boundary: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
ssh_key_encoding: Optional[pulumi.Input[str]] = None,
ssh_public_key: Optional[pulumi.Input[str]] = None,
upload_iam_user_ssh_key: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Create a User resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] create_iam_access_key: Boolean to determine whether to create an IAM access key
:param pulumi.Input[bool] create_user_login_profile: Boolean to determine whether to create an IAM user login profile
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: UserArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a User resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param UserArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(UserArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
create_iam_access_key: Optional[pulumi.Input[bool]] = None,
create_user_login_profile: Optional[pulumi.Input[bool]] = None,
force_destroy: Optional[pulumi.Input[bool]] = None,
password_length: Optional[pulumi.Input[float]] = None,
password_reset_required: Optional[pulumi.Input[bool]] = None,
path: Optional[pulumi.Input[str]] = None,
permissions_boundary: Optional[pulumi.Input[str]] = None,
pgp_key: Optional[pulumi.Input[str]] = None,
ssh_key_encoding: Optional[pulumi.Input[str]] = None,
ssh_public_key: Optional[pulumi.Input[str]] = None,
upload_iam_user_ssh_key: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = UserArgs.__new__(UserArgs)
if create_iam_access_key is None and not opts.urn:
raise TypeError("Missing required property 'create_iam_access_key'")
__props__.__dict__["create_iam_access_key"] = create_iam_access_key
if create_user_login_profile is None and not opts.urn:
raise TypeError("Missing required property 'create_user_login_profile'")
__props__.__dict__["create_user_login_profile"] = create_user_login_profile
__props__.__dict__["force_destroy"] = force_destroy
__props__.__dict__["password_length"] = password_length
__props__.__dict__["password_reset_required"] = password_reset_required
__props__.__dict__["path"] = path
__props__.__dict__["permissions_boundary"] = permissions_boundary
__props__.__dict__["pgp_key"] = pgp_key
__props__.__dict__["ssh_key_encoding"] = ssh_key_encoding
__props__.__dict__["ssh_public_key"] = ssh_public_key
__props__.__dict__["upload_iam_user_ssh_key"] = upload_iam_user_ssh_key
__props__.__dict__["access_key"] = None
__props__.__dict__["iam_user"] = None
__props__.__dict__["ssh_key"] = None
__props__.__dict__["user_login_profile"] = None
super(User, __self__).__init__(
'awsIam:index:User',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter(name="accessKey")
def access_key(self) -> pulumi.Output[Optional['pulumi_aws.iam.AccessKey']]:
"""
The access key associated with the IAM user
"""
return pulumi.get(self, "access_key")
@property
@pulumi.getter(name="iamUser")
def iam_user(self) -> pulumi.Output['pulumi_aws.iam.User']:
"""
The IAM user
"""
return pulumi.get(self, "iam_user")
@property
@pulumi.getter(name="sshKey")
def ssh_key(self) -> pulumi.Output[Optional['pulumi_aws.iam.SshKey']]:
"""
The SSH key associated with the IAM user
"""
return pulumi.get(self, "ssh_key")
@property
@pulumi.getter(name="userLoginProfile")
def user_login_profile(self) -> pulumi.Output[Optional['pulumi_aws.iam.UserLoginProfile']]:
"""
The user login profile associated with the IAM user
"""
return pulumi.get(self, "user_login_profile")
``` |
{
"source": "jkissinger/abode_alexa",
"score": 3
} |
#### File: jkissinger/abode_alexa/door_state.py
```python
import logging
from datetime import datetime
from enum import Enum
import options
DOOR_STATE = {}
class State(Enum):
CLOSED = 0
OPEN = 1
UNKNOWN = 2
class StateTime:
def __init__(self, state, timestamp):
self.state = state
self.timestamp = timestamp
self.last_warning_timestamp = timestamp
def update_door_state(door_name, string_state):
state = State.UNKNOWN
if string_state == 'Opened':
state = State.OPEN
elif string_state == 'Closed':
state = State.CLOSED
if door_name in DOOR_STATE:
logging.info("updating '" + door_name + "' from " + str(DOOR_STATE[door_name].state) + " to " + str(state))
else:
logging.info("created '" + door_name + "' as " + str(state))
DOOR_STATE[door_name] = StateTime(state, datetime.now())
def validate_door_states():
# return a list of doors that have been open too long
open_doors = []
for name, door in DOOR_STATE.items():
if door.state == State.OPEN:
difference = (datetime.now() - door.timestamp)
seconds_door_was_open = difference.total_seconds()
if seconds_door_was_open >= options.INITIAL_WARNING_THRESHOLD:
difference = (datetime.now() - door.last_warning_timestamp)
seconds_since_last_warning = difference.total_seconds()
if seconds_since_last_warning >= options.NEXT_WARNING_THRESHOLD:
open_doors.append(name)
door.last_warning_timestamp = datetime.now()
if open_doors:
logging.info("Found these doors to be open too long: " + str(open_doors))
return open_doors
```
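A short sketch of how the two helpers are meant to be driven, assuming `options` defines `INITIAL_WARNING_THRESHOLD` and `NEXT_WARNING_THRESHOLD` in seconds; the door names below are placeholders.
```python
from door_state import update_door_state, validate_door_states  # module shown above

# State updates typically come from parsed notification emails.
update_door_state('Front Door', 'Opened')
update_door_state('Back Door', 'Closed')

# A door is only reported after INITIAL_WARNING_THRESHOLD seconds open, and then
# re-reported every NEXT_WARNING_THRESHOLD seconds until it is closed.
for name in validate_door_states():
    print(f'{name} has been open too long')
```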
#### File: jkissinger/abode_alexa/gmail_checker.py
```python
from __future__ import print_function
import logging
import os.path
import time
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
def fetch_emails(service):
# Call the Gmail API
results = service.users().messages().list(maxResults=50, userId='me', q='from:<EMAIL> is:unread').execute()
message_ids = results.get('messages', [])
notifications = []
if not message_ids:
logging.debug('No messages found.')
else:
for message_id in message_ids:
message = service.users().messages().get(userId='me', id=message_id['id'], format='full').execute()
payload = message['payload']
headers = payload['headers']
for d in headers:
if d['name'] == 'Subject':
# To have the oldest at the front of the list, insert at 0 index
notifications.insert(0, d['value'])
service.users().messages().trash(userId='me', id=message_id['id']).execute()
return notifications
def check_notifications():
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('gmail', 'v1', cache_discovery=False, credentials=creds)
return fetch_emails(service)
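# A hedged sketch of a polling loop built on check_notifications(); the 30-second
# interval and the logging setup are assumptions, not part of this module.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    while True:
        for subject in check_notifications():
            logging.info('New notification email: %s', subject)
        time.sleep(30)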
``` |
{
"source": "jkitchin/autograd",
"score": 2
} |
#### File: autograd/tests/test_wrappers.py
```python
from __future__ import absolute_import
from builtins import range
import warnings
from functools import partial
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.test_util import check_grads, check_equivalent # , nd
from autograd.tracer import primitive, isbox
from autograd import (grad, elementwise_grad, jacobian, value_and_grad,
hessian_tensor_product, hessian, make_hvp,
tensor_jacobian_product, checkpoint, make_jvp, make_ggnvp)
npr.seed(1)
def test_return_both():
fun = lambda x : 3.0 * x**3.2
d_fun = grad(fun)
f_and_d_fun = value_and_grad(fun)
test_x = 1.7
f, d = f_and_d_fun(test_x)
assert f == fun(test_x)
assert d == d_fun(test_x)
def test_value_and_grad():
fun = lambda x: np.sum(np.sin(x)**2)
dfun = grad(fun)
dfun_both = value_and_grad(fun)
x = npr.randn(5)
assert not isbox(dfun_both(x)[0])
check_equivalent(fun(x), dfun_both(x)[0])
check_equivalent(dfun(x), dfun_both(x)[1])
def fun2(x): return dfun_both(x)[0]
check_grads(fun2)(x)
def test_hessian():
# Check Hessian of a quadratic function.
D = 5
H = npr.randn(D, D)
def fun(x):
return np.dot(np.dot(x, H),x)
hess = hessian(fun)
x = npr.randn(D)
check_equivalent(hess(x), H + H.T)
def test_multigrad():
def complicated_fun(a,b,c,d,e,f=1.1, g=9.0):
return a + np.sin(b) + np.cosh(c) + np.cos(d) + np.tan(e) + f + g
def complicated_fun_3_1(d_b):
d, b = d_b
return complicated_fun(A, b, C, d, E, f=F, g=G)
A = 0.5
B = -0.3
C = 0.2
D = -1.1
E = 0.7
F = 0.6
G = -0.1
wrapped = grad(complicated_fun, argnum=[3, 1])(A, B, C, D, E, f=F, g=G)
explicit = grad(complicated_fun_3_1)((D, B))
check_equivalent(wrapped, explicit)
def test_value_and_multigrad():
def complicated_fun(a,b,c,d,e,f=1.1, g=9.0):
return a + np.sin(b) + np.cosh(c) + np.cos(d) + np.tan(e) + f + g
A = 0.5
B = -0.3
C = 0.2
D = -1.1
E = 0.7
F = 0.6
G = -0.1
dfun = grad(complicated_fun, argnum=[3, 1])
dfun_both = value_and_grad(complicated_fun, argnum=[3, 1])
check_equivalent(complicated_fun(A, B, C, D, E, f=F, g=G),
dfun_both(A, B, C, D, E, f=F, g=G)[0])
check_equivalent(dfun(A, B, C, D, E, f=F, g=G),
dfun_both(A, B, C, D, E, f=F, g=G)[1])
def test_multigrad_onearg():
fun = lambda x, y: np.sum(x + np.sin(y))
packed_fun = lambda xy: np.sum(xy[0] + np.sin(xy[1]))
A, B = npr.randn(3), npr.randn(3)
check_equivalent(grad(fun, argnum=[0])(A,B), (grad(packed_fun)((A,B))[0],))
def test_elementwise_grad():
def simple_fun(a):
return a + np.sin(a) + np.cosh(a)
A = npr.randn(10)
wrapped = elementwise_grad(simple_fun)(A)
explicit = np.array([grad(simple_fun)(A[i]) for i in range(len(A))])
check_equivalent(wrapped, explicit)
def test_elementwise_grad_multiple_args():
def simple_fun(a, b):
return a + np.sin(a) + np.cosh(b)
A = 0.9
B = npr.randn(10)
argnum = 1
wrapped = elementwise_grad(simple_fun, argnum)(A, B)
explicit = np.array([grad(simple_fun, argnum)(A, B[i]) for i in range(len(B))])
check_equivalent(wrapped, explicit)
def test_hessian_tensor_product():
fun = lambda a: np.sum(np.sin(a))
a = npr.randn(5)
v = npr.randn(5)
H = hessian(fun)(a)
check_equivalent(np.dot(H, v), hessian_tensor_product(fun)(a, v))
def test_hvp():
fun = lambda a: np.sum(np.sin(a))
a = npr.randn(5)
v = npr.randn(5)
H = hessian(fun)(a)
hvp = make_hvp(fun)(a)[0]
check_equivalent(np.dot(H, v), hvp(v))
def test_hessian_matrix_product():
fun = lambda a: np.sum(np.sin(a))
a = npr.randn(5, 4)
V = npr.randn(5, 4)
H = hessian(fun)(a)
check_equivalent(np.tensordot(H, V), hessian_tensor_product(fun)(a, V))
def test_hessian_tensor_product_3d():
fun = lambda a: np.sum(np.sin(a))
a = npr.randn(5, 4, 3)
V = npr.randn(5, 4, 3)
H = hessian(fun)(a)
check_equivalent(np.tensordot(H, V, axes=np.ndim(V)), hessian_tensor_product(fun)(a, V))
def test_tensor_jacobian_product():
# This function will have an asymmetric jacobian matrix.
fun = lambda a: np.roll(np.sin(a), 1)
a = npr.randn(5)
V = npr.randn(5)
J = jacobian(fun)(a)
check_equivalent(np.dot(V.T, J), tensor_jacobian_product(fun)(a, V))
def test_matrix_jacobian_product():
fun = lambda a: np.roll(np.sin(a), 1)
a = npr.randn(5, 4)
V = npr.randn(5, 4)
J = jacobian(fun)(a)
check_equivalent(np.tensordot(V, J), tensor_jacobian_product(fun)(a, V))
def test_tensor_jacobian_product_3d():
fun = lambda a: np.roll(np.sin(a), 1)
a = npr.randn(5, 4, 3)
V = npr.randn(5, 4)
J = jacobian(fun)(a)
check_equivalent(np.tensordot(V, J, axes=np.ndim(V)), tensor_jacobian_product(fun)(a, V))
def test_deprecated_defgrad_wrapper():
from autograd.core import primitive
@primitive
def new_mul(x, y):
return x * y
with warnings.catch_warnings(record=True) as w:
new_mul.defgrad(lambda ans, x, y : lambda g : y * g)
new_mul.defgrad(lambda ans, x, y : lambda g : x * g, argnum=1)
def fun(x, y):
return new_mul(x, y)
mat1 = npr.randn(2, 2)
mat2 = npr.randn(2, 2)
check_grads(fun, modes=['rev'])(mat1, mat2)
def test_deprecated_defvjp_wrapper():
from autograd.core import primitive
@primitive
def new_mul(x, y):
return x * y
with warnings.catch_warnings(record=True) as w:
new_mul.defvjp(lambda g, ans, vs, gvs, x, y : y * g)
new_mul.defvjp(lambda g, ans, vs, gvs, x, y : x * g, argnum=1)
def fun(x, y):
return new_mul(x, y)
mat1 = npr.randn(2, 2)
mat2 = npr.randn(2, 2)
check_grads(fun, modes=['rev'])(mat1, mat2)
def test_deprecated_defvjp_is_zero_wrapper():
from autograd.core import primitive
@primitive
def new_mul(x, y):
return 0 * x * y
with warnings.catch_warnings(record=True) as w:
new_mul.defvjp_is_zero([0, 1])
def fun(x, y):
return new_mul(x, y)
mat1 = npr.randn(2, 2)
mat2 = npr.randn(2, 2)
with warnings.catch_warnings(record=True) as w:
check_grads(fun, modes=['rev'])(mat1, mat2)
def test_deprecated_quick_grad_check_wrapper():
from autograd.util import quick_grad_check
with warnings.catch_warnings(record=True) as w:
quick_grad_check(lambda x, y: x**2 + y, 1., (2.,))
def test_partial():
def f(x, y):
return x
grad(partial(f, y=1))
def test_dtypes():
def f(x):
return np.sum(x**2)
# Array y with dtype np.float32
y = np.random.randn(10, 10).astype(np.float32)
assert grad(f)(y).dtype.type is np.float32
y = np.random.randn(10, 10).astype(np.float16)
assert grad(f)(y).dtype.type is np.float16
def test_checkpoint_correctness():
bar = lambda x, y: 2*x + y + 5
checkpointed_bar = checkpoint(bar)
foo = lambda x: bar(x, x/3.) + bar(x, x**2)
foo2 = lambda x: checkpointed_bar(x, x/3.) + checkpointed_bar(x, x**2)
assert np.allclose(foo(3.), foo2(3.))
assert np.allclose(grad(foo)(3.), grad(foo2)(3.))
baz = lambda *args: sum(args)
checkpointed_baz = checkpoint(baz)
foobaz = lambda x: baz(x, x/3.)
foobaz2 = lambda x: checkpointed_baz(x, x/3.)
assert np.allclose(foobaz(3.), foobaz2(3.))
assert np.allclose(grad(foobaz)(3.), grad(foobaz2)(3.))
def checkpoint_memory():
'''This test is meant to be run manually, since it depends on
memory_profiler and its behavior may vary.'''
try:
from memory_profiler import memory_usage
except ImportError:
return
def f(a):
for _ in range(10):
a = np.sin(a**2 + 1)
return a
checkpointed_f = checkpoint(f)
def testfun(f, x):
for _ in range(5):
x = f(x)
return np.sum(x)
gradfun = grad(testfun, 1)
A = npr.RandomState(0).randn(100000)
max_usage = max(memory_usage((gradfun, (f, A))))
max_checkpointed_usage = max(memory_usage((gradfun, (checkpointed_f, A))))
assert max_checkpointed_usage < max_usage / 2.
def test_make_jvp():
A = npr.randn(3, 5)
x = npr.randn(5)
v = npr.randn(5)
fun = lambda x: np.tanh(np.dot(A, x))
jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
jvp = make_jvp(fun)
check_equivalent(jvp_explicit(x)(v), jvp(x)(v)[1])
def _make_explicit_ggnvp(f, g=lambda x: 1./2*np.dot(x, x)):
def ggnvp_maker(x):
J = jacobian(f)(x)
H = hessian(g)(f(x))
def ggnvp(v):
return np.dot(J.T, np.dot(H, np.dot(J, v)))
return ggnvp
return ggnvp_maker
def test_make_ggnvp():
A = npr.randn(5, 4)
x = npr.randn(4)
v = npr.randn(4)
fun = lambda x: np.dot(A, x)
check_equivalent(make_ggnvp(fun)(x)(v), _make_explicit_ggnvp(fun)(x)(v))
fun2 = lambda x: np.tanh(np.dot(A, x))
check_equivalent(make_ggnvp(fun2)(x)(v), _make_explicit_ggnvp(fun2)(x)(v))
def test_make_ggnvp_nondefault_g():
A = npr.randn(5, 4)
x = npr.randn(4)
v = npr.randn(4)
g = lambda y: np.sum(2.*y**2 + y**4)
fun = lambda x: np.dot(A, x)
check_equivalent(make_ggnvp(fun, g)(x)(v), _make_explicit_ggnvp(fun, g)(x)(v))
fun2 = lambda x: np.tanh(np.dot(A, x))
check_equivalent(make_ggnvp(fun2, g)(x)(v), _make_explicit_ggnvp(fun2, g)(x)(v))
## No longer support this behavior
# def test_make_ggnvp_broadcasting():
# A = npr.randn(4, 5)
# x = npr.randn(10, 4)
# v = npr.randn(10, 4)
# fun = lambda x: np.tanh(np.dot(x, A))
# res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
# res2 = make_ggnvp(fun)(x)(v)
# check_equivalent(res1, res2)
def test_wrapped_name_and_docs():
def foo(x): pass
assert grad.__name__ == 'grad'
assert grad.__doc__.startswith("\n Returns a function which")
assert grad(foo, 1).__name__ == 'grad_of_foo_wrt_argnum_1'
assert grad(foo, 1).__doc__.startswith(" grad of function foo with")
``` |
{
"source": "JKitok/girder",
"score": 3
} |
#### File: girder/cli/shell.py
```python
import click
import girder
import sys
from girder.utility.server import configureServer
def _launchShell(context):
"""
Launches a Python shell with the given context.
:param context: A dictionary containing key value pairs
of variable name -> value to be set in the newly
launched shell.
"""
header = 'Girder %s' % girder.__version__
header += '\nThe current context provides the variables webroot and appconf for use.'
try:
from IPython import embed
return embed(header=header, user_ns=context)
except ImportError:
import code
return code.interact(banner=header, local=context)
@click.command('shell', short_help='Run a Girder shell.', help='Run an interactive Girder shell '
'or a script in the Girder environment.')
@click.option('--plugins', default=None, help='Comma separated list of plugins to import.')
@click.argument('script', type=click.Path(exists=True, dir_okay=False), required=False)
@click.argument('args', nargs=-1, required=False)
def main(plugins, script, args):
if plugins is not None:
plugins = plugins.split(',')
webroot, appconf = configureServer(plugins=plugins)
if script is None:
_launchShell({
'webroot': webroot,
'appconf': appconf
})
else:
globals_ = {k: v for k, v in globals().items() if k not in {'__file__', '__name__'}}
sys.argv = [script] + list(args)
exec(open(script, 'rb').read(), dict(
webroot=webroot, appconf=appconf, __name__='__main__',
__file__=script, **globals_))
```
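For reference, the command above is a thin wrapper around `configureServer`; a script run through it effectively sees something like the sketch below (the plugin name is a placeholder).
```python
from girder.utility.server import configureServer

# Roughly what `main` sets up before dropping into the shell or the script.
webroot, appconf = configureServer(plugins=['jobs'])  # placeholder plugin list
print(type(webroot).__name__, list(appconf)[:3])
```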
#### File: authorized_upload/girder_authorized_upload/rest.py
```python
from girder.api import access
from girder.api.describe import describeRoute, Description
from girder.api.rest import loadmodel, Resource
from girder.constants import AccessType, TokenScope
from girder.exceptions import ValidationException
from girder.models.setting import Setting
from girder.models.token import Token
from girder.settings import SettingKey
from girder.utility import mail_utils
from .constants import TOKEN_SCOPE_AUTHORIZED_UPLOAD
class AuthorizedUpload(Resource):
def __init__(self):
super().__init__()
self.resourceName = 'authorized_upload'
self.route('POST', (), self.createAuthorizedUpload)
@access.user(scope=TokenScope.DATA_WRITE)
@loadmodel(map={'folderId': 'folder'}, model='folder', level=AccessType.WRITE)
@describeRoute(
Description('Create an authorized upload URL.')
.param('folderId', 'Destination folder ID for the upload.')
.param('duration', 'How many days the token should last.', required=False, dataType='int')
)
def createAuthorizedUpload(self, folder, params):
try:
if params.get('duration'):
days = int(params.get('duration'))
else:
days = Setting().get(SettingKey.COOKIE_LIFETIME)
except ValueError:
raise ValidationException('Token duration must be an integer, or leave it empty.')
token = Token().createToken(days=days, user=self.getCurrentUser(), scope=(
TOKEN_SCOPE_AUTHORIZED_UPLOAD, 'authorized_upload_folder_%s' % folder['_id']))
url = '%s#authorized_upload/%s/%s' % (
mail_utils.getEmailUrlPrefix(), folder['_id'], token['_id'])
return {'url': url}
```
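The endpoint is exposed as `POST /authorized_upload` and only needs a writable folder id plus an authenticated token; a hedged sketch using `requests` follows (the server URL, token, and folder id are placeholders).
```python
import requests

api = 'https://girder.example.com/api/v1'           # placeholder deployment
headers = {'Girder-Token': 'replace-with-a-token'}  # token with data-write scope

resp = requests.post(
    api + '/authorized_upload',
    params={'folderId': '5d000000000000000000000a', 'duration': 7},
    headers=headers,
)
print(resp.json()['url'])  # '<prefix>#authorized_upload/<folderId>/<tokenId>'
```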
#### File: gravatar/girder_gravatar/settings.py
```python
from girder.utility import setting_utilities
class PluginSettings:
DEFAULT_IMAGE = 'gravatar.default_image'
@setting_utilities.default(PluginSettings.DEFAULT_IMAGE)
def _defaultDefaultImage():
return 'identicon'
@setting_utilities.validator(PluginSettings.DEFAULT_IMAGE)
def _validateDefaultImage(doc):
# TODO should we update user collection to remove gravatar_baseUrl vals?
pass
```
#### File: hashsum_download/girder_hashsum_download/settings.py
```python
from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTO_COMPUTE = 'hashsum_download.auto_compute'
@setting_utilities.default(PluginSettings.AUTO_COMPUTE)
def _defaultAutoCompute():
return False
@setting_utilities.validator(PluginSettings.AUTO_COMPUTE)
def _validateAutoCompute(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Auto-compute hash setting must be true or false.')
```
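Because the validator only accepts booleans, toggling the setting through the `Setting` model behaves as sketched below; this assumes a configured Girder database connection, and the string value is deliberately invalid.
```python
from girder.exceptions import ValidationException
from girder.models.setting import Setting
from girder_hashsum_download.settings import PluginSettings

Setting().set(PluginSettings.AUTO_COMPUTE, True)       # accepted
try:
    Setting().set(PluginSettings.AUTO_COMPUTE, 'yes')   # rejected by the validator above
except ValidationException as exc:
    print(exc)
```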
#### File: girder_jobs/models/job.py
```python
import datetime
from bson import json_util
from girder import events
from girder.constants import AccessType, SortDir
from girder.exceptions import ValidationException
from girder.models.model_base import AccessControlledModel
from girder.models.notification import Notification
from girder.models.token import Token
from girder.models.user import User
from ..constants import JobStatus, JOB_HANDLER_LOCAL
class Job(AccessControlledModel):
def initialize(self):
self.name = 'job'
compoundSearchIndex = (
('userId', SortDir.ASCENDING),
('created', SortDir.DESCENDING),
('type', SortDir.ASCENDING),
('status', SortDir.ASCENDING)
)
self.ensureIndices([(compoundSearchIndex, {}),
'created', 'parentId', 'celeryTaskId'])
self.exposeFields(level=AccessType.READ, fields={
'title', 'type', 'created', 'interval', 'when', 'status',
'progress', 'log', 'meta', '_id', 'public', 'parentId', 'asynchronous',
'updated', 'timestamps', 'handler', 'jobInfoSpec'})
self.exposeFields(level=AccessType.SITE_ADMIN, fields={'args', 'kwargs'})
def validate(self, job):
self._validateStatus(job['status'])
return job
def _validateStatus(self, status):
if not JobStatus.isValid(status):
raise ValidationException(
'Invalid job status %s.' % status, field='status')
def _validateChild(self, parentJob, childJob):
if str(parentJob['_id']) == str(childJob['_id']):
raise ValidationException('Child Id cannot be equal to Parent Id')
if childJob['parentId']:
raise ValidationException('Cannot overwrite the Parent Id')
def list(self, user=None, types=None, statuses=None,
limit=0, offset=0, sort=None, currentUser=None, parentJob=None):
"""
List a page of jobs for a given user.
:param user: The user who owns the job.
:type user: dict, 'all', 'none', or None.
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
        :param limit: The page limit.
:param offset: The page offset.
:param sort: The sort field.
:param parentJob: Parent Job.
:param currentUser: User for access filtering.
"""
return self.findWithPermissions(
offset=offset, limit=limit, sort=sort, user=currentUser,
types=types, statuses=statuses, jobUser=user, parentJob=parentJob)
def findWithPermissions(self, query=None, offset=0, limit=0, timeout=None, fields=None,
sort=None, user=None, level=AccessType.READ,
types=None, statuses=None, jobUser=None, parentJob=None, **kwargs):
"""
Search the list of jobs.
:param query: The search query (see general MongoDB docs for "find()")
:type query: dict
:param offset: The offset into the results
:type offset: int
:param limit: Maximum number of documents to return
:type limit: int
:param timeout: Cursor timeout in ms. Default is no timeout.
:type timeout: int
:param fields: A mask for filtering result documents by key, or None to return the full
document, passed to MongoDB find() as the `projection` param.
:type fields: `str, list of strings or tuple of strings for fields to be included from the
document, or dict for an inclusion or exclusion projection`.
:param sort: The sort order.
:type sort: List of (key, order) tuples.
:param user: The user to check policies against.
:type user: dict or None
:param level: The access level. Explicitly passing None skips doing
permissions checks.
:type level: AccessType
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
:param jobUser: The user who owns the job.
:type jobUser: dict, 'all', 'none', or None.
:param parentJob: Parent Job.
:returns: A pymongo Cursor or CommandCursor. If a CommandCursor, it
has been augmented with a count function.
"""
if query is None:
query = {}
# When user is 'all', no filtering by user, list jobs of all users.
if jobUser == 'all':
pass
# When user is 'none' or None, list anonymous user jobs.
elif jobUser == 'none' or jobUser is None:
query['userId'] = None
# Otherwise, filter by user id
else:
query['userId'] = jobUser['_id']
if types is not None:
query['type'] = {'$in': types}
if statuses is not None:
query['status'] = {'$in': statuses}
if parentJob:
query['parentId'] = parentJob['_id']
return super().findWithPermissions(
query, offset=offset, limit=limit, timeout=timeout, fields=fields,
sort=sort, user=user, level=level, **kwargs)
def cancelJob(self, job):
"""
Revoke/cancel a job. This simply triggers the jobs.cancel event and
sets the job status to CANCELED. If one of the event handlers
calls preventDefault() on the event, this job will *not* be put into
the CANCELED state.
:param job: The job to cancel.
"""
event = events.trigger('jobs.cancel', info=job)
if not event.defaultPrevented:
job = self.updateJob(job, status=JobStatus.CANCELED)
return job
def createLocalJob(self, module, function=None, **kwargs):
"""
Takes the same keyword arguments as :py:func:`createJob`, except this
sets the handler to the local handler and takes additional parameters
to specify the module and function that should be run.
:param module: The name of the python module to run.
:type module: str
:param function: Function name within the module to run. If not passed,
the default name of "run" will be used.
:type function: str or None
:returns: The job that was created.
"""
kwargs['handler'] = JOB_HANDLER_LOCAL
kwargs['save'] = False
job = self.createJob(**kwargs)
job['module'] = module
if function is not None:
job['function'] = function
return self.save(job)
def createJob(self, title, type, args=(), kwargs=None, user=None, when=None,
interval=0, public=False, handler=None, asynchronous=False,
save=True, parentJob=None, otherFields=None):
"""
Create a new job record.
:param title: The title of the job.
:type title: str
:param type: The type of the job.
:type type: str
:param args: Positional args of the job payload.
:type args: list or tuple
:param kwargs: Keyword arguments of the job payload.
:type kwargs: dict
:param user: The user creating the job.
:type user: dict or None
:param when: Minimum start time for the job (UTC).
:type when: datetime
:param interval: If this job should be recurring, set this to a value
in seconds representing how often it should occur. Set to <= 0 for
jobs that should only be run once.
:type interval: int
:param public: Public read access flag.
:type public: bool
:param handler: If this job should be handled by a specific handler,
use this field to store that information.
:param asynchronous: Whether the job is to be run asynchronously. For now this
only applies to jobs that are scheduled to run locally.
:type asynchronous: bool
:param save: Whether the documented should be saved to the database.
:type save: bool
:param parentJob: The job which will be set as a parent
:type parentJob: Job
:param otherFields: Any additional fields to set on the job.
:type otherFields: dict
"""
now = datetime.datetime.utcnow()
if when is None:
when = now
if kwargs is None:
kwargs = {}
otherFields = otherFields or {}
parentId = None
if parentJob:
parentId = parentJob['_id']
job = {
'title': title,
'type': type,
'args': args,
'kwargs': kwargs,
'created': now,
'updated': now,
'when': when,
'interval': interval,
'status': JobStatus.INACTIVE,
'progress': None,
'log': [],
'meta': {},
'handler': handler,
'asynchronous': asynchronous,
'timestamps': [],
'parentId': parentId
}
job.update(otherFields)
self.setPublic(job, public=public)
if user:
job['userId'] = user['_id']
self.setUserAccess(job, user=user, level=AccessType.ADMIN)
else:
job['userId'] = None
if save:
job = self.save(job)
if user:
deserialized_kwargs = job['kwargs']
job['kwargs'] = json_util.dumps(job['kwargs'])
Notification().createNotification(
type='job_created', data=job, user=user,
expires=datetime.datetime.utcnow() + datetime.timedelta(seconds=30))
job['kwargs'] = deserialized_kwargs
return job
def save(self, job, *args, **kwargs):
"""
We extend save so that we can serialize the kwargs before sending them
to the database. This will allow kwargs with $ and . characters in the
keys.
"""
job['kwargs'] = json_util.dumps(job['kwargs'])
job = super().save(job, *args, **kwargs)
job['kwargs'] = json_util.loads(job['kwargs'])
return job
def find(self, *args, **kwargs):
"""
Overrides the default find behavior to exclude the log by default.
:param includeLog: Whether to include the log field in the documents.
:type includeLog: bool
"""
kwargs['fields'] = self._computeFields(kwargs)
return super().find(*args, **kwargs)
def load(self, *args, **kwargs):
"""
We extend load to deserialize the kwargs back into a dict since we
serialized them on the way into the database.
:param includeLog: Whether to include the log field in the document.
:type includeLog: bool
"""
kwargs['fields'] = self._computeFields(kwargs)
job = super().load(*args, **kwargs)
if job and isinstance(job.get('kwargs'), str):
job['kwargs'] = json_util.loads(job['kwargs'])
if job and isinstance(job.get('log'), str):
# Legacy support: log used to be just a string, but we want to
# consistently return a list of strings now.
job['log'] = [job['log']]
return job
def scheduleJob(self, job):
"""
Trigger the event to schedule this job. Other plugins are in charge of
actually scheduling and/or executing the job, except in the case when
the handler is 'local'.
"""
if job.get('asynchronous', job.get('async')) is True:
events.daemon.trigger('jobs.schedule', info=job)
else:
events.trigger('jobs.schedule', info=job)
def createJobToken(self, job, days=7):
"""
Create a token that can be used just for the management of an individual
job, e.g. updating job info, progress, logs, status.
"""
return Token().createToken(days=days, scope='jobs.job_' + str(job['_id']))
def updateJob(self, job, log=None, overwrite=False, status=None,
progressTotal=None, progressCurrent=None, notify=True,
progressMessage=None, otherFields=None):
"""
Update an existing job. Any of the updateable fields that are set to None in the kwargs of
this method will not be modified. If you set progress information on the job for the first
time and set notify=True, a new notification record for the job progress will be created.
If notify=True, job status changes will also create a notification with type="job_status",
and log changes will create a notification with type="job_log".
:param job: The job document to update.
:param log: Message to append to the job log. If you wish to overwrite
instead of append, pass overwrite=True.
:type log: str
:param overwrite: Whether to overwrite the log (default is append).
:type overwrite: bool
:param status: New status for the job.
:type status: JobStatus
        :param progressTotal: Max progress value for this job.
        :param progressCurrent: Current progress value for this job.
        :param progressMessage: Message to set on the job progress.
        :param notify: Whether to create notifications for these updates.
        :type notify: bool
:param otherFields: Any additional fields to set on the job.
:type otherFields: dict
"""
event = events.trigger('jobs.job.update', {
'job': job,
'params': {
'log': log,
'overwrite': overwrite,
'status': status,
'progressTotal': progressTotal,
'progressMessage': progressMessage,
'otherFields': otherFields
}
})
if event.defaultPrevented:
return job
now = datetime.datetime.utcnow()
user = None
otherFields = otherFields or {}
if job['userId']:
user = User().load(job['userId'], force=True)
query = {
'_id': job['_id']
}
updates = {
'$push': {},
'$set': {}
}
statusChanged = False
if log is not None:
self._updateLog(job, log, overwrite, now, notify, user, updates)
if status is not None:
try:
status = int(status)
except ValueError:
# Allow non int states
pass
statusChanged = status != job['status']
self._updateStatus(job, status, now, query, updates)
if progressMessage is not None or progressCurrent is not None or progressTotal is not None:
self._updateProgress(
job, progressTotal, progressCurrent, progressMessage, notify, user, updates)
for k, v in otherFields.items():
job[k] = v
updates['$set'][k] = v
if updates['$set'] or updates['$push']:
if not updates['$push']:
del updates['$push']
job['updated'] = now
updates['$set']['updated'] = now
updateResult = self.update(query, update=updates, multi=False)
# If our query didn't match anything then our state transition
# was not valid. So raise an exception
if updateResult.matched_count != 1:
job = self.load(job['_id'], force=True)
msg = "Invalid state transition to '%s', Current state is '%s'." % (
status, job['status'])
raise ValidationException(msg, field='status')
events.trigger('jobs.job.update.after', {
'job': job
})
        # We don't want to do this until we know the update was successful
if statusChanged and user is not None and notify:
self._createUpdateStatusNotification(now, user, job)
return job
def _updateLog(self, job, log, overwrite, now, notify, user, updates):
"""Helper for updating a job's log."""
if overwrite:
updates['$set']['log'] = [log]
elif log:
updates['$push']['log'] = log
if notify and user:
expires = now + datetime.timedelta(seconds=30)
Notification().createNotification(
type='job_log', data={
'_id': job['_id'],
'overwrite': overwrite,
'text': log
}, user=user, expires=expires)
def _createUpdateStatusNotification(self, now, user, job):
expires = now + datetime.timedelta(seconds=30)
filtered = self.filter(job, user)
filtered.pop('kwargs', None)
filtered.pop('log', None)
Notification().createNotification(
type='job_status', data=filtered, user=user, expires=expires)
def _updateStatus(self, job, status, now, query, updates):
"""Helper for updating job progress information."""
self._validateStatus(status)
if status != job['status']:
job['status'] = status
previous_states = JobStatus.validTransitions(job, status)
if previous_states is None:
# Get the current state
job = self.load(job['_id'], force=True)
msg = "No valid state transition to '%s'. Current state is '%s'." % (
status, job['status'])
raise ValidationException(msg, field='status')
query['status'] = {
'$in': previous_states
}
updates['$set']['status'] = status
ts = {
'status': status,
'time': now
}
job['timestamps'].append(ts)
updates['$push']['timestamps'] = ts
def _updateProgress(self, job, total, current, message, notify, user, updates):
"""Helper for updating job progress information."""
state = JobStatus.toNotificationStatus(job['status'])
if current is not None:
current = float(current)
if total is not None:
total = float(total)
if job['progress'] is None:
if notify and job['userId']:
notification = self._createProgressNotification(
job, total, current, state, message)
notificationId = notification['_id']
else:
notificationId = None
job['progress'] = {
'message': message,
'total': total,
'current': current,
'notificationId': notificationId
}
updates['$set']['progress'] = job['progress']
else:
if total is not None:
job['progress']['total'] = total
updates['$set']['progress.total'] = total
if current is not None:
job['progress']['current'] = current
updates['$set']['progress.current'] = current
if message is not None:
job['progress']['message'] = message
updates['$set']['progress.message'] = message
if notify and user:
if job['progress']['notificationId'] is None:
notification = self._createProgressNotification(
job, total, current, state, message, user)
nid = notification['_id']
job['progress']['notificationId'] = nid
updates['$set']['progress.notificationId'] = nid
else:
notification = Notification().load(job['progress']['notificationId'])
Notification().updateProgress(
notification, state=state,
message=job['progress']['message'],
current=job['progress']['current'],
total=job['progress']['total'])
def _createProgressNotification(self, job, total, current, state, message,
user=None):
if not user:
user = User().load(job['userId'], force=True)
# TODO support channel-based notifications for jobs. For
# right now we'll just go through the user.
return Notification().initProgress(
user, job['title'], total, state=state, current=current,
message=message, estimateTime=False, resource=job, resourceName=self.name)
def filter(self, doc, user=None, additionalKeys=None):
"""
Overrides the parent ``filter`` method to also deserialize the ``kwargs``
field if it is still in serialized form. This is handled in ``load``, but
required here also for fetching lists of jobs.
"""
doc = super().filter(doc, user, additionalKeys=additionalKeys)
if 'kwargs' in doc and isinstance(doc['kwargs'], str):
doc['kwargs'] = json_util.loads(doc['kwargs'])
return doc
def _computeFields(self, kwargs, includeLogDefault=False):
"""
Helper to compute the projection operator for default log exclusion.
"""
fields = kwargs.get('fields')
if fields is None and not kwargs.pop('includeLog', includeLogDefault):
fields = {'log': False}
return fields
def getAllTypesAndStatuses(self, user):
"""
Get a list of types and statuses of all jobs or jobs owned by a particular user.
:param user: The user who owns the jobs.
:type user: dict, or 'all'.
"""
query = {}
if user == 'all':
pass
else:
query['userId'] = user['_id']
types = self.collection.distinct('type', query)
statuses = self.collection.distinct('status', query)
return {'types': types, 'statuses': statuses}
def setParentJob(self, job, parentJob):
"""
Sets a parent job for a job
:param job: Job document which the parent will be set on
:type job: Job
:param parentJob: Parent job
        :type parentJob: Job
"""
self._validateChild(parentJob, job)
return self.updateJob(job, otherFields={'parentId': parentJob['_id']})
def listChildJobs(self, job):
"""
Lists the child jobs for a given job
:param job: Job document
:type job: Job
"""
query = {'parentId': job['_id']}
cursor = self.find(query)
user = User().load(job['userId'], force=True)
for r in self.filterResultsByPermission(cursor=cursor, user=user, level=AccessType.READ):
yield r
```
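Tying the model's entry points together: a producer typically creates a job, schedules it, and then streams log and progress updates through `updateJob`. The sketch below assumes a running Girder instance with at least one user; the title and type are placeholders.
```python
from girder.models.user import User
from girder_jobs.models.job import Job

user = User().findOne()  # any existing user document

job = Job().createJob(
    title='Example export', type='example_export',  # placeholder title/type
    user=user, public=False, asynchronous=True)
Job().scheduleJob(job)  # triggers the 'jobs.schedule' event for a handler to pick up

job = Job().updateJob(
    job, log='step 1 finished\n',
    progressTotal=100, progressCurrent=50, progressMessage='halfway there')
```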
#### File: girder_oauth/providers/globus.py
```python
import urllib.parse
from girder.api.rest import getApiUrl
from girder.exceptions import RestException
from girder.models.setting import Setting
from .base import ProviderBase
from ..settings import PluginSettings
class Globus(ProviderBase):
_AUTH_URL = 'https://auth.globus.org/v2/oauth2/authorize'
_AUTH_SCOPES = ['urn:globus:auth:scope:auth.globus.org:view_identities',
'openid', 'profile', 'email']
_TOKEN_URL = 'https://auth.globus.org/v2/oauth2/token'
_API_USER_URL = 'https://auth.globus.org/v2/oauth2/userinfo'
def getClientIdSetting(self):
return Setting().get(PluginSettings.GLOBUS_CLIENT_ID)
def getClientSecretSetting(self):
return Setting().get(PluginSettings.GLOBUS_CLIENT_SECRET)
@classmethod
def getUrl(cls, state):
clientId = Setting().get(PluginSettings.GLOBUS_CLIENT_ID)
if not clientId:
raise Exception('No Globus client ID setting is present.')
callbackUrl = '/'.join((getApiUrl(), 'oauth', 'globus', 'callback'))
query = urllib.parse.urlencode({
'response_type': 'code',
'access_type': 'online',
'client_id': clientId,
'redirect_uri': callbackUrl,
'state': state,
'scope': ' '.join(cls._AUTH_SCOPES)
})
return '%s?%s' % (cls._AUTH_URL, query)
def getToken(self, code):
params = {
'grant_type': 'authorization_code',
'code': code,
'client_id': self.clientId,
'client_secret': self.clientSecret,
'redirect_uri': self.redirectUri
}
resp = self._getJson(method='POST', url=self._TOKEN_URL,
data=params,
headers={'Accept': 'application/json'})
if 'error' in resp:
raise RestException(
'Got an error exchanging token from provider: "%s".' % resp,
code=502)
return resp
def getUser(self, token):
headers = {
'Authorization': 'Bearer {}'.format(token['access_token'])
}
resp = self._getJson(method='GET', url=self._API_USER_URL,
headers=headers)
oauthId = resp.get('sub')
if not oauthId:
raise RestException(
'Globus identity did not return a valid ID.', code=502)
email = resp.get('email')
if not email:
raise RestException(
'Globus identity did not return a valid email.', code=502)
name = resp['name'].split()
firstName = name[0]
lastName = name[-1]
return self._createOrReuseUser(oauthId, email, firstName, lastName)
```
#### File: sentry/girder_sentry/rest.py
```python
from girder.api import access
from girder.api.describe import Description, describeRoute
from girder.api.rest import Resource
from girder.models.setting import Setting
from .settings import PluginSettings
class Sentry(Resource):
def __init__(self):
super().__init__()
self.resourceName = 'sentry'
self.route('GET', ('dsn',), self._getDsn)
@access.public
@describeRoute(
Description('Public URL for getting the Sentry DSN.')
)
def _getDsn(self, params):
dsn = Setting().get(PluginSettings.FRONTEND_DSN)
return {'sentry_dsn': dsn}
```
#### File: girder/tests/base.py
```python
import base64
import cherrypy
import io
import json
import logging
import os
import shutil
import signal
import sys
import unittest
import urllib.parse
import warnings
from girder.utility._cache import cache, requestCache
from girder.utility.server import setup as setupServer
from girder.constants import AccessType, ROOT_DIR, ServerMode
from girder.models import getDbConnection
from girder.models.model_base import _modelSingletons
from girder.models.assetstore import Assetstore
from girder.models.file import File
from girder.models.setting import Setting
from girder.models.token import Token
from girder.settings import SettingKey
from . import mock_smtp
from . import mock_s3
from . import mongo_replicaset
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'setup_database.*')
from . import setup_database
local = cherrypy.lib.httputil.Host('127.0.0.1', 30000)
remote = cherrypy.lib.httputil.Host('127.0.0.1', 30001)
mockSmtp = mock_smtp.MockSmtpReceiver()
mockS3Server = None
enabledPlugins = []
usedDBs = {}
def startServer(mock=True, mockS3=False):
"""
Test cases that communicate with the server should call this
function in their setUpModule() function.
"""
# If the server starts, a database will exist and we can remove it later
dbName = cherrypy.config['database']['uri'].split('/')[-1]
usedDBs[dbName] = True
# By default, this passes "[]" to "plugins", disabling any installed plugins
server = setupServer(mode=ServerMode.TESTING, plugins=enabledPlugins)
if mock:
cherrypy.server.unsubscribe()
cherrypy.engine.start()
# Make server quiet (won't announce start/stop or requests)
cherrypy.config.update({'environment': 'embedded'})
# Log all requests if we asked to do so
if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split():
cherrypy.config.update({'log.screen': True})
logHandler = logging.StreamHandler(sys.stdout)
logHandler.setLevel(logging.DEBUG)
cherrypy.log.error_log.addHandler(logHandler)
# Tell CherryPy to throw exceptions in request handling code
cherrypy.config.update({'request.throw_errors': True})
mockSmtp.start()
if mockS3:
global mockS3Server
mockS3Server = mock_s3.startMockS3Server()
return server
def stopServer():
"""
Test cases that communicate with the server should call this
function in their tearDownModule() function.
"""
cherrypy.engine.exit()
mockSmtp.stop()
dropAllTestDatabases()
def dropAllTestDatabases():
"""
Unless otherwise requested, drop all test databases.
"""
if 'keepdb' not in os.environ.get('EXTRADEBUG', '').split():
db_connection = getDbConnection()
for dbName in usedDBs:
db_connection.drop_database(dbName)
usedDBs.clear()
def dropTestDatabase(dropModels=True):
"""
Call this to clear all contents from the test database. Also forces models
to reload.
"""
db_connection = getDbConnection()
dbName = cherrypy.config['database']['uri'].split('/')[-1]
if 'girder_test_' not in dbName:
raise Exception('Expected a testing database name, but got %s' % dbName)
if dbName in db_connection.list_database_names():
if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
raise Exception('Warning: database %s already exists' % dbName)
db_connection.drop_database(dbName)
usedDBs[dbName] = True
if dropModels:
for model in _modelSingletons:
model.reconnect()
def dropGridFSDatabase(dbName):
"""
Clear all contents from a gridFS database used as an assetstore.
:param dbName: the name of the database to drop.
"""
db_connection = getDbConnection()
if dbName in db_connection.list_database_names():
if dbName not in usedDBs and 'newdb' in os.environ.get('EXTRADEBUG', '').split():
raise Exception('Warning: database %s already exists' % dbName)
db_connection.drop_database(dbName)
usedDBs[dbName] = True
def dropFsAssetstore(path):
"""
Delete all of the files in a filesystem assetstore. This unlinks the path,
which is potentially dangerous.
:param path: the path to remove.
"""
if os.path.isdir(path):
shutil.rmtree(path)
class TestCase(unittest.TestCase):
"""
Test case base class for the application. Adds helpful utilities for
database and HTTP communication.
"""
def setUp(self, assetstoreType=None, dropModels=True):
"""
We want to start with a clean database each time, so we drop the test
database before each test. We then add an assetstore so the file model
can be used without 500 errors.
:param assetstoreType: if 'gridfs' or 's3', use that assetstore.
'gridfsrs' uses a GridFS assetstore with a replicaset. For any other value, use
a filesystem assetstore.
"""
self.assetstoreType = assetstoreType
dropTestDatabase(dropModels=dropModels)
assetstoreName = os.environ.get('GIRDER_TEST_ASSETSTORE', 'test')
assetstorePath = os.path.join(
ROOT_DIR, 'tests', 'assetstore', assetstoreName)
if assetstoreType == 'gridfs':
# Name this as '_auto' to prevent conflict with assetstores created
# within test methods
gridfsDbName = 'girder_test_%s_assetstore_auto' % assetstoreName.replace('.', '_')
dropGridFSDatabase(gridfsDbName)
self.assetstore = Assetstore().createGridFsAssetstore(name='Test', db=gridfsDbName)
elif assetstoreType == 'gridfsrs':
gridfsDbName = 'girder_test_%s_rs_assetstore_auto' % assetstoreName
self.replicaSetConfig = mongo_replicaset.makeConfig()
mongo_replicaset.startMongoReplicaSet(self.replicaSetConfig)
self.assetstore = Assetstore().createGridFsAssetstore(
name='Test', db=gridfsDbName,
mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,'
'127.0.0.1:27072', replicaset='replicaset')
elif assetstoreType == 's3':
self.assetstore = Assetstore().createS3Assetstore(
name='Test', bucket='bucketname', accessKeyId='test',
secret='test', service=mockS3Server.service)
else:
dropFsAssetstore(assetstorePath)
self.assetstore = Assetstore().createFilesystemAssetstore(
name='Test', root=assetstorePath)
host, port = mockSmtp.address or ('localhost', 25)
Setting().set(SettingKey.SMTP_HOST, host)
Setting().set(SettingKey.SMTP_PORT, port)
Setting().set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0)
if os.environ.get('GIRDER_TEST_DATABASE_CONFIG'):
setup_database.main(os.environ['GIRDER_TEST_DATABASE_CONFIG'])
def tearDown(self):
"""
Stop any services that we started just for this test.
"""
# If "self.setUp" is overridden, "self.assetstoreType" may not be set
if getattr(self, 'assetstoreType', None) == 'gridfsrs':
mongo_replicaset.stopMongoReplicaSet(self.replicaSetConfig)
# Invalidate cache regions which persist across tests
cache.invalidate()
requestCache.invalidate()
def assertStatusOk(self, response):
"""
Call this to assert that the response yielded a 200 OK output_status.
:param response: The response object.
"""
self.assertStatus(response, 200)
def assertStatus(self, response, code):
"""
Call this to assert that a given HTTP status code was returned.
:param response: The response object.
:param code: The status code.
:type code: int or str
"""
code = str(code)
if not response.output_status.startswith(code.encode()):
msg = 'Response status was %s, not %s.' % (response.output_status,
code)
if hasattr(response, 'json'):
msg += ' Response body was:\n%s' % json.dumps(
response.json, sort_keys=True, indent=4,
separators=(',', ': '))
else:
                msg += ' Response body was:\n%s' % self.getBody(response)
self.fail(msg)
def assertDictContains(self, expected, actual, msg=''):
"""
Assert that an object is a subset of another.
This test will fail under the following conditions:
1. ``actual`` is not a dictionary.
2. ``expected`` contains a key not in ``actual``.
        3. For any key in ``expected``, ``expected[key] != actual[key]``
        :param expected: The expected key/value pairs
:param actual: The actual object
:param msg: An optional message to include with test failures
"""
self.assertIsInstance(actual, dict, msg + ' does not exist')
for k, v in expected.items():
if k not in actual:
self.fail('%s expected key "%s"' % (msg, k))
self.assertEqual(v, actual[k])
def assertHasKeys(self, obj, keys):
"""
Assert that the given object has the given list of keys.
:param obj: The dictionary object.
:param keys: The keys it must contain.
:type keys: list or tuple
"""
for k in keys:
self.assertTrue(k in obj, 'Object does not contain key "%s"' % k)
def assertRedirect(self, resp, url=None):
"""
Assert that we were given an HTTP redirect response, and optionally
assert that you were redirected to a specific URL.
:param resp: The response object.
:param url: If you know the URL you expect to be redirected to, you
should pass it here.
:type url: str
"""
self.assertStatus(resp, 303)
self.assertTrue('Location' in resp.headers)
if url:
self.assertEqual(url, resp.headers['Location'])
def assertNotHasKeys(self, obj, keys):
"""
Assert that the given object does not have any of the given list of
keys.
:param obj: The dictionary object.
:param keys: The keys it must not contain.
:type keys: list or tuple
"""
for k in keys:
self.assertFalse(k in obj, 'Object contains key "%s"' % k)
def assertValidationError(self, response, field=None):
"""
Assert that a ValidationException was thrown with the given field.
:param response: The response object.
:param field: The field that threw the validation exception.
:type field: str
"""
self.assertStatus(response, 400)
self.assertEqual(response.json['type'], 'validation')
self.assertEqual(response.json.get('field', None), field)
def assertAccessDenied(self, response, level, modelName, user=None):
if level == AccessType.READ:
ls = 'Read'
elif level == AccessType.WRITE:
ls = 'Write'
else:
ls = 'Admin'
if user is None:
self.assertStatus(response, 401)
else:
self.assertStatus(response, 403)
self.assertEqual('%s access denied for %s.' % (ls, modelName),
response.json['message'])
def assertMissingParameter(self, response, param):
"""
Assert that the response was a "parameter missing" error response.
:param response: The response object.
:param param: The name of the missing parameter.
:type param: str
"""
self.assertEqual('Parameter "%s" is required.' % param, response.json.get('message', ''))
self.assertStatus(response, 400)
def getSseMessages(self, resp):
messages = self.getBody(resp).strip().split('\n\n')
if not messages or messages == ['']:
return ()
return [json.loads(m.replace('data: ', '')) for m in messages]
def uploadFile(self, name, contents, user, parent, parentType='folder',
mimeType=None):
"""
Upload a file. This is meant for small testing files, not very large
files that should be sent in multiple chunks.
:param name: The name of the file.
:type name: str
:param contents: The file contents
:type contents: str
:param user: The user performing the upload.
:type user: dict
:param parent: The parent document.
:type parent: dict
:param parentType: The type of the parent ("folder" or "item")
:type parentType: str
:param mimeType: Explicit MIME type to set on the file.
:type mimeType: str
:returns: The file that was created.
:rtype: dict
"""
mimeType = mimeType or 'application/octet-stream'
resp = self.request(
path='/file', method='POST', user=user, params={
'parentType': parentType,
'parentId': str(parent['_id']),
'name': name,
'size': len(contents),
'mimeType': mimeType
})
self.assertStatusOk(resp)
resp = self.request(
path='/file/chunk', method='POST', user=user, body=contents, params={
'uploadId': resp.json['_id']
}, type=mimeType)
self.assertStatusOk(resp)
file = resp.json
self.assertHasKeys(file, ['itemId'])
self.assertEqual(file['name'], name)
self.assertEqual(file['size'], len(contents))
self.assertEqual(file['mimeType'], mimeType)
return File().load(file['_id'], force=True)
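# Illustrative usage of the two-step upload helper above (the names
# `self.admin` and `folder` are assumptions, not part of this class):
#
#     doc = self.uploadFile('readme.txt', 'hello world', user=self.admin,
#                           parent=folder, parentType='folder')
#     self.assertEqual(doc['size'], len('hello world'))
#
# The helper first POSTs /file to create the upload, then POSTs the whole
# body to /file/chunk, so it is only suitable for small, single-chunk files.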
def ensureRequiredParams(self, path='/', method='GET', required=(), user=None):
"""
Ensure that a set of parameters is required by the endpoint.
:param path: The endpoint path to test.
:param method: The HTTP method of the endpoint.
:param required: The required parameter set.
:type required: sequence of str
"""
for exclude in required:
params = dict.fromkeys([p for p in required if p != exclude], '')
resp = self.request(path=path, method=method, params=params, user=user)
self.assertMissingParameter(resp, exclude)
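# Example sketch (the endpoint path is hypothetical): check that a route
# enforces both 'login' and 'password'. The helper re-issues the request once
# per parameter, omitting exactly one each time, and expects the
# "Parameter ... is required." error for the omitted one.
#
#     self.ensureRequiredParams(path='/user/authentication', method='GET',
#                               required=('login', 'password'))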
def _genToken(self, user):
"""
Helper method for creating an authentication token for the user.
"""
token = Token().createToken(user)
return str(token['_id'])
def _buildHeaders(self, headers, cookie, user, token, basicAuth,
authHeader):
if cookie is not None:
headers.append(('Cookie', cookie))
if user is not None:
headers.append(('Girder-Token', self._genToken(user)))
elif token is not None:
if isinstance(token, dict):
headers.append(('Girder-Token', token['_id']))
else:
headers.append(('Girder-Token', token))
if basicAuth is not None:
auth = base64.b64encode(basicAuth.encode('utf8'))
headers.append((authHeader, 'Basic %s' % auth.decode()))
def request(self, path='/', method='GET', params=None, user=None,
prefix='/api/v1', isJson=True, basicAuth=None, body=None,
type=None, exception=False, cookie=None, token=None,
additionalHeaders=None, useHttps=False,
authHeader='Authorization', appPrefix=''):
"""
Make an HTTP request.
:param path: The path part of the URI.
:type path: str
:param method: The HTTP method.
:type method: str
:param params: The HTTP parameters.
:type params: dict
:param prefix: The prefix to use before the path.
:param isJson: Whether the response is a JSON object.
:param basicAuth: A string to pass with the Authorization: Basic header
of the form 'login:password'
:param exception: Set this to True if a 500 is expected from this call.
:param cookie: A custom cookie value to set.
:param token: If you want to use an existing token to login, pass
the token ID.
:type token: str
:param additionalHeaders: A list of headers to add to the
request. Each item is a tuple of the form
(header-name, header-value).
:param useHttps: If True, pretend to use HTTPS.
:param authHeader: The HTTP request header to use for authentication.
:type authHeader: str
:param appPrefix: The CherryPy application prefix (mounted location without trailing slash)
:type appPrefix: str
:returns: The cherrypy response object from the request.
"""
headers = [('Host', '127.0.0.1'), ('Accept', 'application/json')]
qs = fd = None
if additionalHeaders:
headers.extend(additionalHeaders)
if isinstance(body, str):
body = body.encode('utf8')
if params:
qs = urllib.parse.urlencode(params)
if params and body:
# In this case, we are forced to send params in query string
fd = io.BytesIO(body)
headers.append(('Content-Type', type))
headers.append(('Content-Length', '%d' % len(body)))
elif method in ['POST', 'PUT', 'PATCH'] or body:
if type:
qs = body
elif params:
qs = qs.encode('utf8')
headers.append(('Content-Type', type or 'application/x-www-form-urlencoded'))
headers.append(('Content-Length', '%d' % len(qs or b'')))
fd = io.BytesIO(qs or b'')
qs = None
app = cherrypy.tree.apps[appPrefix]
request, response = app.get_serving(
local, remote, 'http' if not useHttps else 'https', 'HTTP/1.1')
request.show_tracebacks = True
self._buildHeaders(headers, cookie, user, token, basicAuth, authHeader)
url = prefix + path
try:
response = request.run(method, url, qs, 'HTTP/1.1', headers, fd)
finally:
if fd:
fd.close()
if isJson:
body = self.getBody(response)
try:
response.json = json.loads(body)
except ValueError:
raise AssertionError('Received non-JSON response: ' + body)
if not exception and response.output_status.startswith(b'500'):
raise AssertionError('Internal server error: %s' % self.getBody(response))
return response
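# Minimal sketch of how tests typically drive this helper (names such as
# `self.user` and the request parameters are assumptions for illustration):
#
#     resp = self.request(path='/user/me', method='GET', user=self.user)
#     self.assertStatusOk(resp)
#     body = resp.json  # parsed because isJson defaults to True
#
#     resp = self.request(path='/folder', method='POST', user=self.user,
#                         params={'parentId': 'abc', 'name': 'x'})
#     self.assertStatus(resp, 400)  # e.g. an invalid parentId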
def getBody(self, response, text=True):
"""
Returns the response body as a text type or binary string.
:param response: The response object from the server.
:param text: If true, treat the data as a text string, otherwise, treat
as binary.
"""
data = '' if text else b''
for chunk in response.body:
if text and isinstance(chunk, bytes):
chunk = chunk.decode('utf8')
elif not text and not isinstance(chunk, bytes):
chunk = chunk.encode('utf8')
data += chunk
return data
def _sigintHandler(*args):
print('Received SIGINT, shutting down mock SMTP server...')
mockSmtp.stop()
sys.exit(1)
signal.signal(signal.SIGINT, _sigintHandler)
# If we insist on test databases not existing when we start, make sure we
# check right away.
if 'newdb' in os.environ.get('EXTRADEBUG', '').split():
dropTestDatabase(False)
```
#### File: girder/test/test_access.py
```python
import pytest
from girder.api.rest import loadmodel, Resource
from girder.api import access
from girder.constants import AccessType, TokenScope
from girder.models.user import User
from girder.models.token import Token
from girder.settings import SettingKey
from pytest_girder.assertions import assertStatus, assertStatusOk
CUSTOM_SCOPE = 'Some.Exclusive.Scope'
# We deliberately don't have an access decorator
def defaultFunctionHandler(**kwargs):
return
@access.admin
def adminFunctionHandler(**kwargs):
return
@access.user
def userFunctionHandler(**kwargs):
return
@access.public
def publicFunctionHandler(**kwargs):
return
@access.token(scope=CUSTOM_SCOPE, required=True)
def requireScope(**kwargs):
return
@access.public
@loadmodel(map={'id': 'user'}, model='user', level=AccessType.READ)
def plainFn(user, params):
return user
@access.public
@loadmodel(map={'userId': 'user'}, model='user', level=AccessType.READ)
def loadModelWithMap(user, params):
return user
class AccessTestResource(Resource):
def __init__(self):
super().__init__()
self.resourceName = 'accesstest'
self.route('GET', ('default_access', ), self.defaultHandler)
self.route('GET', ('admin_access', ), self.adminHandler)
self.route('GET', ('user_access', ), self.userHandler)
self.route('GET', ('public_access', ), self.publicHandler)
self.route('GET', ('cookie_auth', ), self.cookieHandler)
self.route('POST', ('cookie_auth', ), self.cookieHandler)
self.route('GET', ('scoped_cookie_auth', ), self.cookieScopedHandler)
self.route('GET', ('fn_admin', ), self.fnAdmin)
self.route('GET', ('scoped_user', ), self.scopedUser)
self.route('GET', ('fn_public', ), self.fnPublic)
self.route('GET', ('scoped_public', ), self.scopedPublic)
# We deliberately don't have an access decorator
def defaultHandler(self, **kwargs):
return
@access.admin
def adminHandler(self, **kwargs):
return
@access.user
def userHandler(self, **kwargs):
return
@access.public
def publicHandler(self, **kwargs):
return self.getCurrentUser()
@access.user(cookie=True)
def cookieHandler(self, **kwargs):
return
@access.user(scope=TokenScope.DATA_READ, cookie=True)
def cookieScopedHandler(self, **kwargs):
return
@access.admin()
def fnAdmin(self, **kwargs):
return
@access.user(scope=TokenScope.DATA_READ)
def scopedUser(self, **kwargs):
return
@access.public()
def fnPublic(self, **kwargs):
return self.getCurrentUser()
@access.public(scope=TokenScope.SETTINGS_READ)
def scopedPublic(self, **kwargs):
return self.getCurrentUser()
@pytest.fixture
def server(server):
server.root.api.v1.accesstest = AccessTestResource()
# Public access endpoints do not need to be a Resource subclass method,
# they can be a regular function
accesstest = server.root.api.v1.accesstest
accesstest.route('GET', ('default_function_access', ),
defaultFunctionHandler)
accesstest.route('GET', ('admin_function_access', ), adminFunctionHandler)
accesstest.route('GET', ('user_function_access', ), userFunctionHandler)
accesstest.route('GET', ('public_function_access', ),
publicFunctionHandler)
accesstest.route('GET', ('test_loadmodel_plain', ':id'), plainFn)
accesstest.route('GET', ('test_loadmodel_query',), loadModelWithMap)
accesstest.route('GET', ('test_required_scope_exists', ), requireScope)
yield server
@pytest.fixture
def cookie(user):
yield 'girderToken=%s' % str(Token().createToken(user)['_id'])
@pytest.fixture
def adminSettingToken(db, admin):
yield Token().createToken(user=admin, scope=TokenScope.SETTINGS_READ)
@pytest.fixture
def adminEmailToken(db, admin):
yield Token().createToken(user=admin, scope=TokenScope.DATA_READ)
@pytest.fixture
def userDataReadToken(db, user):
yield Token().createToken(user=user, scope=TokenScope.DATA_READ)
@pytest.fixture
def userSettingToken(db, user):
yield Token().createToken(user=user, scope=TokenScope.SETTINGS_READ)
@pytest.fixture
def userToken(db, user):
yield Token().createToken(user=user)
public_endpoints = ['/accesstest/public_function_access', '/accesstest/public_access',
'/accesstest/fn_public', '/accesstest/scoped_public']
user_endpoints = ['/accesstest/user_access', '/accesstest/scoped_user',
'/accesstest/user_function_access']
admin_endpoints = ['/accesstest/default_access',
'/accesstest/admin_access',
'/accesstest/fn_admin',
'/accesstest/default_function_access',
'/accesstest/admin_function_access']
@pytest.mark.parametrize('endpoint', public_endpoints)
def testPublicCanAccessPublicEndpoints(server, endpoint):
resp = server.request(path=endpoint, method='GET')
assertStatusOk(resp)
assert resp.json is None
@pytest.mark.parametrize('endpoint', user_endpoints + admin_endpoints)
def testPublicCannotAccessNonPublicEndpoints(server, endpoint):
resp = server.request(path=endpoint, method='GET')
assertStatus(resp, 401)
@pytest.mark.parametrize('endpoint', public_endpoints + user_endpoints)
def testUserCanAccessUserEndpoints(server, user, endpoint):
resp = server.request(path=endpoint, method='GET', user=user)
assertStatusOk(resp)
@pytest.mark.parametrize('endpoint', admin_endpoints)
def testUserCannotAccessAdminEndpoints(server, user, endpoint):
resp = server.request(path=endpoint, method='GET', user=user)
assertStatus(resp, 403)
@pytest.mark.parametrize('endpoint', public_endpoints + user_endpoints + admin_endpoints)
def testAdminCanAccessAllEndpoints(server, admin, endpoint):
resp = server.request(path=endpoint, method='GET', user=admin)
assertStatusOk(resp)
@pytest.mark.parametrize('method', ['GET', 'POST'])
def testCookieAuthFailsWithNoAuth(server, method):
resp = server.request(path='/accesstest/cookie_auth', method=method)
assertStatus(resp, 401)
@pytest.mark.parametrize('method', ['GET', 'POST'])
def testTokenAuthSucceedsOnCookieAuthEndpoints(server, user, method):
resp = server.request(path='/accesstest/cookie_auth', method=method, user=user)
assertStatusOk(resp)
@pytest.mark.parametrize('method', ['GET', 'POST'])
def testCookieAuthWorks(server, user, cookie, method):
resp = server.request(path='/accesstest/cookie_auth', method=method, cookie=cookie)
assertStatusOk(resp)
def testCookieScopedPrefersToken(server, user):
resp = server.request(
path='/accesstest/cookie_auth', user=user,
cookie='girderToken=<PASSWORD>')
assertStatusOk(resp)
def testLoadModelDecorator(server, user):
resp = server.request(
path='/accesstest/test_loadmodel_plain/%s' % user['_id'], method='GET')
assertStatusOk(resp)
assert resp.json['_id'] == str(user['_id'])
resp = server.request(path='/accesstest/test_loadmodel_query', params={'userId': None})
assertStatus(resp, 400)
assert resp.json['message'] == 'Invalid ObjectId: None'
def testGetFullAccessList(db, admin):
acl = User().getFullAccessList(admin)
assert len(acl['users']) == 1
def testReadingSettingsAsAdmin(server, admin):
# Reading settings as admin should work
resp = server.request(path='/system/setting', params={
'key': SettingKey.SMTP_PORT}, user=admin)
assertStatusOk(resp)
assert resp.json == 25
def testReadingSettingsAsUserShouldFail(server, user):
# Reading setting as non-admin should fail
resp = server.request(path='/system/setting', params={
'key': SettingKey.SMTP_PORT}, user=user)
assertStatus(resp, 403)
def testReadingSettingsWithAdminScopedToken(server, adminSettingToken):
# Reading settings with a properly scoped token should work
resp = server.request(path='/system/setting', params={
'key': SettingKey.SMTP_PORT}, token=adminSettingToken)
assertStatusOk(resp)
assert resp.json == 25
def testReadingSettingsWithAdminEmailToken(server, adminEmailToken):
# Reading settings with an improperly scoped token should fail
resp = server.request(path='/system/setting', params={
'key': SettingKey.SMTP_PORT}, token=adminEmailToken)
assertStatus(resp, 401)
def testReadingSettingsWithUserToken(server, userSettingToken):
# Non-admin user with this token scope should still not work
resp = server.request(path='/system/setting', params={
'key': SettingKey.SMTP_PORT}, token=userSettingToken)
assertStatus(resp, 403)
assert resp.json['message'] == 'Administrator access required.'
def testReadingAssetstoreWithSettingScopedToken(server, adminSettingToken):
# The setting-scope token should not grant access to other endpoints
resp = server.request(path='/assetstore', token=adminSettingToken)
assertStatus(resp, 401)
@pytest.mark.parametrize('endpoint', ['/accesstest/admin_access',
'/accesstest/fn_admin'])
def testAdminRawDecoratorIsEquivalentToReturnedDecorator(server, adminSettingToken, endpoint):
resp = server.request(path=endpoint, token=adminSettingToken)
assertStatus(resp, 401)
def testUserAccessToken(server, userDataReadToken):
resp = server.request(path='/accesstest/user_access', token=userDataReadToken)
assertStatus(resp, 401)
def testUserAccessTokenOnScopedEndpoint(server, userDataReadToken):
resp = server.request(path='/accesstest/scoped_user', token=userDataReadToken)
assertStatusOk(resp)
def testArtificialScopedAccess(server, admin, user, userDataReadToken, userToken):
# Test public access
for route in ('public_access', 'fn_public', 'scoped_public'):
path = '/accesstest/%s' % route
for t in (userDataReadToken, None):
resp = server.request(path=path, token=t)
assertStatusOk(resp)
assert resp.json is None
resp = server.request(path=path, token=userToken)
assertStatusOk(resp)
assert resp.json['_id'] == str(user['_id'])
# Make a correctly scoped token, should work.
token = Token().createToken(
user=user, scope=TokenScope.SETTINGS_READ)
resp = server.request(path=path, token=token)
assertStatusOk(resp)
assert resp.json['_id'] == str(user['_id'])
def testRequiredScopeExists(server, user):
token = Token().createToken(scope=CUSTOM_SCOPE)
resp = server.request(path='/accesstest/test_required_scope_exists')
# If not given a user or a valid auth token the status should be 401
assertStatus(resp, 401)
resp2 = server.request(path='/accesstest/test_required_scope_exists',
user=user)
# If the token does not have the CUSTOM_SCOPE the status should be 403
assertStatus(resp2, 403)
# If user is not given but the token has the correct scope
# the status should be 200
resp3 = server.request(path='/accesstest/test_required_scope_exists',
token=token)
assertStatus(resp3, 200)
```
#### File: girder/test/test_api_prefix.py
```python
import pytest
from pytest_girder.assertions import assertStatusOk
from girder.api import access
from girder.api.describe import Description, describeRoute
from girder.api.rest import Resource, Prefix
from girder.plugin import GirderPlugin
class Resourceful(Resource):
def __init__(self):
super().__init__()
self.route('GET', (), self.getResource, resource=self)
@access.public
@describeRoute(
Description('Get something.')
)
def getResource(self, params):
return ['custom REST route']
class APIPrefix(GirderPlugin):
def load(self, info):
info['apiRoot'].prefix = Prefix()
info['apiRoot'].prefix.resourceful = Resourceful()
info['apiRoot'].prefix.sibling = Resourceful()
@pytest.mark.plugin('has_api_prefix', APIPrefix)
@pytest.mark.parametrize('route', [
'/prefix/resourceful',
'/prefix/sibling'
])
def testCustomWebRoot(route, server):
"""
Tests the ability of plugins to serve their own custom server roots.
"""
resp = server.request(route)
assertStatusOk(resp)
assert resp.json == ['custom REST route']
``` |
{
"source": "jkitsao/flask-personal-blog",
"score": 3
} |
#### File: flask-personal-blog/app/models.py
```python
from . import db,login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from datetime import datetime
# ...
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin,db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
bio = db.Column(db.String(100000))
profile_pic_path = db.Column(db.String(255))
pass_secure = db.Column(db.String(1000000))
@property
def password(self):
raise AttributeError('You cannot read the password attribute')
@password.setter
def password(self, password):
self.pass_secure = generate_password_hash(password)
def verify_password(self,password):
return check_password_hash(self.pass_secure,password)
def __repr__(self):
return f'User {self.username}'
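# Illustrative use of the write-only password property above (a sketch, not
# part of the application code): assigning to `password` stores only a salted
# hash in `pass_secure`, and reading the attribute raises AttributeError.
#
#     user = User(username='kitsao', email='kitsao@example.com')
#     user.password = 'secret'
#     user.verify_password('secret')  # -> True
#     user.verify_password('wrong')   # -> False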
class Blog(db.Model):
__tablename__ = 'blog'
id = db.Column(db.Integer,primary_key = True)
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
category= db.Column(db.String(),index = True)
content= db.Column(db.String())
date_posted = db.Column(db.DateTime, default=datetime.utcnow)
comments = db.relationship('Comment', backref = 'blog1', lazy = 'dynamic')
def __repr__(self):
return f'blog1 {self.content}'
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer,primary_key = True)
blog_id = db.Column(db.Integer,db.ForeignKey ('blog.id'))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
content= db.Column(db.String(1000000))
def __repr__(self):
return f'Comment :content {self.content}'
class Subscriber(UserMixin, db.Model):
__tablename__="subscribers"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
title = db.Column(db.String(255))
email = db.Column(db.String(255),unique = True,index = True)
def save_subscriber(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_subscribers(cls,id):
return Subscriber.query.all()
def __repr__(self):
return f'User {self.email}'
``` |
{
"source": "jkitzes/macroeco",
"score": 2
} |
#### File: macroeco/empirical/test_empirical.py
```python
from __future__ import division
import os
from configparser import ConfigParser
import unittest
from numpy.testing import (TestCase, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises)
from pandas.util.testing import (assert_frame_equal)
import macroeco.empirical as emp
import macroeco.empirical._empirical as _emp
import numpy as np
import pandas as pd
import scipy.stats as stats
# Check whether shapely is installed
try:
import shapely.geometry as geo
shapely_missing = False
except:
shapely_missing = True
class Patches(TestCase):
def setUp(self):
local_path = os.path.dirname(os.path.abspath(__file__))
self.meta1_path = os.path.join(local_path, 'test_meta1.txt')
self.meta2_path = os.path.join(local_path, 'test_meta2.txt')
self.table1_path = os.path.join(local_path, 'test_table1.csv')
self.table1 = pd.DataFrame.from_csv(self.table1_path, index_col=False)
self.meta1 = ConfigParser()
self.meta1.read(self.meta1_path)
self.pat1 = emp.Patch(self.meta1_path) # No subset
self.pat2 = emp.Patch(self.meta2_path) # No subset
self.cols1 = 'spp_col:spp; count_col:count; x_col:x; y_col:y'
self.cols2 = 'spp_col:spp; count_col:count; x_col:mean; y_col:y'
self.A1 = 0.2 * 0.3
class TestPatch(Patches):
def test_load_data_meta(self):
assert_array_equal(self.pat1.table, self.table1)
assert_equal(self.pat1.meta, self.meta1)
def test_subset_numeric(self):
pat1 = emp.Patch(self.meta1_path, 'x>=0.2')
assert_array_equal(pat1.table, self.table1[self.table1.x >= 0.2])
self.meta1['x']['min'] = '0.2'
assert_equal(pat1.meta, self.meta1)
def test_subset_categorical(self):
pat1 = emp.Patch(self.meta1_path, "spp=='b'")
assert_array_equal(pat1.table, self.table1[self.table1['spp']=='b'])
assert_equal(pat1.meta, self.meta1) # Meta should not change
def test_multiple_subset(self):
# Only first element in table remains
pat1 = emp.Patch(self.meta1_path, "spp=='a' ; y < 0.2")
assert_array_equal(pat1.table.iloc[0], self.table1.iloc[0])
assert_equal(len(pat1.table), 1)
self.meta1['y']['max'] = '0.1'
assert_equal(pat1.meta, self.meta1)
def test_subset_count(self):
# Subsetting on count should work
pat1 = emp.Patch(self.meta1_path, subset="count > 2")
assert_equal(pat1.table['count'].iloc[0], 3)
assert_equal(len(pat1.table), 1)
class TestSAD(Patches):
def test_simple(self):
# Falling back on spp_col in metadata, so count 1 for each row
sad = emp.sad(self.pat1, None, None)
assert_array_equal(sad[0][1]['y'], [3,2])
def test_simple_with_cols(self):
# Specify count and spp_col here
sad = emp.sad(self.pat1, self.cols1, None)
assert_array_equal(sad[0][1]['y'], [4,4])
def test_two_way_split(self):
# Complete split generates 6 results
sad = emp.sad(self.pat1, self.cols1, 'x:2; y:3')
assert_equal(len(sad), 6)
# Goes through x then y
assert_equal(sad[0][1]['spp'].values, 'a')
assert_equal(sad[0][1]['y'].values, 2)
assert_equal(sad[1][1]['y'].values, [1,1])
assert_equal(sad[5][1]['spp'].values, 'b')
assert_equal(sad[0][1]['y'].values, 2)
def test_one_way_uneven_split(self):
# 0.2 should fall in second division of y
sad = emp.sad(self.pat1, self.cols1, 'y:2')
assert_equal(len(sad), 2)
assert_equal(sad[0][1]['spp'].values, ['a'])
assert_equal(sad[0][1]['y'].values, [2])
assert_equal(sad[1][1]['spp'].values, ['a','b'])
assert_equal(sad[1][1]['y'].values, [2,4])
def test_split_categorical(self):
sad = emp.sad(self.pat1, self.cols1, 'year:split; x:2')
assert_equal(sad[0][1]['y'].values, 3)
assert_equal(sad[1][1]['y'].values, [])
assert_equal(sad[2][1]['y'].values, [1,1])
assert_equal(sad[3][1]['y'].values, [3])
def test_clean(self):
# No a in second split on x
sad = emp.sad(self.pat1, self.cols1, 'x:2', clean=False)
assert_equal(len(sad[1][1]), 2) # Both spp when clean False
sad = emp.sad(self.pat1, self.cols1, 'x:2', clean=True)
assert_equal(len(sad[1][1]), 1) # Only 'b' when clean True
def test_split_panda_default_column_names(self):
# Columns can be named as key words in pandas
sad = emp.sad(self.pat2, self.cols2, splits="mean:2", clean=False)
assert_equal(len(sad[1][1]), 2)
sad = emp.sad(self.pat2, self.cols2, splits="mean:2; y:3", clean=True)
assert_equal(len(sad[1][1]), 2)
class TestSSAD(Patches):
def test_no_splits(self):
# Just total abundance by species
ssad = emp.ssad(self.pat1, self.cols1, None)
assert_array_equal(ssad[0][1]['y'], [4])
assert_array_equal(ssad[1][1]['y'], [4])
def test_with_split(self):
ssad = emp.ssad(self.pat1, self.cols1, 'x:2')
assert_array_equal(ssad[0][1]['y'], [4,0]) # spp a
assert_array_equal(ssad[1][1]['y'], [1,3]) # spp b
class TestSAR(Patches):
def test_no_splits(self):
sar = emp.sar(self.pat1, self.cols1, None, '1,1; 2,1; 2,3')
assert_array_almost_equal(sar[0][1]['x'],
[1*self.A1, 0.5*self.A1, 1/6*self.A1])
assert_array_equal(sar[0][1]['y'], [2, 1.5, (1+2+1+0+0+1)/6.])
def test_with_split(self):
sar = emp.sar(self.pat1, self.cols1, 'year:split', '2,1; 1,3')
assert_array_almost_equal(sar[0][1]['x'], [0.5*self.A1, 1/3.*self.A1])
assert_array_almost_equal(sar[1][1]['x'], [0.5*self.A1, 1/3.*self.A1])
assert_array_equal(sar[0][1]['y'], [0.5, 2/3.])
assert_array_equal(sar[1][1]['y'], [3/2., 1])
def test_single_division(self):
sar = emp.sar(self.pat1, self.cols1, None, '2,1')
assert_array_almost_equal(sar[0][1]['x'], [0.5*self.A1])
assert_array_equal(sar[0][1]['y'], [1.5])
def test_empty_equals_split_subset(self):
sar_empty = emp.sar(self.pat1, self.cols1, "", '1,1')
sar_split = emp.sar(self.pat1, self.cols1, "x:1; y:1", '1,1')
print(sar_empty)
print(sar_split)
assert_frame_equal(sar_empty[0][1].sort(axis=1),
sar_split[0][1].sort(axis=1))
class TestEAR(Patches):
def test_no_splits(self):
sar = emp.sar(self.pat1, self.cols1, None, '1,1; 2,1; 2,3', ear=True)
assert_array_equal(sar[0][1]['y'], [2, 0.5, 0])
def test_with_split(self):
sar = emp.sar(self.pat1, self.cols1, 'year:split', '2,1;1,3', ear=True)
assert_array_equal(sar[0][1]['y'], [0.5, 0])
assert_array_equal(sar[1][1]['y'], [0.5, 1/3.])
class TestCommGrid(Patches):
def test_no_splits_Sorensen(self):
comm = emp.comm_grid(self.pat1, self.cols1, None, '2,1')
assert_almost_equal(comm[0][1]['x'], [0.1])
assert_array_equal(comm[0][1]['y'], [2./(2+1)])
def test_no_splits_Jaccard(self):
comm = emp.comm_grid(self.pat1, self.cols1, None, '2,1',
metric='Jaccard')
assert_almost_equal(comm[0][1]['x'], [0.1])
assert_array_equal(comm[0][1]['y'], [1/2.])
def test_with_split(self):
comm = emp.comm_grid(self.pat1, self.cols1, 'year:split', '2,1')
assert_array_equal(comm[0][1]['y'], [0])
assert_array_equal(comm[1][1]['y'], [2/3.])
def test_y_division_even(self):
comm = emp.comm_grid(self.pat1, self.cols1, '', '1,3')
assert_array_equal(comm[0][1]['pair'], ['(0.15 0.1) - (0.15 0.2)',
'(0.15 0.1) - (0.15 0.3)',
'(0.15 0.2) - (0.15 0.3)'])
assert_array_almost_equal(comm[0][1]['x'], [0.1, 0.2, 0.1])
assert_array_equal(comm[0][1]['y'], [2/3., 2/3., 1.])
def test_x_y_division_uneven_y(self):
comm = emp.comm_grid(self.pat1, self.cols1, '', '2,2')
print(comm)
assert_array_equal(comm[0][1]['pair'], ['(0.1 0.125) - (0.1 0.275)',
'(0.1 0.125) - (0.2 0.125)',
'(0.1 0.125) - (0.2 0.275)',
'(0.1 0.275) - (0.2 0.125)',
'(0.1 0.275) - (0.2 0.275)',
'(0.2 0.125) - (0.2 0.275)'])
assert_array_almost_equal(comm[0][1]['x'], [0.15, 0.1, 0.180278, 0.180278,
0.1, 0.15], 6)
assert_array_equal(comm[0][1]['y'], [2/3., 0, 0, 0, 2/3., 0])
def test_x_y_division_uneven_y_jaccard(self):
comm = emp.comm_grid(self.pat1, self.cols1, '', '2,2',metric='Jaccard')
assert_array_equal(comm[0][1]['y'], [1/2., 0, 0, 0, 1/2., 0])
@unittest.skipIf(shapely_missing, "shapely not present, skipping O-ring test")
class TestORing(Patches):
# TODO: Main may fail with error if dataframe has no records when trying to
# fit or make plot.
def test_spp_no_present_returns_empty_df(self):
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'nothere', [0,.1,.2])
assert_frame_equal(o_ring[0][1], pd.DataFrame(columns=['x','y']))
def test_one_individual_returns_zeros(self):
self.pat1.table = self.pat1.table[2:4] # Leave 1 'a' and 1 'b'
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'a', [0,.1,.2])
assert_array_equal(o_ring[0][1]['y'], [0, 0])
def test_no_density_a(self):
# Points on bin edge may be allocated ambiguously due to floating point
# issues - testing here with slightly offset edges
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'a', [0,.101,.201,.301],
density=False)
assert_array_almost_equal(o_ring[0][1]['x'], [0.0505, 0.151, 0.251])
assert_array_almost_equal(o_ring[0][1]['y'], [8, 4, 0])
def test_no_density_b(self):
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'b', [0,.1,.2,.3],
density=False)
assert_array_almost_equal(o_ring[0][1]['x'], [0.05, 0.15,0.25])
assert_array_almost_equal(o_ring[0][1]['y'], [6, 6, 0])
def test_with_split_a(self):
o_ring = emp.o_ring(self.pat1, self.cols1, 'y:2', 'a', [0,.1,.2],
density=False)
assert_array_equal(o_ring[0][1]['y'], [2, 0]) # Bottom
assert_array_equal(o_ring[1][1]['y'], [2, 0]) # Top
def test_with_split_b(self):
o_ring = emp.o_ring(self.pat1, self.cols1, 'y:2', 'b', [0,.1,.2],
density=False)
assert_array_equal(o_ring[0][1]['y'], []) # Bottom
assert_array_equal(o_ring[1][1]['y'], [6, 6]) # Top
def test_density_a(self):
# First radius is 0.05
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'a', [0,.10000001])
assert_array_almost_equal(o_ring[0][1]['y'],
[8 / (1.25*np.pi*(0.1)**2)],
3)
def test_density_b(self):
# First radius is 0.05
o_ring = emp.o_ring(self.pat1, self.cols1, '', 'b', [0,.10000001,.1828427])
assert_array_almost_equal(o_ring[0][1]['y'],
[6 / (1.25*np.pi*(0.1)**2),
6 / (3/8 * np.pi*(0.1828427**2 - 0.1**2))],
3)
class TestProduct():
def test_product_with_order(self):
# Several places rely on product to sequentially loop first -> last
expected = [[1,5], [1,6], [1,7], [2,5], [2,6], [2,7]]
assert_equal(_emp._product([1,2],[5,6,7]), expected)
class TestDistance():
def test_cartesian_distance(self):
assert_equal(_emp._distance((0,0),(2,2)), np.sqrt(8))
class TestDecDegDistance():
def test_ucberkeley_to_sf(self):
# Latlong: http://www.findlatitudeandlongitude.com
# Dist: http://www.movable-type.co.uk/scripts/latlong.html (17.37 km)
berkeley = (37.87133, -122.259293)
sf = (37.780213, -122.419968)
assert_almost_equal(_emp._decdeg_distance(berkeley, sf), 17.37, 1)
class TestEmpiricalCDF():
def test_sorted_data(self):
test_data = [1, 1, 1, 1, 2, 3, 4, 5, 6, 6]
ans = [.4, .4, .4, .4, .5, .6, .7, .8, 1, 1]
res = emp.empirical_cdf(test_data)
assert_array_equal(ans, res['ecdf'])
def test_unsorted_data(self):
test_data = [6, 6, 1, 1, 5, 1, 1, 2, 3, 4]
ans = [.4, .4, .4, .4, .5, .6, .7, .8, 1, 1]
res = emp.empirical_cdf(test_data)
assert_array_equal(ans, res['ecdf']) # Result sorted
assert_array_equal(np.sort(test_data), res['data']) # Data sorted
def test_all_data_same(self):
test_data = [3, 3, 3, 3]
ans = [1, 1, 1, 1]
res = emp.empirical_cdf(test_data)
assert_array_equal(ans, res['ecdf'])
```
#### File: macroeco/misc/format_data.py
```python
import numpy as np
import pandas as pd
def data_read_write(data_path_in, data_path_out, format_type, **kwargs):
"""
General function to read, format, and write data.
Parameters
----------
data_path_in : str
Path to the file that will be read
data_path_out : str
Path of the file that will be output
format_type : str
Either 'dense', 'grid', 'columnar', or 'transect'
kwargs
Specific keyword args for given data types. See Notes
Notes
-----
'Dense Parameters'
non_label_cols : str
Comma separated list of non label columns. ex. "lat, long, tree"
delimiter : str
The delimiter for the dense data. Default, ","
na_values : int, float, str
Value to be labeled as NA. Default, ""
See misc.format_dense() for additional keyword parameters
"""
if format_type == "dense":
# Set dense defaults
kwargs = _set_dense_defaults_and_eval(kwargs)
# Try to parse non label columns appropriately
try:
nlc = [nm.strip() for nm in kwargs['non_label_cols'].split(",")]
kwargs.pop('non_label_cols', None)
except KeyError:
raise KeyError("'non_label_cols' is a required keyword for dense data")
# Read data with dense specific keywords
arch_data = pd.read_csv(data_path_in, sep=kwargs['delimiter'],
na_values=kwargs['na_values'])
form_data = format_dense(arch_data, nlc, **kwargs)
elif format_type == "grid":
pass
elif format_type == "stacked":
pass
elif format_type == "transect":
pass
else:
raise NameError("%s is not a supported data format" % format_type)
form_data.to_csv(data_path_out, index=False)
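# A minimal, hypothetical example of driving data_read_write() for dense data
# (the file names and column names are made up for illustration):
#
#     data_read_write('census_dense.csv', 'census_stacked.csv', 'dense',
#                     non_label_cols='row, column', delimiter=',',
#                     nan_to_zero='True')
#
# The dense table is read with the given delimiter, stacked into one row per
# (row, column, label) combination, and written to the output path.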
def format_dense(base_data, non_label_cols, **kwargs):
"""
Formats dense data type to stacked data type.
Takes in a dense data type and converts into a stacked data type.
Parameters
----------
base_data : DataFrame
The dense data
non_label_cols : list
A list of columns in the data that are not label columns
label_col : str
Name of the label column in the formatted data. Default, "label"
count_col : str
Name of the count column in the formatted data. Default, "count"
nan_to_zero : bool
Set all nans to zero. Default, False
drop_na : bool
Drop all columns with nan in the dataset. Default, False
Returns
-------
: DataFrame
A formatted DataFrame in the stacked format
Notes
-----
Example of Dense Data conversion
>>> import pandas as pd
>>> dense_data = pd.DataFrame({'row' : [1,2,1,2], 'column' : [1,1,2,2],
'labelA': [1,0,3,4], 'labelB' : [3,2,1,4]})
>>> dense_data
column labelA labelB row
0 1 1 3 1
1 1 0 2 2
2 2 3 1 1
3 2 4 4 2
[4 rows x 4 columns]
# labelA and labelB might be species names. 'row' and 'column'
# are non-species names so pass these in as non_label_cols
>>> stacked_data = format_dense(dense_data, ['row', 'column'])
>>> stacked_data
row column label count
0 1 1 labelA 1
1 1 1 labelB 3
2 2 1 labelA 0
3 2 1 labelB 2
4 1 2 labelA 3
5 1 2 labelB 1
6 2 2 labelA 4
7 2 2 labelB 4
[8 rows x 4 columns]
"""
kwargs = _set_dense_defaults_and_eval(kwargs)
# Stack data in columnar form.
indexed_data = base_data.set_index(keys=non_label_cols)
columnar_data = indexed_data.stack(dropna=False)
columnar_data = columnar_data.reset_index()
# Rename columns
num = len(non_label_cols)
columnar_data.rename(columns={0: kwargs['count_col'], 'level_%i' % num:
kwargs['label_col']}, inplace=True)
# Set nans to zero?
if kwargs['nan_to_zero']:
ind = np.isnan(columnar_data[kwargs['count_col']])
columnar_data.loc[ind, kwargs['count_col']] = 0
columnar_data.reset_index(inplace=True, drop=True)
# Drop nans?
if kwargs['drop_na']:
columnar_data = columnar_data.dropna(how="any")
columnar_data.reset_index(inplace=True, drop=True)
return columnar_data
def _set_dense_defaults_and_eval(kwargs):
"""
Sets default values in kwargs if kwargs are not already given.
Evaluates all values using eval
Parameters
-----------
kwargs : dict
Dictionary of dense specific keyword args
Returns
-------
: dict
Default, evaluated dictionary
"""
kwargs['delimiter'] = kwargs.get('delimiter', ',')
kwargs['na_values'] = kwargs.get('na_values', '')
kwargs['nan_to_zero'] = kwargs.get('nan_to_zero', False)
kwargs['drop_na'] = kwargs.get('drop_na', False)
kwargs['label_col'] = kwargs.get('label_col', 'label')
kwargs['count_col'] = kwargs.get('count_col', 'count')
for key, val in kwargs.items():
try:
kwargs[key] = eval(val)
except:
kwargs[key] = val
return kwargs
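# Behaviour sketch for the helper above: string values (e.g. read from a
# parameter file) are eval'ed back to Python objects when possible, and any
# missing options receive their defaults.
#
#     out = _set_dense_defaults_and_eval({'nan_to_zero': 'True'})
#     # out['nan_to_zero'] is the bool True; out['delimiter'] == ',' and
#     # out['count_col'] == 'count' come from the defaults, and values that
#     # fail eval (e.g. plain column names) are kept as strings.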
def format_stacked():
"""
"""
pass
def format_transect():
"""
"""
pass
def format_grid():
"""
"""
pass
```
#### File: macroeco/models/test_distributions.py
```python
from __future__ import division
from numpy.testing import (TestCase, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_, assert_raises)
import numpy as np
from decimal import Decimal
from macroeco.models import *
from macroeco.models._distributions import _trunc_logser_solver
import matplotlib.pyplot as plt
import scipy as sp
import scipy.stats as stats
class TestGeom(TestCase):
def test_pmf(self):
vals = geom.pmf([0,1,2], 0.25)
assert_array_almost_equal(vals, np.array([0.25, 0.1875, 0.140625]))
def test_mean(self):
mu1 = geom.mean(0.5)
assert_almost_equal(mu1, 1)
mu2 = geom.mean(0.25)
assert_almost_equal(mu2, 3)
def test_cdf(self):
vals = geom.cdf([0,1,2], 0.5)
assert_array_almost_equal(vals, [0.5,0.75,0.875])
def test_translate_args(self):
ps = geom.translate_args([10, 20])
assert_array_almost_equal(ps, [1/11, 1/21])
def test_fit_mle(self):
p = geom.fit_mle([1,2,4,5])
assert_almost_equal(p, 0.25)
class TestGeomUptrunc(TestCase):
def test_pmf(self):
# Expected values are regular geo cdf divided by cdf at b
vals = geom_uptrunc.pmf([0,1,2], 0.25, 2)
assert_array_almost_equal(vals,
np.array([0.25,0.1875,0.140625]) / 0.578125)
def test_cdf(self):
# Expected values are regular geom cdf divided by cdf at b
vals = geom_uptrunc.cdf([0,1,2], 0.5, 2)
assert_array_almost_equal(vals, np.array([0.5,0.75,0.875]) / 0.875)
def test_cdf_x_len_1(self):
# cdf should be not throw error even if x is len 1
vals = geom_uptrunc.cdf(0, 0.5, 2)
assert_almost_equal(vals, 0.5 / 0.875)
def test_mean(self):
mu1 = geom_uptrunc.mean(0.801, 32)
assert_almost_equal(mu1, 4, decimal=2)
def test_translate_args_harte_16(self):
# TODO: The Harte figures appear to be inaccurate, generate better
# canonical test case for next two tests and for test_fit_mle and
# test_mean
# From Harte 2011, Oxford U Press, Tab 7.4, n0=16 row, Eq 7.50
b = 16
mu = np.array([2, 1]) # A0/8, A0/16
expected = np.array([1-0.669, 1-0.500])
ps, _ = geom_uptrunc.translate_args(mu, b)
assert_almost_equal(ps, expected, decimal=3)
def test_translate_args_harte_32(self):
# From Harte 2011, Oxford U Press, Tab 7.4, n0=32 row, Eq 7.50
b = 32
mu = np.array([4, 2]) # A0/8, A0/16
expected = np.array([1-0.801, 1-0.667])
ps, _ = geom_uptrunc.translate_args(mu, b)
assert_almost_equal(ps, expected, decimal=3)
def test_translate_args_mqwilber_hand_calc(self):
# TODO: Confirm the last 4 of these tests and determine which values are more accurate
b = np.array([60, 340, 34])
mu = np.array([60*.1, 340*.6, 34*.9])
expected = np.array([1-.8572, 1-1.0036, 1-1.2937])
ps, _ = geom_uptrunc.translate_args(mu, b)
assert_almost_equal(ps, expected, decimal=3)
def test_translate_args_with_sum_of_pmf(self):
p1, b1 = geom_uptrunc.translate_args(341/4, 341) # Issue 33
assert_array_almost_equal(1,np.sum(geom_uptrunc.pmf(range(342),p1,b1)))
p2, b2 = geom_uptrunc.translate_args(120, 200) # Arbitrary
assert_array_almost_equal(1,np.sum(geom_uptrunc.pmf(range(201),p2,b2)))
def test_fit_mle(self):
p1, _ = geom_uptrunc.fit_mle([0,10], 10)
assert_almost_equal(p1, 0)
p2, _ = geom_uptrunc.fit_mle([1,3], 16)
assert_almost_equal(p2, 1-0.669, decimal=2)
class TestNbinom(TestCase):
def test_pmf(self):
#> dnbinom(c(0,1,2), 3, mu=5)
#[1] 0.05273438 0.09887695 0.12359619
vals = nbinom.pmf([0,1,2], 5, 3)
assert_array_almost_equal(vals, [0.05273438, 0.09887695, 0.12359619])
def test_cdf(self):
#> pnbinom(c(0,1,2),2,mu=30)
#[1] 0.00390625 0.01123047 0.02153015
vals = nbinom.cdf([0,1,2], 30, 2)
assert_array_almost_equal(vals, [0.00390625, 0.01123047, 0.02153015])
def test_mean_var(self):
mu1, var1 = nbinom.stats(20, 2, moments='mv')
assert_array_almost_equal([mu1, var1], [20, 20+(20**2)/2])
def test_get_p_from_mu(self):
assert_almost_equal(nbinom._get_p_from_mu(10, 2), 2/12)
def test_fit_mle_with_rvs(self):
np.random.seed(8)
x = nbinom.rvs(20, 10, size=100)
mu, k = nbinom.fit_mle(x)
assert_array_almost_equal([mu, k], [20, 10], decimal=0)
def test_fit_mle_with_R(self):
#> library(MASS)
#> fitdistr(seq(49), "negative binomial")
x = np.array(range(1,50))
mu, k = nbinom.fit_mle(x)
assert_array_almost_equal([mu, k], [25, 2.4337345], decimal=1)
def test_fit_mle_with_manual_calc(self):
x = np.array([6,17,14,12,8,10,4,9,3,12,4,2,12,8,14,16,9,10,8,5,6])
mu, k = nbinom.fit_mle(x, k_array=np.arange(0.01,10,0.01))
assert_array_almost_equal([mu, k], [9, 8.54], decimal=2)
def test_alternative_rvs(self):
rand_alt = nbinom.rvs_alt(5, 1, l=0, size=10000)
rand = nbinom.rvs(5, 1, size=10000)
alt_k = nbinom.fit_mle(rand_alt, k_array=np.arange(0.5, 1.5, 0.01))
k = nbinom.fit_mle(rand, k_array=np.arange(0.5, 1.5, 0.01))
assert_almost_equal(alt_k, k, decimal=1)
class TestNbinom_ztrunc(TestCase):
def test_pmf(self):
# Test pmf gives back expected mean
tpmf = nbinom_ztrunc.pmf(np.arange(1, 500), 4, 1)
tmean = np.sum(np.arange(1, 500) * tpmf)
assert_almost_equal(tmean, 4)
# Test pmf of 0 is 0
tpmf = nbinom_ztrunc.pmf(0, 1, 1)
assert_equal(tpmf, 0)
def test_cdf(self):
# Test cdf and pmf agree!
tpmf = np.sum(nbinom_ztrunc.pmf(np.arange(1, 20), 20, 10))
tcdf = nbinom_ztrunc.cdf(19, 20, 10)
assert_almost_equal(tpmf, tcdf)
def test_get_p_from_mu(self):
# Test the fit p values are equal to those given in He and Legendre
# 2002
test_values = [205.9878, 410.9853, 794.7613, 1210.0497,
1945.9970, 3193.8362]
test_ks = [2, 1, 0.5, 0.3, 0.1363, 0.01]
ps = np.array([nbinom_ztrunc.translate_args(335356 / 814., tk,
return_p=True)[0] for tk in test_ks])
assert_array_almost_equal(ps, test_values, decimal=0)
def test_fit_mle(self):
# Test fit returns something close the input
rvs_data = nbinom_ztrunc(10, 1).rvs(size=1000)
ml_mean, ml_k = nbinom_ztrunc.fit_mle(rvs_data)
assert_almost_equal(ml_mean, np.mean(rvs_data))
assert_almost_equal(ml_k, 1, decimal=0)
rvs_data = nbinom_ztrunc(20, 10).rvs(size=1000)
ml_mean, ml_k = nbinom_ztrunc.fit_mle(rvs_data)
assert_almost_equal(ml_mean, np.mean(rvs_data))
assert_almost_equal(ml_k, 10, decimal=0)
class TestCnbinom(TestCase):
def test_pmf(self):
# Test pmf sums to one
pmf = cnbinom.pmf(np.arange(0, 101), 20, 1, 100)
assert_almost_equal(np.sum(pmf), 1)
def test_cdf(self):
# Test cdf is one at appropriate value
cdf = cnbinom.cdf(100, 20, 1, 100)
assert_almost_equal(cdf, 1)
def test_fit_of_vector(self):
# Test fit of vector from Issue #3 (github.com/jkitzes/macroeco)
data = np.array([3,2,1,0,0,0,0,0,0,0,0,0,0,0,0])
k_fit = cnbinom.fit_mle(data)[0]
assert_equal(False, k_fit == -0.26)
def test_zillio_plots(self):
""" Test the cnbinom function replicated the Zillio and He plots
References
----------
Zillio, T. and He, F. 2010. Modeling spatial aggregation of finite
populations. Ecology, 91, 3698-3706
"""
# Define Preliminary a and k to test
a = np.array([0.1, .3, .8])
k = np.array([.1, 1, 10])
fnbd_vec = []
nbd_vec = []
binm_vec = []
descrip = []
# Get data
for ta in a:
for tk in k:
fnbd_vec.append(cnbinom.pmf(np.arange(1, 101),
ta * 100, tk, 100))
nbd_vec.append(nbinom.pmf(np.arange(1, 101), ta * 100, tk))
binm_vec.append(stats.binom.pmf(np.arange(1, 101), 100, ta))
descrip.append("a=%s, k=%s" % (ta, tk))
# Loop through the data and plot it
fig, axes = plt.subplots(3, 3, sharex=True, figsize=(15, 7))
axes = axes.flatten()
for i, ax in enumerate(axes):
ax.plot(np.arange(1, 101), fnbd_vec[i])
ax.plot(np.arange(1, 101), nbd_vec[i], '--')
ax.plot(np.arange(1, 101), binm_vec[i], '.-')
ax.legend(('fnbd', 'nbd', 'binm'), loc='best')
ax.set_xlabel('abundance')
ax.set_ylabel('P(x)')
ax.text(0.6, 0.3, descrip[i], transform=ax.transAxes)
#Uncomment to save figure
#fig.savefig("test_cnbinom")
class TestDgamma(TestCase):
def test_pmf(self):
# import macroeco_distribution as mac
# mac.dis_gamma_ll([1,1,2,5,6,7], 5, .3)
test_val = -32.3085384957
pred_val = np.sum(dgamma.logpmf([1, 1, 2, 5, 6, 7], 5, .3))
assert_almost_equal(test_val, pred_val)
# ab = [1, 1, 1, 1, 2, 4, 4, 4, 4, 4, 45, 267]
# mac.dis_gamma_ll(ab, 0.1, 200)
test_val = -39.889246913391531
ab = [1, 1, 1, 1, 2, 4, 4, 4, 4, 4, 45, 267]
pred_val = np.sum(dgamma.logpmf(ab, 0.1, 200))
assert_almost_equal(test_val, pred_val)
def test_cdf(self):
# Test that cdf gets close to one
assert_almost_equal(dgamma.cdf(1000, 4, .9), 1)
def test_fit_mle(self):
# mac.dis_gamma_solver([1,1,2,5,6,7])
fit_alpha = 1.1324749
fit_theta = 2.86753
alpha, theta = dgamma.fit_mle([1, 1, 2, 5, 6, 7])
assert_almost_equal(fit_alpha, alpha, decimal=3)
assert_almost_equal(fit_theta, theta, decimal=3)
def test_rank(self):
# When alpha is almost zero should be similar to logseries with p =
# e^(-1 / theta)
logseries_rank = logser_uptrunc.rank(10, np.exp(-1 / 3), 1000)
dgamma_rank = dgamma.rank(10, 0.0001, 3)
assert_array_equal(logseries_rank, dgamma_rank)
class TestLogser(TestCase):
def test_pmf(self):
# Testing against values in Williams 1944,
# Some applications of the logarithmic series and the index of
# diversity to ecological problems, pg. 18.
# Acridiidae: S = 826, p = 0.92964 (There seems to be an error in
# their data at 3 -> should be 83.3 not 88.3)
test_vals = np.array([289.3, 134.5, 83.3, 58.1, 43.2, 33.5, 26.7, 21.7,
17.9, 15., 12.7, 10.8, 9.3, 8., 6.9, 6.1, 5.3, 4.6, 4.1, 3.6])
pred_pmf = logser.pmf(np.arange(1, 21), 0.92964)
pred_vals = np.round(pred_pmf * 826, decimals=1)
assert_array_equal(test_vals, pred_vals)
# Mantidae: S = 209, p = 0.89781
test_vals = np.array([82.3, 36.9, 22.1, 14.9, 10.7, 8., 6.2, 4.8, 3.9,
3.1, 2.5, 2.1, 1.7, 1.4, 1.2, 1., 0.9, 0.7, 0.6, 0.5])
pred_pmf = logser.pmf(np.arange(1, 21), 0.89781)
pred_vals = np.round(pred_pmf * 209, decimals=1)
assert_array_equal(test_vals, pred_vals)
# Blattidae: S = 197, p = 0.96476
test_vals = np.array([56.8, 27.4, 17.6, 12.8, 9.8, 7.9, 6.5, 5.5, 4.7,
4.1, 3.6, 3.2, 2.8, 2.5, 2.3, 2.1, 1.9, 1.7,
1.6, 1.4, 1.3, 1.2, 1.1, 1., 1., 0.9, 0.8,
0.8, 0.7, 0.7])
pred_pmf = logser.pmf(np.arange(1, 31), 0.96476)
pred_vals = np.round(pred_pmf * 197, decimals=1)
assert_array_equal(test_vals, pred_vals)
def test_translate_args(self):
# Using values from Williams 1994
test_vals = [0.92964, 0.89781, 0.96476, 0.97003]
data = [4112 / 826., 805. / 209, 1612. / 197, 480. / 52]
pred_vals = [logser.translate_args(td) for td in data]
assert_array_almost_equal(test_vals, pred_vals, decimal=5)
def test_fit_mle(self):
test_val = .97003 # Value from Williams 1944
x = np.arange(1, 53.)
norm_x = x / sum(x)
data = norm_x * (480)
pred_val = logser.fit_mle(data)
assert_almost_equal(test_val, pred_val, decimal=5)
class TestLogserUptrunc(TestCase):
def test_pmf(self):
# import macroeco_distributions as mac
# mac.trunc_logser(.8, 100).pmf(4)
test_val = logser_uptrunc(.8, 100).pmf(4)
assert_almost_equal(test_val, 0.063624697299)
# import macroeco_distributions as mac
# mac.trunc_logser(.45, 3).pmf(3)
test_val = logser_uptrunc(.45, 3).pmf(3)
assert_almost_equal(test_val, 0.052224371373307543)
def test_cdf(self):
# import macroeco_distributions as mac
# mac.trunc_logser(.8, 100).cdf(4)
test_val = logser_uptrunc(.8, 100).cdf(4)
assert_almost_equal(test_val, 0.86556098617469057)
# import macroeco_distributions as mac
# mac.trunc_logser(.45, 3).cdf(2)
test_val = logser_uptrunc(.45, 3).cdf(2)
assert_array_almost_equal(test_val, 0.9477756286266924)
def test_mean(self):
# Expected mean is N / S
N = 500
S = 30.
p = logser_uptrunc.translate_args(N / S, N)[0]
mean = logser_uptrunc.stats(p, N)[0]
assert_almost_equal(mean, N / S, decimal=5)
def test_fit_mle(self):
# Should return same result as translate args
data = np.arange(1, 40)
N = np.sum(data)
S = len(data)
fits = logser_uptrunc.fit_mle(data)
assert_array_almost_equal(fits,
logser_uptrunc.translate_args(N / S, N),
decimal=5)
def test_translate_args(self):
# Test that values equal values from John's book (Harte 2011)
lg = logser_uptrunc.translate_args(4 * 4 / 4, 4 * 4)[0]
assert_almost_equal(-np.log(lg), 0.0459, decimal=4)
lg = logser_uptrunc.translate_args(2 ** 4 * 4 / 4, 2 ** 4 * 4)[0]
assert_almost_equal(-np.log(lg), -0.00884, decimal=5)
lg = logser_uptrunc.translate_args(2 ** 8 * 4 / 4, 2 ** 8 * 4)[0]
assert_almost_equal(-np.log(lg), -0.00161, decimal=5)
lg = logser_uptrunc.translate_args(2 ** 8 * 16 / 16, 2 ** 8 * 16)[0]
assert_almost_equal(-np.log(lg), 0.000413, decimal=6)
lg = logser_uptrunc.translate_args(2 ** 12 * 64 / 64, 2 ** 12 * 64)[0]
assert_almost_equal(-np.log(lg), 0.0000228, decimal=7)
lg = logser_uptrunc.translate_args(20 / 20, 20)[0]
assert_equal(0, 0)
def test_n_close_to_s(self):
# Test the solver doesn't fail when N is very close to S
_trunc_logser_solver(2, 3)
_trunc_logser_solver(3, 4)
_trunc_logser_solver(100, 101)
def test_rank(self):
# Test rank against values generated by hand
exp_vals = np.array([1., 1., 2., 3., 4., 7., 11., 18., 31., 62.])
# Test values generated
test_vals = logser_uptrunc.rank(10, .99, 100)
assert_array_equal(exp_vals, test_vals)
def test_rvs(self):
# Make sure random number generator is returning what is expected
res1 = logser_uptrunc.rvs(.9, 100)
assert_equal(1, len(np.atleast_1d(res1)))
res2 = lognorm.rvs(.9, 100, size=5) # Should be length 5
assert_equal(5, len(res2))
class TestLognorm(TestCase):
def test_pmf(self):
# R pmf: dlnorm(c(1:10), 2, 2)
r_output = np.array([0.1210, .0806, .0601, 0.0476, 0.0391, .0331,
0.0285, 0.0249, 0.0221, 0.0197])
test1 = lognorm.pdf(np.arange(1, 11), 2, 2)
assert_array_almost_equal(test1, r_output, decimal=4)
# R pmf: dlnorm(5, -3, 5)
r_ans = 0.0104333
test2 = lognorm.pdf(5, -3, 5)
assert_almost_equal(test2, r_ans)
def test_cdf(self):
# R cdf: plnorm(c(1,1,4,5,12), 1.2, 3.45)
r_output = np.array([0.3639854, 0.3639854, 0.5215318, 0.5472346,
0.6452161])
test = lognorm.cdf([1, 1, 4, 5, 12], 1.2, 3.45)
assert_array_almost_equal(test, r_output, decimal=7)
def test_translate_args(self):
mean = 67; sigma = 2
mu, sigma = lognorm.translate_args(mean, sigma)
# Expected mu: np.log(mean) - (sigma**2 / 2)
exp_mu = 2.2046926
assert_almost_equal(mu, exp_mu)
def test_fit_mle(self):
'''
# R code
pmf <- function(x, N, S, sigma){
mu = log(N / S) - (sigma^2 / 2)
dlnorm(x, meanlog=mu, sdlog=sigma)
}
mle <- function(sdlog, x, N, S){
-sum(log(pmf(x, N, S, sdlog)))
}
params <- function(x){
N = sum(x);
S = length(x);
optimize(mle, interval=c(0,5), x, N, S)
}
data = # some data
params(data)'''
data1 = [1, 1, 1, 1, 1, 2, 2, 3, 3, 4, 5, 6, 123, 456]
data2 = [2, 2, 2, 4, 67, 34, 152, 9]
r_fits = [2.07598, 1.59213] # data1, data2
testfit1 = lognorm.fit_mle(data1, fix_mean=True)[1]
testfit2 = lognorm.fit_mle(data2, fix_mean=True)[1]
assert_almost_equal(r_fits[0], testfit1, decimal=5)
assert_almost_equal(r_fits[1], testfit2, decimal=5)
# Scipy code: stats.lognorm.fit(data1, floc=0)
scipy_ans = 1.79518287
test1 = lognorm.fit_mle(data1)[1]
assert_almost_equal(scipy_ans, test1)
def test_rvs(self):
# Test that multiple random numbers can be returned without error
res1 = lognorm.rvs(5, 5) # Should be length 1
assert_equal(1, len(np.atleast_1d(res1)))
res2 = lognorm.rvs(5, 5, size=5) # Should be length 5
assert_equal(5, len(res2))
def test_stats(self):
# Test that stats returns the correct stats
mu, sigma = lognorm.translate_args(50, 2)
mean, sigma = lognorm.stats(mu, sigma, moments="mv")
assert_almost_equal(50, mean)
res = lognorm.stats(mu, sigma, moments="mvsk")
assert_equal(len(res), 4)
class TestPlnorm(TestCase):
def test_pmf(self):
# Test against R VGAM fxn: dpolono(c(1:10), -1, 3)
r_res = [0.121392844, 0.057692006, 0.035586652, 0.024863530,
0.018681089, 0.014721035, 0.011998072, 0.010027588, 0.008545518,
0.007396607]
test = plnorm.pmf(np.arange(1, 11), -1, 3)
assert_array_almost_equal(r_res, test)
# Test against macroeco_distributions.pln:
# pln.pmf([0, 50, 1000], 2.34, 5, 0)
md_res = np.array([2.86468926e-01, 1.51922299e-03, 5.25717609e-05])
test = plnorm.pmf([0, 50, 1000], 2.34, 5)
assert_array_almost_equal(md_res, test)
# Unit test from test_macroeco_distributions
# Test values for the Poisson lognormal are chosen from Table 1 and Table 2
# in Grundy Biometrika 38:427-434.
# In Table 1 the values are deducted from 1 which give p(0).
pln_table1 = [[-2.0, 2, '0.9749'],
[-2.0, 8, '0.9022'],
[-2.0, 16, '0.8317'],
[0.5, 2, '0.1792'],
[0.5, 8, '0.2908'],
[0.5, 16, '0.3416'],
[3, 2, '0.0000'],
[3, 8, '0.0069'],
[3, 16, '0.0365']]
pln_table2 = [[-2.0, 2, '0.0234'],
[-2.0, 8, '0.0538'],
[-2.0, 16, '0.0593'],
[0.5, 2, '0.1512'],
[0.5, 8, '0.1123'],
[0.5, 16, '0.0879'],
[3, 2, '0.0000'],
[3, 8, '0.0065'],
[3, 16, '0.0193']]
for vals in pln_table1:
test = plnorm.pmf(0, np.log(10 ** vals[0]), vals[1] ** .5)
assert_almost_equal(test, float(vals[2]), decimal=4)
for vals in pln_table2:
test = plnorm.pmf(1, np.log(10 ** vals[0]), vals[1] ** .5)
assert_almost_equal(test, float(vals[2]), decimal=4)
def test_cdf(self):
# Test against R VGAM fxn: ppolono(c(0, 15, 10000), .1, 2)
r_res = [0.3954088, 0.9048902, 0.9999973]
test = plnorm.cdf([0, 15, 10000], .1, 2)
assert_array_almost_equal(r_res, test, decimal=5)
# Test against macroeco_distributions:
# pln.cdf([1,2,3], 20, 4, 0)
md_res = np.array([7.34761277e-07, 1.18860746e-06, 1.67083480e-06])
test = plnorm.cdf([1, 2, 3], 20, 4)
assert_array_almost_equal(md_res, test, decimal=5)
def test_fit_mle(self):
# Test against R poilog: poilogMLE(data, zTrune=FALSE)
data = np.array([1,1,1,1,1,2,2,2,3,3,4,4,5,5,6,6,12,45,67])
Rfits = (1.31928, 1.18775)
fits = plnorm.fit_mle(data)
assert_array_almost_equal(Rfits, fits, decimal=3)
# Test against macroeco_distributions
# pln_solver(data, lower_trunc=False)
md_res = (1.3195580310886075, 1.1876019842774048)
assert_array_almost_equal(md_res, fits, decimal=4)
def test_rank(self):
# This should be a slow test!
# Test against ppf.
# >>> n = 50
# >>> vals = (np.arange(1, n+1) - 0.5) / n
# >>> plnorm.ppf(vals, 1, 1)
test_case = np.array([ 0., 0., 0., 0., 0., 0., 0., 0.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 2., 2.,
2., 2., 2., 2., 3., 3., 3., 3., 3., 3., 4., 4.,
4., 4., 5., 5., 5., 6., 6., 6., 7., 7., 8., 9.,
10., 11., 13., 15., 19., 29.])
pred_res = plnorm.rank(50, 1, 1, crit=0.5, upper=40)
# Test the values are within one
diff = np.abs(pred_res - test_case)
zeros = np.sum(diff == 0)
ones = np.sum(diff == 1)
assert_equal(zeros + ones, len(diff))
class TestPlnormZtrunc(TestCase):
def test_pmf(self):
# Test against macroeco_distributions:
# pln.pmf([0, 50, 1000], 2.34, 5, 1)
md_res = np.array([0, 2.12916164e-03, 7.36783061e-05])
test = plnorm_ztrunc.pmf([0, 50, 1000], 2.34, 5)
assert_array_almost_equal(md_res, test)
def test_cdf(self):
# Test against dpolonorm
# ppolono(c(1,2,3), 4.3, 100) / (1 - ppolono(0, 4.3, 100))
r_res = [0.007670365, 0.011507417, 0.014065948]
test = plnorm_ztrunc.cdf(np.arange(1, 4), 4.3, 100)
assert_array_almost_equal(r_res, test)
def test_fit_mle(self):
data = np.array([1,1,1,4,4,4,4,5,5,5,12,44,55,112])
# macroeco_distributions fit: pln_solver(data)
md_fits = (1.068510556981163, 1.8800439687956865)
test = plnorm_ztrunc.fit_mle(data)
assert_array_almost_equal(test, md_fits, decimal=4)
# R poilog: poilogMLE(data)
r_fits = (1.067620, 1.880646)
assert_array_almost_equal(test, r_fits, decimal=3)
def test_rank(self):
# TODO: Can't test this against ppf because ppf is too slow
# Make sure it is working when crit = 0
test = [ 1., 1., 2., 2., 2., 2., 2., 3., 3.,
4., 5., 5., 6., 6., 7., 7., 8., 11., 14., 22.]
rad = plnorm_ztrunc.rank(20, 1, 1, crit=0, upper=40)
assert_array_equal(test, rad)
class TestExpon(TestCase):
def test_pdf(self):
vals = expon.pdf([0,1,2], 2.5)
assert_almost_equal(vals, [2.5, 0.205212497, 0.016844867])
def test_mean(self):
mu1 = expon.mean(0.5)
assert_almost_equal(mu1, 2)
mu2 = expon.mean(0.25)
assert_almost_equal(mu2, 4)
def test_cdf(self):
vals = expon.cdf([0,1,2], 0.5)
assert_array_almost_equal(vals, [0, 0.39346934, 0.632120559])
def test_translate_args(self):
assert_almost_equal(1/13, expon.translate_args(13))
def test_fit_mle(self):
assert_almost_equal(1/8, expon.fit_mle([6,7,9,10]))
class TestExponUptrunc(TestCase):
def test_pdf(self):
vals = expon_uptrunc.pdf([0,1,2], 0.2, 10)
assert_almost_equal(vals, [0.231303529, 0.189375312, 0.155047392])
def test_pdf_lambda_equal_zero_is_uniform(self):
vals = expon_uptrunc.pdf([0,1,2], 0.0000001, 10)
assert_almost_equal(vals, [0.1, 0.1, 0.1])
def test_pdf_integrates_to_one(self):
val1 = sp.integrate.quad(expon_uptrunc.pdf, 0, 10, (0.2, 10))
assert_almost_equal(val1[0], 1)
val2 = sp.integrate.quad(expon_uptrunc.pdf, 0, 100, (.000000001, 100))
assert_almost_equal(val2[0], 1)
val3 = sp.integrate.quad(expon_uptrunc.pdf, 0, 100, (-5, 100))
assert_almost_equal(val3[0], 1)
def test_mean_lambda_equal_zero(self):
# If lam zero (uniform distribution), mean should be 1/2 b
assert_almost_equal(expon_uptrunc.mean(0.0000001, 10), 5, 5)
def test_mean(self):
def integrand(x, lam, b):
return x * expon_uptrunc.pdf(x, lam, b)
for lam in [2, 4.5]:
val = sp.integrate.quad(integrand, 0, 5, args=(lam, 10))[0]
assert_almost_equal(expon_uptrunc.mean(lam, 5), val, 4)
def test_cdf(self):
vals = expon_uptrunc.cdf([0,1,2], 0.2, 10)
assert_array_almost_equal(vals, [0, 0.209641082, 0.381280683])
def test_translate_args_uniform_case(self):
lam = expon_uptrunc.translate_args(5, 10)
assert_almost_equal(lam[0], 0)
def test_translate_args(self):
# mean -> lambda -> mean comparison
lam = expon_uptrunc.translate_args(3, 10)
assert_almost_equal(expon_uptrunc.mean(lam, 10), 3)
def test_fit_mle_uniform_case(self):
data = [5,5,5]
mean = np.mean(data)
lam = expon_uptrunc.fit_mle(data, 10)[0]
assert_almost_equal(expon_uptrunc.mean(lam, 10), 5, 4)
def test_fit_mle(self):
data = [4,5,7,8]
mean = np.mean(data)
lam = expon_uptrunc.fit_mle(data, 10)[0]
assert_almost_equal(expon_uptrunc.mean(lam, 10), 6)
``` |
{
"source": "jkiv/shapool-client",
"score": 2
} |
#### File: src/shapool/shapool.py
```python
import binascii
from icepool import icepool
import logging
import math
import struct
import time
from . import midstate
_log = logging.getLogger('shapool-client.shapool')
class Shapool:
def __init__(self, ctx: icepool.IcepoolContext, number_of_devices: int, cores_per_device: int):
self._ctx = ctx
self.number_of_devices = number_of_devices
self.hardcoded_bits = math.ceil(math.log2(cores_per_device))
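        # Split the top nonce byte evenly across devices; each entry in
        # device_configs is that device's starting offset, while the hardcoded
        # high bits separate the cores inside a device.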
nonce_step = (0x100 >> self.hardcoded_bits) // self.number_of_devices
self.device_configs = bytes([i * nonce_step for i in range(self.number_of_devices)])
def __del__(self):
self._ctx.assert_reset()
def start_execution(self):
self._ctx.deassert_reset()
def interrupt_execution(self):
self._ctx.spi_assert_daisy()
self._ctx.spi_deassert_daisy()
def reset(self):
self._ctx.assert_reset()
def poll_until_ready_or_timeout(self, timeout_s):
ready = False
if timeout_s is None:
while not ready:
ready = self._ctx.poll_ready()
else:
start_time = time.time()
while not ready and time.time() - start_time < timeout_s:
ready = self._ctx.poll_ready()
return ready
def update_device_configs(self):
self._ctx.assert_reset()
self._ctx.spi_assert_daisy()
self._ctx.spi_write_daisy(self.device_configs)
self._ctx.spi_deassert_daisy()
def update_job(self, midstate, message):
self._ctx.assert_reset()
self._ctx.spi_assert_shared()
self._ctx.spi_write_shared(midstate + message)
self._ctx.spi_deassert_shared()
def get_result(self):
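        # Each device reports 5 bytes: a one-byte flag field followed by a
        # 32-bit big-endian nonce.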
self._ctx.spi_assert_daisy()
results = self._ctx.spi_read_daisy(5 * self.number_of_devices)
self._ctx.spi_deassert_daisy()
for n_device in range(self.number_of_devices):
result_offset = 5*n_device
flags = results[result_offset]
if flags != 0:
nonce, = struct.unpack(">L", results[result_offset+1:result_offset+5])
nonce = Shapool._correct_nonce(\
nonce, flags, self.device_configs[n_device], self.hardcoded_bits)
return nonce
return None
def update_difficulty(self, difficulty):
# TODO
pass
@staticmethod
def _pack_job(version, previous_hash, merkle_root, timestamp, bits):
# version, previous_hash, merkle_root should be bytes, already in correct order
message = version + \
previous_hash + \
merkle_root + \
timestamp + \
bits
return message[:64], message[64:]
@staticmethod
def _precompute_midstate(first_block):
state = midstate.ShaState()
state.update(first_block)
return state.as_bin(True)
@staticmethod
def _correct_nonce(nonce, flags, device_offset, hardcoded_bits):
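        # 'flags' is a one-hot byte identifying the core that found the match;
        # map it to a core index so it can be placed into the hardcoded high
        # bits of the nonce.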
mapping = {
0x01: 0x0000_0000,
0x02: 0x0000_0001,
0x04: 0x0000_0002,
0x08: 0x0000_0003,
0x10: 0x0000_0004,
0x20: 0x0000_0005,
0x40: 0x0000_0006,
0x80: 0x0000_0007
}
print(f'{flags=:02x} {hardcoded_bits=} {nonce=:08x} {mapping[flags]<<(32-hardcoded_bits)=:08x} {device_offset<<24=:08x}')
nonce -= 2
nonce |= mapping[flags] << (32-hardcoded_bits)
nonce ^= device_offset << 24
return nonce
``` |
{
"source": "jkjaer/latexResearchDiary",
"score": 3
} |
#### File: jkjaer/latexResearchDiary/newBuild.py
```python
import sys, datetime, calendar, os, re, io
sys.path.append('database/')
sys.path.append('logic/')
import DiaryDatabaseWrapper, addTask, commonDiaryFunctions
def newBuild(argv):
"""
Generate a new build file.
"""
# Get/validate tags and dates
includeTagList, excludeTagList, dateList, taskLabelList = \
validateInputsAndSetDefaults(argv)
# Find the tasks with valid tags and the selected dates
taskDict, extractedTagsList, extractedAuthorList = \
getTaskDictionary(includeTagList, excludeTagList, dateList, \
taskLabelList)
# Write the extracted tags and titles to the tag dictionary file in the
# buildFiles folder.
writeTagsToTexDictionary(extractedTagsList)
# Write the extracted authors to the author dictionary file in the
# buildFiles folder.
writeAuthorsToTexDictionary(extractedAuthorList)
createBuildFile(taskDict)
print('The tag dictionary and the task list have successfully been updated.')
# Input validation
def validateInputsAndSetDefaults(argv):
"""
    Validate the provided input and set the default values for
the optional parameters if not specified or empty.
"""
nInputs = len(argv)
if nInputs==0:
# Default setup for no input parameters
includeTagList = getAllTags()
excludeTagList = list()
dateList = getDefaultDates()
taskLabelList = list()
elif nInputs==1:
if argv[0]=='all' or argv[0]=='':
# Include all tags and dates
includeTagList = getAllTags()
excludeTagList = list()
else:
# Only tags are specified
includeTagList, excludeTagList = separateAndValidateTags(argv[0])
dateList = getAllDatesWithEntries()
taskLabelList = list()
elif nInputs==2:
if argv[0]=='all' or argv[0]=='':
# Include all tags
includeTagList = getAllTags()
excludeTagList = list()
else:
includeTagList, excludeTagList = separateAndValidateTags(argv[0])
if argv[1]=='all' or argv[1]=='':
# Include all dates
dateList = getAllDatesWithEntries()
else:
dateList = checkAndFindDates(argv[1])
taskLabelList = list()
elif nInputs==3:
if argv[0]=='all' or argv[0]=='':
# Include all tags
includeTagList = getAllTags()
excludeTagList = list()
else:
includeTagList, excludeTagList = separateAndValidateTags(argv[0])
if argv[1]=='all' or argv[1]=='':
# Include all dates
dateList = getAllDatesWithEntries()
else:
dateList = checkAndFindDates(argv[1])
if argv[2]=='all' or argv[2]=='':
# An empty list means that all task labels should be included
taskLabelList = list()
else:
taskLabelList = validateTaskLabels(argv[2])
else:
print("Error: You must specify 0, 1, 2, or 3 input parameters.")
print("newBuild.py \'tagA, tagB\' \'YYYY-MM-DD, YYYY-MM\' " \
"\'taskLabel1, taskLabel2\'")
sys.exit(2)
return includeTagList, excludeTagList, dateList, taskLabelList
# Get all tags
def getAllTags():
"""
Retrieve all tags from the tags database.
"""
# Create a diary database object.
db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()
tagRows = db.selectFromTable('tags',('name',),'')
db.close()
return [element[0] for element in tagRows]
# Get the default dates with valid entries.
def getDefaultDates():
"""
    Get the default dates. These are the latest 90 days.
"""
nDays = 90
dateNDaysAgo = datetime.date.today()-datetime.timedelta(days=nDays)
return getAllDatesWithEntries(fromDate=dateNDaysAgo)
# Get all dates with entries
def getAllDatesWithEntries(fromDate=datetime.date(1970,1,1)):
"""
Get all dates from a specific date.
"""
dateList = list()
entriesDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__)) + \
'/entries'
yearFolders = getDigitFolders(entriesDir)
for yearFolder in yearFolders:
iPath = entriesDir + '/' + yearFolder
monthFolders = getDigitFolders(iPath)
for monthFolder in monthFolders:
jPath = iPath + '/' + monthFolder
dayFolders = getDigitFolders(jPath)
for dayFolder in dayFolders:
candidateDate = datetime.date(year=int(yearFolder),\
month=int(monthFolder),day=int(dayFolder))
if fromDate<=candidateDate:
dateList.append(candidateDate)
return sorted(dateList)
# Separates the provided tags into including and excluding tags
def separateAndValidateTags(tagListString):
"""
    Separates the provided tags into two lists. The first list consists
of tags without a leading exclamation mark while the second list
only consists of tags with a leading exclamation mark.
"""
rawTagList = tagListString.split(',')
rawIncludeTagList = list()
rawExcludeTagList = list()
for tag in rawTagList:
# Remove leading and trailing spaces
tag = tag.strip()
# Separate the tags
if tag[0] == '!':
rawExcludeTagList.append(tag[1:])
else:
if tag == 'all':
rawIncludeTagList = getAllTags()
else:
rawIncludeTagList.append(tag)
# Check the tags
if len(rawExcludeTagList):
excludeTagList = addTask.checkTags(','.join(rawExcludeTagList))[1]
else:
excludeTagList = list()
if len(rawIncludeTagList):
includeTagList = addTask.checkTags(','.join(rawIncludeTagList))[1]
else:
includeTagList = list()
return includeTagList, excludeTagList
# Validates that the provided task labels have the correct format
def validateTaskLabels(taskLabelListString):
"""
Validates that the provided task labels have the correct format.
"""
taskLabelList = taskLabelListString.split(',')
validatedTaskLabelList = list()
# The task labels must match the pattern YYYYMMDD_XXXI
# where XXX are optional initials (letters a-zA-Z) and I is a number.
taskLabelPattern = re.compile(r'^([0-9]{8})_([a-zA-Z]*)([0-9]+)$')
for taskLabel in taskLabelList:
# Remove leading and trailing spaces
taskLabel = taskLabel.strip()
# Check if the first eight characters corresponds to a valid date
try:
validDateTime = datetime.datetime.strptime(taskLabel[0:8], '%Y%m%d')
except ValueError:
raise ValueError("Invalid task label supplied" + taskLabel + \
". Should be YYYYMMDD_XXXI where YYYYMMDD are the "\
"date, XXX are optional initials (letters a-zA-Z), "\
"and I is a number.")
# Check the task labels
if re.match(taskLabelPattern, taskLabel):
validatedTaskLabelList.append(taskLabel)
else:
print("Invalid task label supplied" + taskLabel + \
". Should be YYYYMMDD_XXXI where YYYYMMDD are the "\
"date, XXX are optional initials (letters a-zA-Z), "\
"and I is a number.")
sys.exit(2)
return validatedTaskLabelList
# Returns a list of folders consisting of only digits
def getDigitFolders(path):
"""
    Returns a list of folders consisting of only digits.
"""
digitFolderList = list()
filesAndFolders = os.listdir(path)
for fileAndFolder in filesAndFolders:
iPath = path + '/' + fileAndFolder
if fileAndFolder.isdigit() and os.path.isdir(iPath):
digitFolderList.append(fileAndFolder)
return digitFolderList
# Check if the provided dates are valid
def checkAndFindDates(dateListString):
"""
Check if the provided dates are valid
"""
rawDateList = dateListString.split(',')
allDatesList = list()
for date in rawDateList:
# First remove space around the date
date = date.strip()
try:
# First see if the date is a specific day
validDateTime = datetime.datetime.strptime(date, '%Y-%m-%d')
allDatesList.append(validDateTime.date())
except ValueError:
try:
# If not a specific day, see if it is a specific month.
validDateTime = datetime.datetime.strptime(date, '%Y-%m')
allDatesList.extend(getAllDatesFromYearMonth(validDateTime.year,\
validDateTime.month))
except ValueError:
try:
# If neither a specific day or month, see if it is a
# specific year.
validDateTime = datetime.datetime.strptime(date, '%Y')
allDatesList.extend(getAllDatesFromYear(validDateTime.year))
except ValueError:
raise ValueError("Incorrect data format, should be " + \
"either YYYY-MM-DD, YYYY-MM, or YYYY.")
# Remove duplicate dates and sort the list
uniqueDatesList = list(set(allDatesList))
# Only retain those dates with entries
dateList = list()
for date in uniqueDatesList:
if dateHasEntry(date):
dateList.append(date)
return sorted(dateList)
# Get all dates from year and month
def getAllDatesFromYearMonth(year,month):
"""
Get all dates from year and month.
"""
if month<12:
nDays = (datetime.date(year,month+1,1)-\
datetime.date(year,month,1)).days
else:
nDays = (datetime.date(year+1,1,1)-\
datetime.date(year,month,1)).days
dateList = list()
for dayNo in range(1,nDays+1):
dateList.append(datetime.date(year,month,dayNo))
return dateList
# Get all dates from year
def getAllDatesFromYear(year):
"""
    Get all dates from year.
"""
dateList = list()
for monthNo in range(1,13):
dateList.extend(getAllDatesFromYearMonth(year,monthNo))
return dateList
# Check if a date folder has been created
def dateHasEntry(date):
"""
Returns true if a folder structure exists for the day
"""
dir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__)) + \
'/entries/' + str(date.year) + '/' + str(date.month).zfill(2) + \
'/' + str(date.day).zfill(2)
if os.path.isdir(dir):
return True
else:
return False
# Write the tags and titles to the tag dictionary file in the build files folder.
def writeTagsToTexDictionary(tagList):
"""
Write the tags and titles to the tag dictionary file in the build files
folder.
"""
tagTitleList = getTagTitles(tagList)
buildFilesDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__))\
+ '/buildFiles'
if not os.path.exists(buildFilesDir):
os.makedirs(buildFilesDir)
tagDictionaryFile = io.open(buildFilesDir + '/tagDictionary.tex',\
'w',encoding='utf-8')
nTags = len(tagList)
for iTag in range(nTags):
tagTitle = tagTitleList[iTag]
tagDictionaryFile.write('\expandafter\\newcommand\csname tag' + \
tagList[iTag] + '\endcsname{' + tagTitle + '}\n')
tagDictionaryFile.close()
# Retrieve the tag titles associated with the tag names.
def getTagTitles(tagList):
"""
Retrieve the tag titles associated with the tag names.
"""
db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()
tagTitles = list()
for tag in tagList:
tagRows = db.selectFromTable('tags',('title',),\
'WHERE name=\'' + tag + '\'')
tagTitles.append(tagRows[0][0])
db.close()
return tagTitles
# Write the authors to the tag dictionary file in the build files folder.
def writeAuthorsToTexDictionary(authorInitialsList):
"""
Write the authors to the tag dictionary file in the build files
folder.
"""
authorNameList, authorEmailList = getAuthorNamesAndEmail(authorInitialsList)
buildFilesDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__))\
+ '/buildFiles'
if not os.path.exists(buildFilesDir):
os.makedirs(buildFilesDir)
authorDictionaryFile = io.open(buildFilesDir + '/authorDictionary.tex',\
'w',encoding='utf-8')
nAuthors = len(authorInitialsList)
for iAuthor in range(nAuthors):
authorName = authorNameList[iAuthor]
authorEmail = authorEmailList[iAuthor]
authorDictionaryFile.write('\expandafter\\newcommand\csname author' + \
authorInitialsList[iAuthor] + \
'name\endcsname{' + authorName +'}\n')
authorDictionaryFile.write('\expandafter\\newcommand\csname author' + \
authorInitialsList[iAuthor] + \
'email\endcsname{' + \
authorEmail +'}\n')
authorDictionaryFile.close()
# Retrieve the author names and emails from the author initials.
def getAuthorNamesAndEmail(authorInitialsList):
"""
Retrieve the author names and emails from the author initials.
"""
db = DiaryDatabaseWrapper.DiaryDatabaseWrapper()
authorNameList = list()
authorEmailList = list()
for authorInitials in authorInitialsList:
authorRows = db.selectFromTable('authors',('name','email'),\
'WHERE initials=\'' + authorInitials + '\'')
authorNameList.append(authorRows[0][0])
authorEmailList.append(authorRows[0][1])
db.close()
return authorNameList, authorEmailList
# Find the tasks with valid tags and the selected dates
def getTaskDictionary(includeTagList, excludeTagList, dateList, \
taskLabelList):
"""
Find the tasks with valid tags and the selected dates. The key of the
returned dictionary is the date and the values are the file names of
    the tasks.
"""
taskDict = dict()
extractedTagsList = list()
extractedAuthorList = list()
diaryDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__))
for date in dateList:
relativeDateDir = 'entries/' + str(date.year) + '/' + \
str(date.month).zfill(2) + '/' + str(date.day).zfill(2)
# The file name of a task must match the pattern YYYYMMDD_XXXI.tex
# where XXX are optional initials (letters a-zA-Z) and I is a number.
fileNamePattern = re.compile(r'^' + str(date.year) + \
str(date.month).zfill(2) + str(date.day).zfill(2) + \
'_([a-zA-Z]*)([0-9]+)\.tex$')
# Retrieve a sorted list of all files and folders in relativeDateDir
filesAndFoldersList = \
sorted(os.listdir(diaryDir + '/' + relativeDateDir))
validTaskPathList = list()
for fileOrFolder in filesAndFoldersList:
relativeTaskPath = relativeDateDir + '/' + fileOrFolder
taskPath = diaryDir + '/' + relativeTaskPath
if os.path.isfile(taskPath) and \
re.match(fileNamePattern, fileOrFolder):
# If the taskLabelList is not empty, check if the file name
# is in the list
if len(taskLabelList)==0 or fileOrFolder[:-4] in taskLabelList:
extractedTags = extractTagsFromValidTask(taskPath, \
includeTagList, excludeTagList)
if len(extractedTags)>0:
extractedAuthors = extractAuthorsFromTask(taskPath)
if len(extractedAuthors)>0:
extractedAuthorList.extend(extractedAuthors)
validTaskPathList.append(relativeTaskPath)
extractedTagsList.extend(extractedTags)
        # If at least one task path has been added, add it to the dictionary
if len(validTaskPathList)>0:
taskDict[date] = validTaskPathList
# return the task dictionary and the unique extracted tags and authors
return taskDict, sorted(list(set(extractedTagsList))), \
sorted(list(set(extractedAuthorList)))
# Extracts all the tags from a valid task.
def extractTagsFromValidTask(taskPath, includeTagList, excludeTagList):
"""
Extracts all the tags from a valid task. A valid task contains at least
one of the tags in the tagList and no excluding tags.
"""
texFile = io.open(taskPath,'r',encoding='utf-8')
for line in texFile:
for includeTag in includeTagList:
# The line must match the pattern \tags{tagA,...,tagN}
pattern = re.compile(r'^\s*\\tags\{(([a-zA-Z0-9\s]*,)*)' + \
includeTag + '(([a-zA-Z0-9\s]*,)*)([a-zA-Z0-9\s]*)\}\s*$')
if re.match(pattern, line):
# A valid tag was found in the entry. Extract all tags and
# return.
pattern = re.compile(r'^\s*\\tags\{(!*[a-zA-Z0-9,\s]+)\}\s*$')
tagListString = re.search(pattern,line).group(1)
# Check that the extracted tags are valid and split the
# string into a list
extractedTagList = addTask.checkTags(tagListString)[1]
# Check that none of the tags are in the excludeTagList
for extractedTag in extractedTagList:
if extractedTag in excludeTagList:
texFile.close()
return list()
# If none of the extracted tags are in the excludeTagList,
# return the extracted tag list
texFile.close()
return extractedTagList
# None of the tags in the includeTagList was in the task
texFile.close()
return list()
# Extracts all the authors from a task.
def extractAuthorsFromTask(taskPath):
"""
Extracts all the authors from a task
"""
texFile = io.open(taskPath,'r',encoding='utf-8')
for line in texFile:
# The line must match the pattern \authors{initials1,...,initials2}
pattern = \
re.compile(r'^\s*\\authors\{(([a-zA-Z0-9\s]*,)*)([a-zA-Z0-9\s]*)\}\s*$')
if re.match(pattern, line):
# The author line was found. Extract all author initials
pattern = re.compile(r'^\s*\\authors\{(!*[a-zA-Z0-9,\s]+)\}\s*$')
authorListString = re.search(pattern,line).group(1)
# Check that the authors are valid and split the
# string into a list
if len(authorListString)>0:
extractedAuthorList = addTask.checkAuthors(authorListString)
else:
extractedAuthorList = list()
texFile.close()
return extractedAuthorList
# The author string was not found in the file
texFile.close()
return list()
# Create the build file
def createBuildFile(taskDict):
"""
Create the build file.
"""
oldYear = 1970
oldMonth = 0
buildFilesDir = commonDiaryFunctions.unicodeDir(os.path.abspath(__file__))\
+ '/buildFiles'
buildFile = io.open(buildFilesDir + '/taskList.tex','w',encoding='utf-8')
for date, taskPathList in sorted(taskDict.items()):
        # If a new year, month, and/or day is started, add a new part, chapter,
# and/or section
year = date.year
month = date.month
day = date.day
if oldYear!=year:
buildFile.write('\part{'+ \
commonDiaryFunctions.unicodeStr(year) +'}\n')
oldYear = year
# reset oldMonth
oldMonth = 0
if oldMonth!=month:
buildFile.write('\chapter{'+ \
commonDiaryFunctions.unicodeStr(calendar.month_name[month]) +'}\n')
oldMonth=month
buildFile.write('\section{'+ \
commonDiaryFunctions.unicodeStr(calendar.month_name[month]) + \
' ' + commonDiaryFunctions.unicodeStr(day) + ', '\
+ commonDiaryFunctions.unicodeStr(year) +'}\n')
# Add all tasks
for taskPath in taskPathList:
buildFile.write('\input{' + taskPath + '}\n')
buildFile.close()
# The first function to be called when this file is used as a script
if __name__ == '__main__':
unicodedInputList = \
commonDiaryFunctions.convertTerminalInputs2Unicode(sys.argv[1:])
newBuild(unicodedInputList)
```
#### File: latexResearchDiary/tests/commonDiaryTestFunctions.py
```python
import sys, io
def isFileContentIdentical(pathA,pathB):
"""
Do a line by line comparison of the content of two files. Return true
if they are equal
"""
# read the two files to two lists
fileLineListA = createFileLineList(pathA)
fileLineListB = createFileLineList(pathB)
nLinesA = len(fileLineListA)
nLinesB = len(fileLineListB)
if nLinesA!=nLinesB:
return False
# do a line by line comparison
for iLine in range(nLinesA):
if fileLineListA[iLine] != fileLineListB[iLine]:
return False
# if the function has not returned False at this point, the two files are
# the same
return True
def createFileLineList(path):
"""
Create a list from a file where each line is an element in the list.
"""
fileLineList = list()
fileHandle = io.open(path,'r',encoding='utf-8')
    for line in fileHandle:
        # io.open with an explicit encoding already yields unicode text on
        # both Python 2 and Python 3, so each line can be stored as-is.
        fileLineList.append(line)
fileHandle.close()
return fileLineList
``` |
{
"source": "jkjean19/GraphingCalc",
"score": 3
} |
#### File: jkjean19/GraphingCalc/CalcApp.py
```python
from flask import Flask, render_template, request
from urllib.parse import quote
import app.GraphingCalc as GC
from app.models import db, PostSingle, PostMulti
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:password@localhost:5432/calc_app'
db.init_app(app)
@app.route('/')
def home():
"""
The home page that asks for user input.
"""
return render_template('index.html')
@app.route('/results', methods=['POST'])
def result():
"""
The results page with the integrated/differentiated function and a graph.
"""
subject = request.form['calc']
function = request.form['function']
file = './images/' + quote(subject + '&' + function) + '.png'
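    # the rendered plot is cached under ./static/images using a URL-quoted
    # '<subject>&<function>' file name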
if subject == 'single':
try:
result = GC.single_var(function, './static/'+file)
if not db.session.query(PostSingle).filter(PostSingle.func == function).count():
db_post = PostSingle(function, file)
db.session.add(db_post)
db.session.commit()
return render_template('results.html',
subject = subject,
function=function,
first_deriv = result[0],
second_deriv = result[1],
third_deriv = result[2],
integ_sol = result[-1],
img_file = file)
except:
return render_template('index.html')
else:
try:
result = GC.multi_var(function, './static/'+file)
if not db.session.query(PostMulti).filter(PostMulti.func == function).count():
db_post = PostMulti(function, file)
db.session.add(db_post)
db.session.commit()
return render_template('results.html',
subject = subject,
function=function,
partial_X = result[0],
partial_Y = result[1],
partial_XX = result[2],
partial_XY = result[3],
partial_YY = result[4],
integ_sol = result[-1],
img_file = file)
except:
return render_template('index.html')
if __name__ == "__main__":
app.run(threaded=True)
``` |
{
"source": "jk/jekyll-mermaid-blog",
"score": 4
} |
#### File: jekyll-mermaid-blog/code/example1_1.py
```python
def extendList(val, list=[]):
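    # the default list is created once, when the function is defined,
    # and is shared between all calls that do not pass their own list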
list.append(val)
return list
list1 = extendList(10)
list2 = extendList(123,[])
list3 = extendList('a')
print "list1 = %s" % list1 # list1 = [10, 'a']
print "list2 = %s" % list2 # list2 = [123]
print "list3 = %s" % list3 # list3 = [10, 'a']
def extendListUpdated(val, list=None):
# if no list is provided initialize an empty list
if list is None:
list = []
list.append(val)
return list
list1 = extendListUpdated(10)
list2 = extendListUpdated(123,[])
list3 = extendListUpdated('a')
print "list1 = %s" % list1 # [10]
print "list2 = %s" % list2 # [123]
print "list3 = %s" % list3 # ['a']
```
#### File: jekyll-mermaid-blog/code/example1_2.py
```python
def multipliers():
return [lambda x : i * x for i in range(4)]
print [m(2) for m in multipliers()] # [6, 6, 6, 6]
print range(4) # [0, 1, 2, 3]
print lambda x : i * x # <function <lambda> at 0x109ad8668>
print [lambda x : i * x for i in range(4)] # [<function <lambda> at 0x109ad8668>, <function <lambda> at 0x109adcb90>, <function <lambda> at 0x109adc500>, <function <lambda> at 0x109ae6500>]
print [lambda x : i * x for i in range(4)][0] # <function <lambda> at 0x109ad8668>
print [lambda x : i * x for i in range(4)][0](2) # 6 [3x2]
def multipliers():
for i in range(4): yield lambda x : i * x
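# consumed lazily, each yielded lambda is called before i advances,
# so [m(2) for m in multipliers()] now gives [0, 2, 4, 6]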
dict = {"key":"value"}
for i in {"key":"value"}:
print i
def dynamicArguments(*arg, **kwargs):
print arg
print kwargs
dynamicArguments(1, 2, 3, first=4, second=5, third=6)
# (1, 2, 3)
# {'second': 5, 'third': 6, 'first': 4}
``` |
{
"source": "jkjium/pyMAVEN",
"score": 2
} |
#### File: jkjium/pyMAVEN/Parser.py
```python
import numpy
import argparse
import commlib as cb
class Atom():
def __init__(self,id,AtomName,Coordinates,Occupancy,bfactor,Element,Charge,parent):
self.id=id
self.AtomName=AtomName
self.AlternateLocationIndicator=None #remove it later
self.__parent=parent
self.Chain=None
self.__Coordinates=Coordinates
self.Occupancy=Occupancy
self.bfactor=bfactor
self.SegmentIdentifier=None
self.Element=Element
def GetParent(self):
return self.__parent
def GetCoordinates(self):
return self.__Coordinates
def CalcDist(self,another_atom):
#return numpy.linalg.norm(self.Coordinates-another_atom.Coordinates)
return cb.dist(self.__Coordinates, another_atom.GetCoordinates())
class Residue():
def __init__(self,id,name,parent):
self.__id=id
self.__name=name
self.__parent=parent
self.__Atoms=None
def __setitem__(self,id,Atom):
try:
self.__Atoms[id]=Atom
except:
self.__Atoms={}
self.__Atoms[id]=Atom
def GetID(self):
return self.__id
def GetName(self):
return self.__name
def GetParent(self):
return self.__parent
def GetAtoms(self):
for i in sorted(self.__Atoms.keys()):yield self.__Atoms[i]
def GetCAlpha(self):
return [i for i in self.GetAtoms() if i.AtomName=='CA'][0]
def GetCenterofGravity(self):
'''
NOTE: Yet to add the atomic masses.
'''
atoms=self.GetAtoms()
AtomicMass=1
XYZ_M=[0,0,0]
MassofAA=0
for i in atoms:
XYZ_M[0]+=i.Coordinates[0]*AtomicMass
XYZ_M[1]+=i.Coordinates[1]*AtomicMass
XYZ_M[2]+=i.Coordinates[2]*AtomicMass
MassofAA=MassofAA+AtomicMass
return numpy.array([i/MassofAA for i in XYZ_M])
def GetTipofAA(self):
CAlpha=self.GetCAlpha()
resname=self.GetName()
TipofAA=None
if(resname=='ALA' or resname=='GLY'):
TipofAA=CAlpha
else:
MaxDistance=0
for i in self.GetAtoms():
tempdistance=CAlpha.CalcDist(i)
if(tempdistance>MaxDistance):
MaxDistance=tempdistance
TipofAA=i
return TipofAA
class Chain():
def __init__(self,id):
self.__id=id
self.__Residues=None
def __setitem__(self,ResidueNumber,Residue):
try:
self.__Residues[ResidueNumber]=Residue
except:
self.__Residues={}
self.__Residues[ResidueNumber]=Residue
def __getitem__(self,ResidueNumber):
return self.__Residues[ResidueNumber]
def GetID(self):
return self.__id
def GetResidues(self):
for i in sorted(self.__Residues.keys()):yield self.__Residues[i]
class Model():
def __init__(self,id,AllAtoms,AllResidues,AllChains):
self.__id=id
self.__AllAtoms=AllAtoms
self.__AllResidues=AllResidues
self.__AllChains=AllChains
def __getitem__(self,ChainID):
return self.__AllChains[ChainID]
def GetChains(self):
for i in sorted(self.__AllChains.keys()):yield self.__AllChains[i]
def GetResidues(self):
for i in sorted(self.__AllResidues.keys()):yield self.__AllResidues[i]
def GetAtoms(self):
for i in sorted(self.__AllAtoms.keys()):yield self.__AllAtoms[i]
def GetChain(self,ChainID):
return self.__AllChains[ChainID]
class Protein():
def __init__(self,id,name,Models):
self.id=id
self.name=name
self.Models=Models
def __getitem__(self,ModelNumber):
return self.Models[ModelNumber]
def LoadPDB(filename):
Models=[]
start=0
fh=open(filename).read()
frames=fh.split('\nMODEL')
if(len(frames)>1):
start=1
else:
start=0
for FrameNumber,frame in enumerate(frames[start:]):
#Map
AllAtoms={}
AllResidues={}
AllChains={}
lines=frame.split('\n')
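        # ATOM records follow the fixed-column PDB format, so fields below are
        # taken by column slices rather than by splitting on whitespace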
for _ in lines:
if(_[0:4]=='ATOM'):
#NOTE: MAPS CAN BE REMOVED SAFELY
#Chain Defined
ChainID=_[21]
if(ChainID not in AllChains.keys()):AllChains[ChainID]=Chain(ChainID)
#Residue Defined
ResidueNumber=int(_[22:26].strip())
ResidueName=_[17:20]
if(ResidueNumber not in AllResidues.keys()):AllResidues[ResidueNumber]=Residue(ResidueNumber,ResidueName,AllChains[ChainID])
#Residue Added to the chain
AllChains[ChainID].__setitem__(ResidueNumber,AllResidues[ResidueNumber])
#Atom Defined
id=int(_[6:11])
AtomName=_[12:16].strip()
Coordinates=numpy.array([float(_[30:38]),float(_[38:46]),float(_[46:54])])
Occupancy=float(_[54:60])
bfactor=float(_[60:66])
Element=_[76:78].strip()
Charge=_[78:80]
AllAtoms[id]=Atom(id,AtomName,Coordinates,Occupancy,bfactor,Element,Charge,AllResidues[ResidueNumber])
#Atom added to the residue
AllResidues[ResidueNumber].__setitem__(id,AllAtoms[id])
#What to do with these?
AlternateLocationIndicator=_[16]
CodeForInsertions=_[26]
SegmentIdentifier=_[72:76]
#print
Models.append(Model(FrameNumber,AllAtoms,AllResidues,AllChains))
return Protein(filename,None,Models)
#####-----#####-----#####-----#####-----#####-----
def DownloadPDB(filename):
'''
    INFO: This function downloads a PDB structure from RCSB PDB.
'''
import mechanize
br = mechanize.Browser()
response=br.open("https://files.rcsb.org/view/"+filename)
folderandfile='pdb_files/'+filename
open(folderandfile,'w').write(response.read())
return True
def IO():
'''
INFO: Argument parser to the program for now.
'''
parser=argparse.ArgumentParser()
parser.add_argument('filename',metavar='PDBID')
args=parser.parse_args()
return args
#'5mti.pdb'
def main():
filename=IO().filename
    mol=LoadPDB(filename)
#mol=LoadPDB('1ov9.pdb')
#Following will select 0th frame from NMR, will select chain A from it and will select 2nd Amino acid from it.
    print list(mol[0]['A'].GetResidues())
#print mol[0]['A'][2].GetTipofAA().id,mol[0]['A'][2].GetCAlpha().id
return True
if(__name__=='__main__'):
main()
```
#### File: jkjium/pyMAVEN/protein.py
```python
import sys
import math
import copy
import numpy as np
from atom import atom
from AAmap import AAmap
from cluster import cluster
from ncg import ncg
import commp as cp
#from sklearn.cluster import spectral_clustering
#from scipy.sparse import coo_matrix
__all__=['protein']
class protein(object):
# read pdb from file
def __init__(self, pdbname, chain = 'all', top='', pfam='', center='CA', cutoff=5, scutoff=1, flag=0, desc='', nbcutoff=4):
self.atoms=[]
#dictionary for pairwise distance
self.pairwiseDict={}
self.clusters=[]
#pdb, top, pfam, str, pdbidx, seqheader, alignstr, alignidx, center, cutoff, scutoff, flag, desc
#self.pdb = pdbname[len(pdbname)-8:len(pdbname)-4]
self.pdbfile = pdbname
self.pdb = pdbname[:-4]
self.chain = chain
self.top = top
self.pfam = pfam
self.center = center
self.cutoff = cutoff
self.scutoff = scutoff
self.seqheader = self.pdb
self.flag = flag
self.desc = desc
self.nbcutoff = nbcutoff
self.ca = []
fin=open(pdbname, 'r')
lines=fin.readlines()
fin.close()
lastname =''
lastres = ''
aamap = AAmap()
for i in xrange(0,len(lines)):
line = lines[i]
# load only one model
if 'END' in line[0:6]:
break
if line[17:20].strip() not in aamap.AAA2A:
continue
if self.chain != 'all':
if (self.chain != line[21]):
continue
if line[0:6]=='ATOM ':
at = atom(lines[i])
if (at.name == lastname) and (at.resSeq == lastres):
#print '[%s]::alter loc:\n%s' % (self.pdbfile, lines[i])
#if (line[16]==' ' or line[16]=='A'): # to avoid alternative location
continue
else:
self.atoms.append(at)
if at.name.strip()=='CA':
self.ca.append(at)
lastname = at.name
lastres = at.resSeq
# map for Chain+Resi : (index in sequence, ResName)
# 'B529': (132, 'V')
self.resDict = {} # assigned in self.getSeq() function
# resAtoms, a list of lists, each (element) list contains atoms of residues
# resArray, gives a list of keys eg. (A,Q,70), (A,I,71), (A,V,72)
self.seq, self.resArray, self.resAtoms = self.getSeq()
# some residue does not have CA!! 1e6i.aln.pdb the last residue
#aamap = AAmap()
#self.seq = ''.join([aamap.getAAmap(a.resName) for a in self.ca])
# map for sequence index: Chain+Resi(ResName)
# 132 : 'B529(V)'
self.seqDict = {-1: '.'}
for r in self.resDict:
self.seqDict[self.resDict[r][0]] = '%s(%s)' % (r, self.resDict[r][1])
# get atom by atom name, ie. CA, CB, ...
def atomsbyname(self, aname):
return [a for a in self.atoms if a.name.strip() == aname]
# get geometrical center for each residue
def atomsbygmcenter(self):
ats = []
for al in self.resAtoms:
x=0.0
y=0.0
z=0.0
# save CA as a template
# get accumulative coordinates
for a in al:
x+=a.x
y+=a.y
z+=a.z
# replace geom center to template coordinate
# and save for output
reta = copy.copy(al[0])
reta.x = x/len(al)
reta.y = y/len(al)
reta.z = z/len(al)
ats.append(reta)
return ats
# get geometrical center for each residue side chain
# except for gly
def atomsbyscgmcenter(self):
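        # backbone atoms are excluded from the side-chain center; GLY has no
        # side chain, so its CA is used instead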
bb = ['N', 'CA', 'C', 'O']
ats = []
for al in self.resAtoms:
x=0.0
y=0.0
z=0.0
# save CA as a template
# get accumulative coordinates
count = 0
if al[0].resName == 'GLY':
for a in al:
if a.name.strip() == 'CA':
count+=1
x=a.x
y=a.y
z=a.z
else:
for a in al:
#if a.name.strip() in bb and a.resName!='GLY':
if a.name.strip() in bb:
continue
count+=1
x+=a.x
y+=a.y
z+=a.z
# replace geom center to template coordinate
# and save for output
reta = copy.copy(al[0])
if count == 0:
cp._info('err:incomplete residue: %s %d %s' % (self.pdbfile, reta.resSeq, reta.resName))
continue
reta.x = x/count
reta.y = y/count
reta.z = z/count
ats.append(reta)
return ats
# output residue contact by dist cutoff and seqcutoff
# no redundancy
def contactbycutoff(self, atomset, cutoff, seqcutoff=0.0):
cgs = []
for i in xrange(0, len(atomset)):
for j in xrange(i+1, len(atomset)):
a = atomset[i]
b = atomset[j]
dist = np.linalg.norm(np.array((a.x, a.y, a.z))-np.array((b.x, b.y, b.z)))
if dist <= cutoff and abs(a.resSeq-b.resSeq) > seqcutoff:
cgs.append((a,b))
return cgs
# output residue contact by nearest neighbor
# redundant contact removed
def contactbynearest(self, atomset, size):
cgs = []
ncgArray = []
for a in atomset:
c = ncg(a, size)
ncgArray.append(c)
# grow by nearest neighbor
# remove duplicate
dup = set()
for c in ncgArray:
c.grow(atomset)
key = ' '.join(sorted(['%s%d' % (a.chainID, a.resSeq) for a in c.atoms]))
#if key in dup:
# print 'duplicate key: %s' % key
if key not in dup:
cgs.append(c.atoms)
dup.add(key)
return cgs
# print PDB content
def printPDB(self):
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
a.dump()
# print Coordinates
def printCoor(self):
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
print a.getCoor()
# return sequence extracted from pdb file
# assign values for self.resDict['B641'] = (seqpos, 'R')
def getSeq(self):
aamap = AAmap()
seq=''
#last_resSeq = -1 # 1a8v the first resi starts from -1 !!!!
last_resSeq = -9999 # 1a8v the first resi starts from -1 !!!!
seqPos = 0
resArray = []
resAtomsAll = []
resatoms = []
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
if last_resSeq != a.resSeq:
seq=seq+aamap.getAAmap(a.resName)
last_resSeq = a.resSeq
key = '%s%d' % (a.chainID, a.resSeq)
self.resDict[key] = (seqPos, seq[seqPos])
seqPos+=1
#resArray.append('%s %s %s' % (a.chainID,aamap.getAAmap(a.resName),str(a.resSeq)))
resArray.append((a.chainID,aamap.getAAmap(a.resName),a.resSeq))
if len(resatoms)>0:
resAtomsAll.append(resatoms)
resatoms=[]
resatoms.append(a)
# after loop add the last res into resatoms
# only resSeq change trigger adding above
if len(resatoms)>0:
resAtomsAll.append(resatoms)
return seq, resArray, resAtomsAll
# print PDB sequence into fasta file
def writeFASTA(self):
fafile = self.pdb+'.fa'
aamap = AAmap()
seq=''
count = 0
last_resSeq = -1
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
if last_resSeq != a.resSeq:
seq=seq+aamap.getAAmap(a.resName)
last_resSeq = a.resSeq
count+=1
seq=seq+'\n'
header = '>%s/1-%d\n' % (self.pdb, count)
print header+seq
fp=open(fafile, 'w')
fp.write(header+seq)
fp.close()
# extract all the CA atoms from the pdb file
def writeCA(self, filename, chain='all'):
fd=open(filename, 'w')
count=0
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
#a.dump()
if chain == 'all':
if 'CA' in a.name:
fd.write(a.writeAtom())
count=count+1
else:
if 'CA' in a.name and a.chainID == chain:
fd.write(a.writeAtom())
count=count+1
fd.close()
if count==0:
print "No atom written in [%s]!" % (filename)
# extract all the CA atoms in Chain A from pdb. Write to a file
def writeChainACA(self, filename):
fd=open(filename, 'w')
count=0
for i in xrange(0,len(self.atoms)):
a=self.atoms[i]
#a.dump()
if 'CA' in a.name and a.chainID=='A':
fd.write(a.writeAtom())
count=count+1
fd.close()
if count==0:
print "No atom written in [%s]!" % (filename)
# get tip atom list
#def atomsbytip(self, profile):
def atomsbytip(self):
profile = 'AAtips.py'
cgs=[]
# loading tip atoms records
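        # the parser expects each AAtips.py line as: residue name, a middle
        # field (unused here), and space-separated tip atom names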
fp=open(profile, 'r')
lines = fp.readlines()
fp.close()
AAtipDict={}
for i in xrange(0,len(lines)):
line = lines[i].strip()
AAstr = line.split(',')
AAname = AAstr[0]
AAtips = AAstr[2]
AAtipDict[AAname]=AAtips.split(' ')
# print '%s %s' % (AAname, AAtipDict[AAname])
# iterate all atoms
#fd=open(filename, 'w')
lastAtom = self.atoms[0]
currentResi = lastAtom.resSeq
matchCount=0
outputCount=0
X=0.0
Y=0.0
Z=0.0
isDone = 0
for i in xrange(0,len(self.atoms)):
a = copy.copy(self.atoms[i])
#if a.chainID!='A':
# continue
if a.resName not in AAtipDict:
print 'err:%s:: non AA atom %s %d [%s]]' % (self.pdb, a.resName, a.resSeq, a.name.strip())
continue
if a.resSeq == currentResi: # if in the same residue
if isDone == 0:
# check against all the tip atoms
for tipAtom in AAtipDict[a.resName]:
# summing up coordinates if matching
if a.name.strip()==tipAtom:
X+=a.x
Y+=a.y
Z+=a.z
matchCount+=1
# print '%s:: matching %s %d [%s] with [%s], matchCount: %d' % (self.pdb, a.resName, a.resSeq, a.name.strip(), tipAtom, matchCount)
# output final coordinates and change flow control variables
if matchCount == len(AAtipDict[a.resName]):
# print '%s:: matching complete %s %d' % (self.pdb, a.resName, a.resSeq)
a.x = X/matchCount
a.y = Y/matchCount
a.z = Z/matchCount
# print '[%s]\n' % a.writeAtom()
#fd.write(a.writeAtom())
cgs.append(a)
outputCount+=1
X=0.0
Y=0.0
Z=0.0
isDone = 1
# lastAtom = a
# print 'matchCount trace : %d' % (matchCount)
else: # residue number changed
if matchCount!=len(AAtipDict[lastAtom.resName]):
#print "%s:: Tip atom not found for [%d] [%s], use last atom [%s] instead. matchCount: %d, tipCount: %d" % (self.pdb, lastAtom.resSeq, lastAtom.resName, lastAtom.name, matchCount, len(AAtipDict[lastAtom.resName]))
#fd.write(lastAtom.writeAtom())
X=0.0
Y=0.0
Z=0.0
if a.name.strip()!='N':
print "err:%s:: No leading [N] for RESI [%d] [%s]" % (self.pdb, a.resSeq, a.resName)
currentResi = a.resSeq
matchCount=0
isDone=0
lastAtom = a # save last atom after all business' done
# for the last residue (there is no residue number change for it)
if matchCount!=len(AAtipDict[lastAtom.resName]):
print "err:%s:: Tip atom not found for [%d] [%s]" % (self.pdb, lastAtom.resSeq, lastAtom.resName)
#fd.write(a.writeAtom())
if outputCount==0:
print "err:No atom written from %s" % (self.pdb)
return cgs
# tip atom extraction
# not for chain A only
# def writeChainATips(self, profile, filename):
# this will change the original atoms!!!!
def writeTips(self, profile, filename):
# loading tip atoms records
fp=open(profile, 'r')
lines = fp.readlines()
fp.close()
AAtipDict={}
for i in xrange(0,len(lines)):
line = lines[i].strip()
AAstr = line.split(',')
AAname = AAstr[0]
AAtips = AAstr[2]
AAtipDict[AAname]=AAtips.split(' ')
# print '%s %s' % (AAname, AAtipDict[AAname])
# iterate all atoms
fd=open(filename, 'w')
lastAtom = self.atoms[0]
currentResi = lastAtom.resSeq
matchCount=0
outputCount=0
X=0.0
Y=0.0
Z=0.0
isDone = 0
for i in xrange(0,len(self.atoms)):
#a = self.atoms[i]
a = copy.copy(self.atoms[i])
#if a.chainID!='A':
# continue
if a.resName not in AAtipDict:
print '%s:: non AA atom %s %d [%s]]' % (self.pdb, a.resName, a.resSeq, a.name.strip())
continue
if a.resSeq == currentResi: # if in the same residue
if isDone == 0:
# check against all the tip atoms
for tipAtom in AAtipDict[a.resName]:
# summing up coordinates if matching
if a.name.strip()==tipAtom:
X+=a.x
Y+=a.y
Z+=a.z
matchCount+=1
# print '%s:: matching %s %d [%s] with [%s], matchCount: %d' % (self.pdb, a.resName, a.resSeq, a.name.strip(), tipAtom, matchCount)
# output final coordinates and change flow control variables
if matchCount == len(AAtipDict[a.resName]):
# print '%s:: matching complete %s %d' % (self.pdb, a.resName, a.resSeq)
a.x = X/matchCount
a.y = Y/matchCount
a.z = Z/matchCount
# print '[%s]\n' % a.writeAtom()
fd.write(a.writeAtom())
outputCount+=1
X=0.0
Y=0.0
Z=0.0
isDone = 1
# lastAtom = a
# print 'matchCount trace : %d' % (matchCount)
else: # residue number changed
if matchCount!=len(AAtipDict[lastAtom.resName]):
#print "%s:: Tip atom not found for [%d] [%s], use last atom [%s] instead. matchCount: %d, tipCount: %d" % (self.pdb, lastAtom.resSeq, lastAtom.resName, lastAtom.name, matchCount, len(AAtipDict[lastAtom.resName]))
#fd.write(lastAtom.writeAtom())
X=0.0
Y=0.0
Z=0.0
if a.name.strip()!='N':
print "%s:: No leading [N] for RESI [%d] [%s]" % (self.pdb, a.resSeq, a.resName)
currentResi = a.resSeq
matchCount=0
isDone=0
lastAtom = a # save last atom after all business' done
# for the last residue (there is no residue number change for it)
if matchCount!=len(AAtipDict[lastAtom.resName]):
print "%s:: Tip atom not found for [%d] [%s]" % (self.pdb, lastAtom.resSeq, lastAtom.resName)
#fd.write(a.writeAtom())
if outputCount==0:
print "No atom written from [%s]!" % (filename)
# write concise version of a tip file
# x,y,z,resi,resn
# input tip file
# output the concised version
def writeSPDB(self):
aamap = AAmap()
fo = open(self.pdbfile+'.spdb', 'w')
for a in self.atoms:
fo.write('%f %f %f %d %s\n' % (a.x, a.y, a.z, a.resSeq, aamap.getAAmap(a.resName)))
fo.close()
# use spectral clustering method to find residue contact groups
'''
def spectralClustering(self, cluster_size):
rowlist = []
collist = []
datalist = []
N = len(self.atoms)
for i in xrange(0,N):
v1=np.array((self.atoms[i].x, self.atoms[i].y, self.atoms[i].z))
for j in xrange(0,N):
v2 = np.array((self.atoms[j].x, self.atoms[j].y, self.atoms[j].z))
if i == j:
euclidean = 0.0
affinity = 5.0 # set a large value
else:
euclidean = np.linalg.norm(v1-v2)
affinity = 1/euclidean
key = "%d-%d" % (i,j)
self.pairwiseDict[key]= euclidean
rowlist.append(i)
collist.append(j)
datalist.append(affinity)
# prepare affinity matrix for clustering
row = np.array(rowlist)
col = np.array(collist)
data = np.array(datalist)
graph = coo_matrix((data, (row, col)), shape=(N, N))
labels = spectral_clustering(graph, n_clusters=int(N/cluster_size), eigen_solver='arpack')
amap = AAmap()
cluster2fid = {}
for index, lab in enumerate(labels) :
cluster2fid.setdefault(lab, [])
cluster2fid[lab].append(index)
for key in cluster2fid: # for each cluster
c=cluster(self.pdb, self.top, self.pfam, '', '', self.seqheader, '', '', self.center, self.cutoff, self.scutoff, self.flag, 1.0, self.desc)
for index in cluster2fid[key]:
c.addNeighbor(amap, self.atoms[index],index)
c.pdbidx=c.pdbidx.lstrip() # will change meanDist
c.pdbResSeq=c.pdbResSeq.lstrip()
meanDist = self.clusterMeanDist(c)
print ('%s,%0.2f,%s,%s,%s') % (self.pdb, meanDist, ''.join(sorted(c.str)), ''.join(sorted(c.typeStr)), c.pdbResSeq)
'''
# pairwise
# read all atom XXXX_A.pdb and get pairwise contact within a cutoff and a sequential distance
# output XXXX_A.res.csu_d
def pairContactbyCutoff(self, cutoff, seqdist):
print self.pdbfile
cid = self.pdbfile[5] # just for XXXX_A.pdb naming format
existDict = {}
fo = open(self.pdbfile[:-4]+'.res.csu_d', 'w')
for i in xrange(0, len(self.atoms)):
for j in xrange(0, len(self.atoms)):
a = self.atoms[i]
b = self.atoms[j]
if abs(a.resSeq - b.resSeq) <= seqdist or a.resSeq == b.resSeq:
continue
v1 = np.array((a.x, a.y, a.z))
v2 = np.array((b.x, b.y, b.z))
dist = np.linalg.norm(v1-v2)
key = '%d %d' % (a.resSeq, b.resSeq)
if key not in existDict and dist <= cutoff:
fo.write('%s\t%d%s\t%s\t%d%s\n' % (a.resName, a.resSeq, cid, b.resName, b.resSeq, cid))
existDict['%d %d' % (a.resSeq, b.resSeq)] = 1
existDict['%d %d' % (b.resSeq, a.resSeq)] = 1
fo.close()
# calculate pairwise distance
def pairwise(self):
for i in xrange(0,len(self.atoms)):
v1=np.array((self.atoms[i].x, self.atoms[i].y, self.atoms[i].z))
for j in xrange(0,len(self.atoms)):
key = "%d-%d" % (i,j)
v2 = np.array((self.atoms[j].x, self.atoms[j].y, self.atoms[j].z))
self.pairwiseDict[key]= np.linalg.norm(v1-v2)
# print pairwise distance between atom[i] and /other atoms
# index starts from 0
def getPairwiseOf(self, index):
a = self.atoms[index]
a.dump()
v1=np.array((a.x, a.y, a.z))
for j in xrange(0,len(self.atoms)):
key = "%d-%d" % (index,j)
v2 = np.array((self.atoms[j].x, self.atoms[j].y, self.atoms[j].z))
print "%s: [%f], [%d]" % (key, np.linalg.norm(v1-v2), abs(index-j))
# find clusters (centered by CA). Save all clusters in an array
def filterClusters(self):
if len(self.pairwiseDict)==0:
self.pairwise()
amap = AAmap()
for i in xrange(0,len(self.atoms)):
c=cluster(self.pdb, self.top, self.pfam, '', '', self.seqheader, '', '', self.center, self.cutoff, self.scutoff, self.flag, 1.0, self.desc)
c.addNeighbor(amap, self.atoms[i],i) # put itself in first
nbnum=0
for j in xrange(0,len(self.atoms)):
key= "%d-%d" % (i, j)
if (self.pairwiseDict[key] <= self.cutoff) and (abs(i-j) >= self.scutoff):
c.addNeighbor(amap, self.atoms[j], j)
nbnum=nbnum+1
c.thetaPhi.append(self.calculateThetaPhi(self.atoms[i], self.atoms[j]))
if nbnum<self.nbcutoff:
continue
c.pdbidx=c.pdbidx.lstrip() # will change meanDist
c.pdbResSeq=c.pdbResSeq.lstrip()
meanDist = self.clusterMeanDist(c)
if meanDist < 5.8:
print ('%s,%0.2f,%s,%s,%s,%s') % (self.pdb, meanDist, ''.join(sorted(c.str)), ''.join(sorted(c.typeStr)), c.pdbResSeq, self.getSphericalStr(c))
self.clusters.append(c)
#self.writeClusterPDB(('%s%d.pdb') % (self.pdb,len(self.clusters)), templateAtom, c.thetaPhi)
def calculateThetaPhi(self, at1, at2):
# v1 = np.array((at1.x, at1.y, at1.z))
# v2 = np.array((at2.x, at2.y, at2.z))
# v0 = (v1-v2)/np.linalg.norm(v1-v2)
#
# x = v0[0]
# y = v0[1]
# z = v0[2]
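        # spherical angles (theta = azimuth, phi = elevation) of the
        # displacement vector from at2 to at1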
x=at1.x-at2.x
y=at1.y-at2.y
z=at1.z-at2.z
phi = math.atan2(z, math.sqrt(x*x+y*y))
th = math.atan2(y,x)
return (th, phi)
def getSphericalStr(self, c):
x=''
y=''
z=''
for item in c.thetaPhi:
x=('%s %0.4f') % (x,item[0])
y=('%s %0.4f') % (y,item[1])
z=('%s 0') % (z)
return (('%s%s%s,%d') % (x,y,z,len(c.thetaPhi))).lstrip()
    # get mean pairwise distance for each atom in the cluster
# should filter those who has large mean dist value
# which means they are not real clusters
def clusterMeanDist(self,cl):
dist=0.0
count=0
idxArray=cl.pdbidx.split(' ')
for i in xrange(0, len(idxArray)):
for j in xrange(i+1, len(idxArray)):
count+=1
key='%s-%s' % (idxArray[i],idxArray[j])
dist+=self.pairwiseDict[key]
#print 'clusterMeanDist:: ', dist/count
if count==0:
return 0.0
return dist/count
# print a cluster object
def dumpClusters(self):
for i in xrange(0,len(self.clusters)):
a=self.atoms[i]
c=self.clusters[i]
print '++++++++++++++++++++++++++++++++++\n'
a.dump()
c.dump()
print '++++++++++++++++++++++++++++++++++\n'
# print protein object
def dump(self):
print ('pdb:[%s]\n' +
'top:[%s]\n' +
'pfam:[%s]\n' +
'center:[%s]\n'+
'cutoff:[%f]\n' +
'scutoff:[%d]\n' +
'seqheader:[%s]\n' +
'flag:[%d]\n' +
'desc:[%s]\n') % \
(
self.pdb,
self.top,
self.pfam,
self.center,
self.cutoff,
self.scutoff,
self.seqheader,
self.flag,
self.desc
)
def getClusterNum(self):
return len(self.clusters)
``` |
{
"source": "jkjk822/bazel-skylib",
"score": 2
} |
#### File: bazel-skylib/rules/native_binary.bzl
```python
load("//rules/private:copy_file_private.bzl", "copy_bash", "copy_cmd")
def _impl_rule(ctx, is_windows):
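    # Copy the prebuilt src to the declared output; Windows hosts need the
    # cmd-based copy helper instead of the bash one.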
out = ctx.actions.declare_file(ctx.attr.out)
if is_windows:
copy_cmd(ctx, ctx.file.src, out)
else:
copy_bash(ctx, ctx.file.src, out)
return DefaultInfo(
executable = out,
files = depset([out]),
runfiles = ctx.runfiles(
files = [out],
collect_data = True,
collect_default = True,
),
)
def _impl(ctx):
return _impl_rule(ctx, ctx.attr.is_windows)
_ATTRS = {
"src": attr.label(
executable = True,
allow_single_file = True,
mandatory = True,
cfg = "host",
),
"data": attr.label_list(allow_files = True),
# "out" is attr.string instead of attr.output, so that it is select()'able.
"out": attr.string(mandatory = True),
"is_windows": attr.bool(mandatory = True),
}
_native_binary = rule(
implementation = _impl,
attrs = _ATTRS,
executable = True,
)
_native_test = rule(
implementation = _impl,
attrs = _ATTRS,
test = True,
)
def native_binary(name, src, out, data = None, **kwargs):
"""Wraps a pre-built binary or script with a binary rule.
You can "bazel run" this rule like any other binary rule, and use it as a tool in genrule.tools for example. You can also augment the binary with runfiles.
Args:
name: The name of the test rule.
src: label; path of the pre-built executable
out: output; an output name for the copy of the binary. (Bazel requires that this rule make a copy of 'src'.)
data: list of labels; data dependencies
**kwargs: The <a href="https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes-binaries">common attributes for binaries</a>.
"""
_native_binary(
name = name,
src = src,
out = out,
data = data,
is_windows = select({
"@bazel_tools//src/conditions:host_windows": True,
"//conditions:default": False,
}),
**kwargs
)
def native_test(name, src, out, data = None, **kwargs):
"""Wraps a pre-built binary or script with a test rule.
You can "bazel test" this rule like any other test rule. You can also augment the binary with
runfiles.
Args:
name: The name of the test rule.
src: label; path of the pre-built executable
out: output; an output name for the copy of the binary. (Bazel requires that this rule make a copy of 'src'.)
data: list of labels; data dependencies
**kwargs: The <a href="https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes-tests">common attributes for tests</a>.
"""
_native_test(
name = name,
src = src,
out = out,
data = data,
is_windows = select({
"@bazel_tools//src/conditions:host_windows": True,
"//conditions:default": False,
}),
**kwargs
)
``` |
{
"source": "jkjkiiiii/PaddleHub",
"score": 2
} |
#### File: paddlehub/module/manager.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from functools import cmp_to_key
import tarfile
import sys
import importlib
import inspect
import paddlehub as hub
from paddlehub.common import utils
from paddlehub.common.downloader import default_downloader
from paddlehub.common.dir import MODULE_HOME
from paddlehub.common.cml_utils import paint_modules_info
from paddlehub.common.logger import logger
from paddlehub.common import tmp_dir
from paddlehub.module import module_desc_pb2
from paddlehub.version import hub_version as sys_hub_version
from paddle import __version__ as sys_paddle_version
class LocalModuleManager(object):
def __init__(self, module_home=None):
self.local_modules_dir = module_home if module_home else MODULE_HOME
self.modules_dict = {}
if not os.path.exists(self.local_modules_dir):
utils.mkdir(self.local_modules_dir)
elif os.path.isfile(self.local_modules_dir):
raise ValueError("Module home should be a folder, not a file")
def check_module_valid(self, module_path):
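        # A module directory is considered valid if it contains a serialized
        # module_desc.pb, or a module.py that defines a hub.Module subclass.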
try:
desc_pb_path = os.path.join(module_path, 'module_desc.pb')
if os.path.exists(desc_pb_path) and os.path.isfile(desc_pb_path):
info = {}
desc = module_desc_pb2.ModuleDesc()
with open(desc_pb_path, "rb") as fp:
desc.ParseFromString(fp.read())
info['version'] = desc.attr.map.data["module_info"].map.data[
"version"].s
info['name'] = desc.attr.map.data["module_info"].map.data[
"name"].s
return True, info
else:
module_file = os.path.realpath(
os.path.join(module_path, 'module.py'))
if os.path.exists(module_file):
basename = os.path.split(module_path)[-1]
dirname = os.path.join(
*list(os.path.split(module_path)[:-1]))
sys.path.insert(0, dirname)
_module = importlib.import_module(
"{}.module".format(basename))
for _item, _cls in inspect.getmembers(
_module, inspect.isclass):
_item = _module.__dict__[_item]
_file = os.path.realpath(
sys.modules[_item.__module__].__file__)
if issubclass(
_item,
hub.Module) and _file.startswith(module_file):
version = _item._version
break
sys.path.pop(0)
return True, {'version': version, 'name': _item._name}
logger.warning(
"%s does not exist, the module will be reinstalled" %
desc_pb_path)
except:
pass
return False, None
def all_modules(self, update=False):
if not update and self.modules_dict:
return self.modules_dict
self.modules_dict = {}
for sub_dir_name in os.listdir(self.local_modules_dir):
sub_dir_path = os.path.join(self.local_modules_dir, sub_dir_name)
if os.path.isdir(sub_dir_path):
if "-" in sub_dir_path:
new_sub_dir_path = sub_dir_path.replace("-", "_")
shutil.move(sub_dir_path, new_sub_dir_path)
sub_dir_path = new_sub_dir_path
valid, info = self.check_module_valid(sub_dir_path)
if valid:
module_name = info['name']
self.modules_dict[module_name] = (sub_dir_path,
info['version'])
return self.modules_dict
def search_module(self, module_name, module_version=None, update=False):
self.all_modules(update=update)
return self.modules_dict.get(module_name, None)
def install_module(self,
module_name=None,
module_dir=None,
module_package=None,
module_version=None,
upgrade=False,
extra=None):
md5_value = installed_module_version = None
from_user_dir = True if module_dir else False
with tmp_dir() as _dir:
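            # Modules are downloaded and unpacked into a temporary directory
            # first, then moved or copied into MODULE_HOME.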
if module_name:
self.all_modules(update=True)
module_info = self.modules_dict.get(module_name, None)
if module_info:
if not module_version or module_version == self.modules_dict[
module_name][1]:
module_dir = self.modules_dict[module_name][0]
module_tag = module_name if not module_version else '%s-%s' % (
module_name, module_version)
tips = "Module %s already installed in %s" % (
module_tag, module_dir)
return True, tips, self.modules_dict[module_name]
search_result = hub.HubServer().get_module_url(
module_name, version=module_version, extra=extra)
name = search_result.get('name', None)
url = search_result.get('url', None)
md5_value = search_result.get('md5', None)
installed_module_version = search_result.get('version', None)
if not url or (module_version is not None
and installed_module_version != module_version
) or (name != module_name):
if hub.HubServer()._server_check() is False:
tips = "Request Hub-Server unsuccessfully, please check your network."
return False, tips, None
module_versions_info = hub.HubServer().search_module_info(
module_name)
if module_versions_info is None:
tips = "Can't find module %s, please check your spelling." \
% (module_name)
elif module_version is not None and module_version not in [
item[1] for item in module_versions_info
]:
tips = "Can't find module %s with version %s, all versions are listed below." \
% (module_name, module_version)
tips += paint_modules_info(module_versions_info)
else:
tips = "The version of PaddlePaddle(%s) or PaddleHub(%s) can not match module, please upgrade your PaddlePaddle or PaddleHub according to the form below." \
                            % (sys_paddle_version, sys_hub_version)
tips += paint_modules_info(module_versions_info)
return False, tips, None
result, tips, module_zip_file = default_downloader.download_file(
url=url,
save_path=_dir,
save_name=module_name,
replace=True,
print_progress=True)
result, tips, module_dir = default_downloader.uncompress(
file=module_zip_file,
dirname=os.path.join(_dir, "tmp_module"),
delete_file=True,
print_progress=True)
if module_package:
with tarfile.open(module_package, "r:gz") as tar:
file_names = tar.getnames()
size = len(file_names) - 1
module_dir = os.path.join(_dir, file_names[0])
for index, file_name in enumerate(file_names):
tar.extract(file_name, _dir)
if "-" in module_dir:
new_module_dir = module_dir.replace("-", "_")
shutil.move(module_dir, new_module_dir)
module_dir = new_module_dir
module_name = hub.Module(directory=module_dir).name
if from_user_dir:
module_name = hub.Module(directory=module_dir).name
module_version = hub.Module(directory=module_dir).version
self.all_modules(update=False)
module_info = self.modules_dict.get(module_name, None)
if module_info:
if module_version == module_info[1]:
module_dir = self.modules_dict[module_name][0]
module_tag = module_name if not module_version else '%s-%s' % (
module_name, module_version)
tips = "Module %s already installed in %s" % (
module_tag, module_dir)
return True, tips, self.modules_dict[module_name]
if module_dir:
if md5_value:
with open(
os.path.join(MODULE_HOME, module_dir, "md5.txt"),
"w") as fp:
fp.write(md5_value)
save_path = os.path.join(MODULE_HOME,
module_name.replace("-", "_"))
if save_path != module_dir:
if os.path.exists(save_path):
shutil.rmtree(save_path)
if from_user_dir:
shutil.copytree(module_dir, save_path)
else:
shutil.move(module_dir, save_path)
module_dir = save_path
tips = "Successfully installed %s" % module_name
if installed_module_version:
tips += "-%s" % installed_module_version
return True, tips, (module_dir, installed_module_version)
tips = "Download %s-%s failed" % (module_name, module_version)
return False, tips, module_dir
def uninstall_module(self, module_name, module_version=None):
self.all_modules(update=True)
        if module_name not in self.modules_dict:
tips = "%s is not installed" % module_name
return True, tips
if module_version and module_version != self.modules_dict[module_name][
1]:
tips = "%s-%s is not installed" % (module_name, module_version)
return True, tips
tips = "Successfully uninstalled %s" % module_name
if module_version:
tips += '-%s' % module_version
module_dir = self.modules_dict[module_name][0]
shutil.rmtree(module_dir)
return True, tips
default_module_manager = LocalModuleManager()
``` |
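A minimal usage sketch for the manager above, assuming it is importable as `paddlehub.module.manager` (the import path and the module name are assumptions, not taken from this file):
```python
# Hedged sketch: uninstalling a locally installed module through the
# default_module_manager singleton defined above.
from paddlehub.module.manager import default_module_manager  # import path assumed

ok, tips = default_module_manager.uninstall_module("senta_bilstm")  # placeholder name
print(ok, tips)  # uninstall_module returns (True, tips) whether or not the module was installed
```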
{
"source": "jkjkjkjkjk/pywikibot-core",
"score": 3
} |
#### File: jkjkjkjkjk/pywikibot-core/generate_family_file.py
```python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
#
# (C) <NAME>, 2010-2013
# (C) Pywikibot team, 2010-2015
#
# Distributed under the terms of the MIT license
#
__version__ = '$Id: 81013ef7ef131adfec131ac6f657a3856b7e9356 $'
#
# system imports
import codecs
import os
import sys
# creating & retrieving urls
if sys.version_info[0] > 2:
from urllib.parse import urlparse
raw_input = input
else:
from urlparse import urlparse
# Disable user-config checks so the family can be created first,
# and then used when generating the user-config
_orig_no_user_config = os.environ.get('PYWIKIBOT2_NO_USER_CONFIG') # noqa
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = '2' # noqa
from pywikibot.site_detect import MWSite as Wiki
# Reset this flag in case another script is run by pwb after this script
if not _orig_no_user_config:
del os.environ['PYWIKIBOT2_NO_USER_CONFIG']
else:
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = _orig_no_user_config
class FamilyFileGenerator(object):
"""Family file creator."""
def __init__(self, url=None, name=None, dointerwiki=None):
"""Constructor."""
if url is None:
url = raw_input("Please insert URL to wiki: ")
if name is None:
name = raw_input("Please insert a short name (eg: freeciv): ")
self.dointerwiki = dointerwiki
self.base_url = url
self.name = name
self.wikis = {} # {'https://wiki/$1': Wiki('https://wiki/$1'), ...}
self.langs = [] # [Wiki('https://wiki/$1'), ...]
def run(self):
"""Main method, generate family file."""
print("Generating family file from %s" % self.base_url)
w = Wiki(self.base_url)
self.wikis[w.iwpath] = w
print()
print("==================================")
print("api url: %s" % w.api)
print("MediaWiki version: %s" % w.version)
print("==================================")
print()
self.getlangs(w)
self.getapis()
self.writefile()
def getlangs(self, w):
"""Determine language of a site."""
print("Determining other languages...", end="")
try:
self.langs = w.langs
print(u' '.join(sorted([wiki[u'prefix'] for wiki in self.langs])))
except Exception as e:
self.langs = []
print(e, "; continuing...")
if len([lang for lang in self.langs if lang['url'] == w.iwpath]) == 0:
self.langs.append({u'language': w.lang,
u'local': u'',
u'prefix': w.lang,
u'url': w.iwpath})
if len(self.langs) > 1:
if self.dointerwiki is None:
makeiw = raw_input(
"\nThere are %i languages available."
"\nDo you want to generate interwiki links?"
"This might take a long time. ([y]es/[N]o/[e]dit)"
% len(self.langs)).lower()
else:
makeiw = self.dointerwiki
if makeiw == "y":
pass
elif makeiw == "e":
for wiki in self.langs:
print(wiki['prefix'], wiki['url'])
do_langs = raw_input("Which languages do you want: ")
self.langs = [wiki for wiki in self.langs
if wiki['prefix'] in do_langs or
wiki['url'] == w.iwpath]
else:
self.langs = [wiki for wiki in self.langs
if wiki[u'url'] == w.iwpath]
def getapis(self):
"""Load other language pages."""
print("Loading wikis... ")
for lang in self.langs:
print(" * %s... " % (lang[u'prefix']), end="")
if lang[u'url'] not in self.wikis:
try:
self.wikis[lang[u'url']] = Wiki(lang[u'url'])
print("downloaded")
except Exception as e:
print(e)
else:
print("in cache")
def writefile(self):
"""Write the family file."""
fn = "pywikibot/families/%s_family.py" % self.name
print("Writing %s... " % fn)
try:
open(fn)
if raw_input("%s already exists. Overwrite? (y/n)"
% fn).lower() == 'n':
print("Terminating.")
sys.exit(1)
except IOError: # file not found
pass
f = codecs.open(fn, 'w', 'utf-8')
f.write("""
# -*- coding: utf-8 -*-
\"\"\"
This family file was auto-generated by $Id: 81013ef7ef131adfec131ac6f657a3856b7e9356 $
Configuration parameters:
url = %(url)s
name = %(name)s
Please do not commit this to the Git repository!
\"\"\"
from pywikibot import family
from pywikibot.tools import deprecated
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = '%(name)s'
self.langs = {
""".lstrip() % {'url': self.base_url, 'name': self.name})
for w in self.wikis.values():
f.write(" '%(lang)s': '%(hostname)s',\n"
% {'lang': w.lang, 'hostname': urlparse(w.server).netloc})
f.write(" }\n\n")
f.write(" def scriptpath(self, code):\n")
f.write(" return {\n")
for w in self.wikis.values():
f.write(" '%(lang)s': '%(path)s',\n"
% {'lang': w.lang, 'path': w.scriptpath})
f.write(" }[code]\n")
f.write("\n")
f.write(" @deprecated('APISite.version()')\n")
f.write(" def version(self, code):\n")
f.write(" return {\n")
for w in self.wikis.values():
if w.version is None:
f.write(" '%(lang)s': None,\n" % {'lang': w.lang})
else:
f.write(" '%(lang)s': u'%(ver)s',\n"
% {'lang': w.lang, 'ver': w.version})
f.write(" }[code]\n")
f.write("\n")
f.write(" def protocol(self, code):\n")
f.write(" return {\n")
for w in self.wikis.values():
f.write(" '%(lang)s': u'%(protocol)s',\n"
% {'lang': w.lang, 'protocol': urlparse(w.server).scheme})
f.write(" }[code]\n")
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: %s <url> <short name>" % sys.argv[0])
print("Example: %s https://www.mywiki.bogus/wiki/Main_Page mywiki"
% sys.argv[0])
print("This will create the file families/mywiki_family.py")
FamilyFileGenerator(*sys.argv[1:]).run()
```
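The generator above can also be driven programmatically instead of via `sys.argv`; a minimal sketch with placeholder values for the wiki URL and short name (run from the repository root so the script is importable):
```python
# Hedged sketch: programmatic use of FamilyFileGenerator with placeholder values.
from generate_family_file import FamilyFileGenerator

gen = FamilyFileGenerator(url="https://wiki.example.org/wiki/Main_Page",
                          name="examplewiki",
                          dointerwiki="y")  # "y" skips the interactive interwiki prompt
gen.run()  # writes pywikibot/families/examplewiki_family.py
```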
#### File: pywikibot/families/wikivoyage_family.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 3deafc7abd176ec6341da41f33d9524fbe88ca14 $'
# The new wikivoyage family that is hosted at wikimedia
from pywikibot import family
class Family(family.SubdomainFamily, family.WikimediaFamily):
"""Family class for Wikivoyage."""
name = 'wikivoyage'
def __init__(self):
"""Constructor."""
self.languages_by_size = [
'en', 'de', 'fa', 'it', 'fr', 'ru', 'nl', 'pt', 'pl', 'he', 'es',
'vi', 'sv', 'zh', 'ro', 'el', 'uk',
]
super(Family, self).__init__()
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = ['es', 'ru', ]
```
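For context, a family file like the one above is consumed through pywikibot's site machinery; a minimal sketch (assumes pywikibot is installed and a user-config.py is present):
```python
# Hedged sketch: resolving a Wikivoyage site through the family defined above.
import pywikibot

site = pywikibot.Site('en', 'wikivoyage')     # (language code, family name)
print(site.family.name)                       # -> 'wikivoyage'
print('en' in site.family.languages_by_size)  # attribute set in Family.__init__ above
```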
#### File: pywikibot/userinterfaces/transliteration.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 1c6f6d015f45c24c8e90a777d4543000efadf230 $'
class transliterator(object):
"""Class to transliterating text."""
def __init__(self, encoding):
"""
Initialize the transliteration mapping.
@param encoding: the encoding available. Any transliterated character
which can't be mapped, will be removed from the mapping.
@type encoding: str
"""
self.trans = {}
for char in u"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ":
self.trans[char] = u"A"
for char in u"ȀǞ":
self.trans[char] = u"Ä"
self.trans[u"Ǻ"] = u"Å"
self.trans[u"Ä"] = u"Ae"
self.trans[u"Å"] = u"Aa"
for char in u"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ":
self.trans[char] = u"a"
for char in u"ȁǟ":
self.trans[char] = u"ä"
self.trans[u"ǻ"] = u"å"
self.trans[u"ä"] = u"ae"
self.trans[u"å"] = u"aa"
for char in u"ḂḄḆƁƂ":
self.trans[char] = u"B"
for char in u"ḃḅḇƀɓƃ":
self.trans[char] = u"b"
for char in u"ĆĈĊÇČƇ":
self.trans[char] = u"C"
for char in u"ćĉċçčƈȼ":
self.trans[char] = u"c"
self.trans[u"Ḉ"] = u"Ç"
self.trans[u"ḉ"] = u"ç"
self.trans[u"Ð"] = u"Dh"
self.trans[u"ð"] = u"dh"
for char in u"ĎḊḌḎḐḒĐƉƊƋ":
self.trans[char] = u"D"
for char in u"ďḋḍḏḑḓđɖɗƌ":
self.trans[char] = u"d"
for char in u"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ":
self.trans[char] = u"E"
for char in u"ỀẾỄỆỂ":
self.trans[char] = u"Ê"
for char in u"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ":
self.trans[char] = u"e"
for char in u"ềếễệể":
self.trans[char] = u"ê"
for char in u"ḞƑ":
self.trans[char] = u"F"
for char in u"ḟƒ":
self.trans[char] = u"f"
for char in u"ǴḠĞĠĢǦǤƓ":
self.trans[char] = u"G"
for char in u"ǵḡğġģǧǥɠ":
self.trans[char] = u"g"
self.trans[u"Ĝ"] = u"Gx"
self.trans[u"ĝ"] = u"gx"
for char in u"ḢḤḦȞḨḪH̱ĦǶ":
self.trans[char] = u"H"
for char in u"ḣḥḧȟḩḫ̱ẖħƕ":
self.trans[char] = u"h"
for char in u"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ":
self.trans[char] = u"I"
for char in u"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ":
self.trans[char] = u"i"
for char in u"ĴJ":
self.trans[char] = u"J"
for char in u"ɟĵ̌ǰ":
self.trans[char] = u"j"
for char in u"ḰǨĶḲḴƘ":
self.trans[char] = u"K"
for char in u"ḱǩķḳḵƙ":
self.trans[char] = u"k"
for char in u"ĹĻĽḶḸḺḼȽŁ":
self.trans[char] = u"L"
for char in u"ĺļľḷḹḻḽƚłɫ":
self.trans[char] = u"l"
for char in u"ḾṀṂ":
self.trans[char] = u"M"
for char in u"ḿṁṃɱ":
self.trans[char] = u"m"
for char in u"ǸŃÑŅŇṄṆṈṊŊƝɲȠ":
self.trans[char] = u"N"
for char in u"ǹńñņňṅṇṉṋŋɲƞ":
self.trans[char] = u"n"
for char in u"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ":
self.trans[char] = u"O"
for char in u"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ":
self.trans[char] = u"o"
for char in u"ȌŐȪ":
self.trans[char] = u"Ö"
for char in u"ȍőȫ":
self.trans[char] = u"ö"
for char in u"ỒỐỖỘỔȎ":
self.trans[char] = u"Ô"
for char in u"ồốỗộổȏ":
self.trans[char] = u"ô"
for char in u"ṔṖƤ":
self.trans[char] = u"P"
for char in u"ṕṗƥ":
self.trans[char] = u"p"
self.trans[u"ᵽ"] = u"q"
for char in u"ȐŔŖŘȒṘṚṜṞ":
self.trans[char] = u"R"
for char in u"ȑŕŗřȓṙṛṝṟɽ":
self.trans[char] = u"r"
for char in u"ŚṤŞȘŠṦṠṢṨ":
self.trans[char] = u"S"
for char in u"śṥşșšṧṡṣṩȿ":
self.trans[char] = u"s"
self.trans[u"Ŝ"] = u"Sx"
self.trans[u"ŝ"] = u"sx"
for char in u"ŢȚŤṪṬṮṰŦƬƮ":
self.trans[char] = u"T"
for char in u"ţțťṫṭṯṱŧȾƭʈ":
self.trans[char] = u"t"
for char in u"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ":
self.trans[char] = u"U"
for char in u"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ":
self.trans[char] = u"u"
for char in u"ȔŰǛǗǕǙ":
self.trans[char] = u"Ü"
for char in u"ȕűǜǘǖǚ":
self.trans[char] = u"ü"
self.trans[u"Û"] = u"Ux"
self.trans[u"û"] = u"ux"
self.trans[u"Ȗ"] = u"Û"
self.trans[u"ȗ"] = u"û"
self.trans[u"Ừ"] = u"Ù"
self.trans[u"ừ"] = u"ù"
self.trans[u"Ứ"] = u"Ú"
self.trans[u"ứ"] = u"ú"
for char in u"ṼṾ":
self.trans[char] = u"V"
for char in u"ṽṿ":
self.trans[char] = u"v"
for char in u"ẀẂŴẄẆẈ":
self.trans[char] = u"W"
for char in u"ẁẃŵẅẇẉ":
self.trans[char] = u"w"
for char in u"ẊẌ":
self.trans[char] = u"X"
for char in u"ẋẍ":
self.trans[char] = u"x"
for char in u"ỲÝŶŸỸȲẎỴỶƳ":
self.trans[char] = u"Y"
for char in u"ỳýŷÿỹȳẏỵỷƴ":
self.trans[char] = u"y"
for char in u"ŹẐŻẒŽẔƵȤ":
self.trans[char] = u"Z"
for char in u"źẑżẓžẕƶȥ":
self.trans[char] = u"z"
self.trans[u"ɀ"] = u"zv"
# Latin: extended Latin alphabet
self.trans[u"ɑ"] = u"a"
for char in u"ÆǼǢ":
self.trans[char] = u"AE"
for char in u"æǽǣ":
self.trans[char] = u"ae"
self.trans[u"Ð"] = u"Dh"
self.trans[u"ð"] = u"dh"
for char in u"ƎƏƐ":
self.trans[char] = u"E"
for char in u"ǝəɛ":
self.trans[char] = u"e"
for char in u"ƔƢ":
self.trans[char] = u"G"
for char in u"ᵷɣƣᵹ":
self.trans[char] = u"g"
self.trans[u"Ƅ"] = u"H"
self.trans[u"ƅ"] = u"h"
self.trans[u"Ƕ"] = u"Wh"
self.trans[u"ƕ"] = u"wh"
self.trans[u"Ɩ"] = u"I"
self.trans[u"ɩ"] = u"i"
self.trans[u"Ŋ"] = u"Ng"
self.trans[u"ŋ"] = u"ng"
self.trans[u"Œ"] = u"OE"
self.trans[u"œ"] = u"oe"
self.trans[u"Ɔ"] = u"O"
self.trans[u"ɔ"] = u"o"
self.trans[u"Ȣ"] = u"Ou"
self.trans[u"ȣ"] = u"ou"
self.trans[u"Ƽ"] = u"Q"
for char in u"ĸƽ":
self.trans[char] = u"q"
self.trans[u"ȹ"] = u"qp"
self.trans[u""] = u"r"
self.trans[u"ſ"] = u"s"
self.trans[u"ß"] = u"ss"
self.trans[u"Ʃ"] = u"Sh"
for char in u"ʃᶋ":
self.trans[char] = u"sh"
self.trans[u"Ʉ"] = u"U"
self.trans[u"ʉ"] = u"u"
self.trans[u"Ʌ"] = u"V"
self.trans[u"ʌ"] = u"v"
for char in u"ƜǷ":
self.trans[char] = u"W"
for char in u"ɯƿ":
self.trans[char] = u"w"
self.trans[u"Ȝ"] = u"Y"
self.trans[u"ȝ"] = u"y"
self.trans[u"IJ"] = u"IJ"
self.trans[u"ij"] = u"ij"
self.trans[u"Ƨ"] = u"Z"
for char in u"ʮƨ":
self.trans[char] = u"z"
self.trans[u"Ʒ"] = u"Zh"
self.trans[u"ʒ"] = u"zh"
self.trans[u"Ǯ"] = u"Dzh"
self.trans[u"ǯ"] = u"dzh"
for char in u"ƸƹʔˀɁɂ":
self.trans[char] = u"'"
for char in u"Þ":
self.trans[char] = u"Th"
for char in u"þ":
self.trans[char] = u"th"
for char in u"Cʗǃ":
self.trans[char] = u"!"
# Punctuation and typography
for char in u"«»“”„¨":
self.trans[char] = u'"'
for char in u"‘’′":
self.trans[char] = u"'"
self.trans[u"•"] = u"*"
self.trans[u"@"] = u"(at)"
self.trans[u"¤"] = u"$"
self.trans[u"¢"] = u"c"
self.trans[u"€"] = u"E"
self.trans[u"£"] = u"L"
self.trans[u"¥"] = u"yen"
self.trans[u"†"] = u"+"
self.trans[u"‡"] = u"++"
self.trans[u"°"] = u":"
self.trans[u"¡"] = u"!"
self.trans[u"¿"] = u"?"
self.trans[u"‰"] = u"o/oo"
self.trans[u"‱"] = u"o/ooo"
for char in u"¶§":
self.trans[char] = u">"
for char in u"…":
self.trans[char] = u"..."
for char in u"‒–—―":
self.trans[char] = u"-"
for char in u"·":
self.trans[char] = u" "
self.trans[u"¦"] = u"|"
self.trans[u"⁂"] = u"***"
self.trans[u"◊"] = u"<>"
self.trans[u"‽"] = u"?!"
self.trans[u"؟"] = u";-)"
self.trans[u"¹"] = u"1"
self.trans[u"²"] = u"2"
self.trans[u"³"] = u"3"
# Cyrillic
self.trans.update({u"А": u"A", u"а": u"a", u"Б": u"B", u"б": u"b",
u"В": u"V", u"в": u"v", u"Г": u"G", u"г": u"g",
u"Д": u"D", u"д": u"d", u"Е": u"E", u"е": u"e",
u"Ж": u"Zh", u"ж": u"zh", u"З": u"Z", u"з": u"z",
u"И": u"I", u"и": u"i", u"Й": u"J", u"й": u"j",
u"К": u"K", u"к": u"k", u"Л": u"L", u"л": u"l",
u"М": u"M", u"м": u"m", u"Н": u"N", u"н": u"n",
u"О": u"O", u"о": u"o", u"П": u"P", u"п": u"p",
u"Р": u"R", u"р": u"r", u"С": u"S", u"с": u"s",
u"Т": u"T", u"т": u"t", u"У": u"U", u"у": u"u",
u"Ф": u"F", u"ф": u"f", u"х": u"kh", u"Ц": u"C",
u"ц": u"c", u"Ч": u"Ch", u"ч": u"ch", u"Ш": u"Sh",
u"ш": u"sh", u"Щ": u"Shch", u"щ": u"shch", u"Ь": u"'",
u"ь": "'", u"Ъ": u'"', u"ъ": '"', u"Ю": u"Yu",
u"ю": u"yu", u"Я": u"Ya", u"я": u"ya", u"Х": u"Kh",
u"Χ": u"Kh"})
        # Additional Cyrillic letters, most occurring in only one or a few languages
self.trans.update({u"Ы": u"Y", u"ы": u"y", u"Ё": u"Ë", u"ё": u"ë",
u"Э": u"È", u"Ѐ": u"È", u"э": u"è", u"ѐ": u"è",
u"І": u"I", u"і": u"i", u"Ї": u"Ji", u"ї": u"ji",
u"Є": u"Je", u"є": u"je", u"Ґ": u"G", u"Ҝ": u"G",
u"ґ": u"g", u"ҝ": u"g", u"Ђ": u"Dj", u"ђ": u"dj",
u"Ӣ": u"Y", u"ӣ": u"y", u"Љ": u"Lj", u"љ": u"lj",
u"Њ": u"Nj", u"њ": u"nj", u"Ћ": u"Cj", u"ћ": u"cj",
u"Җ": u"Zhj", u"җ": u"zhj", u"Ѓ": u"Gj", u"ѓ": u"gj",
u"Ќ": u"Kj", u"ќ": u"kj", u"Ӣ": u"Ii", u"ӣ": u"ii",
u"Ӯ": u"U", u"ӯ": u"u", u"Ҳ": u"H", u"ҳ": u"h",
u"Ҷ": u"Dz", u"ҷ": u"dz", u"Ө": u"Ô", u"Ӫ": u"Ô",
u"ө": u"ô", u"ӫ": u"ô", u"Ү": u"Y", u"ү": u"y", u"Һ": u"H",
u"һ": u"h", u"Ә": u"AE", u"Ӕ": u"AE", u"ә": u"ae",
u"Ӛ": u"Ë", u"Ӭ": u"Ë", u"ӛ": u"ë", u"ӭ": u"ë", u"Җ": u"Zhj",
u"җ": u"zhj", u"Ұ": u"U", u"ұ": u"u", u"ў": u"ù", u"Ў": u"Ù",
u"ѝ": u"ì", u"Ѝ": u"Ì", u"Ӑ": u"A", u"ă": u"a", u"Ӓ": u"Ä",
u"ҿ": u"ä", u"Ҽ": u"Ts", u"Ҿ": u"Ts", u"ҽ": u"ts", u"ҿ": u"ts",
u"Ҙ": u"Dh", u"ҙ": u"dh", u"Ӏ": u"", u"ӏ": u"", u"Ӆ": u"L",
u"ӆ": u"l", u"Ӎ": u"M", u"ӎ": u"m", u"Ӧ": u"Ö", u"ӧ": u"ö",
u"Ҩ": u"u", u"ҩ": u"u", u"Ҧ": u"Ph", u"ҧ": u"ph", u"Ҏ": u"R",
u"ҏ": u"r", u"Ҫ": u"Th", u"ҫ": u"th", u"Ҭ": u"T", u"ҭ": u"t",
u"Ӯ": u"Û", u"ӯ": u"û", u"Ұ": u"U", u"Ӹ": u"U", u"ұ": u"u",
u"ӹ": u"u", u"Ҵ": u"Tts", u"ҵ": u"tts", u"Ӵ": u"Ch", u"ӵ": u"ch"})
for char in u"ЈӤҊ":
self.trans[char] = u"J"
for char in u"јӥҋ":
self.trans[char] = u"j"
for char in u"ЏӁӜҶ":
self.trans[char] = u"Dzh"
for char in u"џӂӝҷ":
self.trans[char] = u"dzh"
for char in u"ЅӞӠӋҸ":
self.trans[char] = u"Dz"
for char in u"ѕӟӡӌҹ":
self.trans[char] = u"dz"
for char in u"ҒӶҔ":
self.trans[char] = u"G"
for char in u"ғӷҕ":
self.trans[char] = u"g"
for char in u"ҚҞҠӃ":
self.trans[char] = u"Q"
for char in u"қҟҡӄ":
self.trans[char] = u"q"
for char in u"ҢҤӉӇ":
self.trans[char] = u"Ng"
for char in u"ңҥӊӈ":
self.trans[char] = u"ng"
for char in u"ӖѢҌ":
self.trans[char] = u"E"
for char in u"ӗѣҍ":
self.trans[char] = u"e"
for char in u"ӲӰҮ":
self.trans[char] = u"Ü"
for char in u"ӳӱү":
self.trans[char] = u"ü"
# Archaic Cyrillic letters
self.trans.update({u"Ѹ": u"Ou", u"ѹ": u"ou", u"Ѡ": u"O", u"Ѻ": u"O", u"ѡ": u"o",
u"ѻ": u"o", u"Ѿ": u"Ot", u"ѿ": u"ot", u"Ѣ": u"E", u"ѣ": u"e",
u"Ѥ": u"Ei", u"Ѧ": u"Ei", u"ѥ": u"ei", u"ѧ": u"ei", u"Ѫ": u"Ai",
u"ѫ": u"ai", u"Ѯ": u"X", u"ѯ": u"x", u"Ѱ": u"Ps", u"ѱ": u"ps",
u"Ѳ": u"Th", u"ѳ": u"th", u"Ѵ": u"Ü", u"Ѷ": u"Ü", u"ѵ": u"ü"})
# Hebrew alphabet
for char in u"אע":
self.trans[char] = u"'"
self.trans[u"ב"] = u"b"
self.trans[u"ג"] = u"g"
self.trans[u"ד"] = u"d"
self.trans[u"ה"] = u"h"
self.trans[u"ו"] = u"v"
self.trans[u"ז"] = u"z"
self.trans[u"ח"] = u"kh"
self.trans[u"ט"] = u"t"
self.trans[u"י"] = u"y"
for char in u"ךכ":
self.trans[char] = u"k"
self.trans[u"ל"] = u"l"
for char in u"םמ":
self.trans[char] = u"m"
for char in u"ןנ":
self.trans[char] = u"n"
self.trans[u"ס"] = u"s"
for char in u"ףפ":
self.trans[char] = u"ph"
for char in u"ץצ":
self.trans[char] = u"ts"
self.trans[u"ק"] = u"q"
self.trans[u"ר"] = u"r"
self.trans[u"ש"] = u"sh"
self.trans[u"ת"] = u"th"
# Arab alphabet
for char in u"اﺍﺎ":
self.trans[char] = u"a"
for char in u"بﺏﺐﺒﺑ":
self.trans[char] = u"b"
for char in u"تﺕﺖﺘﺗ":
self.trans[char] = u"t"
for char in u"ثﺙﺚﺜﺛ":
self.trans[char] = u"th"
for char in u"جﺝﺞﺠﺟ":
self.trans[char] = u"g"
for char in u"حﺡﺢﺤﺣ":
self.trans[char] = u"h"
for char in u"خﺥﺦﺨﺧ":
self.trans[char] = u"kh"
for char in u"دﺩﺪ":
self.trans[char] = u"d"
for char in u"ذﺫﺬ":
self.trans[char] = u"dh"
for char in u"رﺭﺮ":
self.trans[char] = u"r"
for char in u"زﺯﺰ":
self.trans[char] = u"z"
for char in u"سﺱﺲﺴﺳ":
self.trans[char] = u"s"
for char in u"شﺵﺶﺸﺷ":
self.trans[char] = u"sh"
for char in u"صﺹﺺﺼﺻ":
self.trans[char] = u"s"
for char in u"ضﺽﺾﻀﺿ":
self.trans[char] = u"d"
for char in u"طﻁﻂﻄﻃ":
self.trans[char] = u"t"
for char in u"ظﻅﻆﻈﻇ":
self.trans[char] = u"z"
for char in u"عﻉﻊﻌﻋ":
self.trans[char] = u"'"
for char in u"غﻍﻎﻐﻏ":
self.trans[char] = u"gh"
for char in u"فﻑﻒﻔﻓ":
self.trans[char] = u"f"
for char in u"قﻕﻖﻘﻗ":
self.trans[char] = u"q"
for char in u"كﻙﻚﻜﻛک":
self.trans[char] = u"k"
for char in u"لﻝﻞﻠﻟ":
self.trans[char] = u"l"
for char in u"مﻡﻢﻤﻣ":
self.trans[char] = u"m"
for char in u"نﻥﻦﻨﻧ":
self.trans[char] = u"n"
for char in u"هﻩﻪﻬﻫ":
self.trans[char] = u"h"
for char in u"وﻭﻮ":
self.trans[char] = u"w"
for char in u"یيﻱﻲﻴﻳ":
self.trans[char] = u"y"
# Arabic - additional letters, modified letters and ligatures
self.trans[u"ﺀ"] = u"'"
for char in u"آﺁﺂ":
self.trans[char] = u"'a"
for char in u"ةﺓﺔ":
self.trans[char] = u"th"
for char in u"ىﻯﻰ":
self.trans[char] = u"á"
for char in u"یﯼﯽﯿﯾ":
self.trans[char] = u"y"
self.trans[u"؟"] = u"?"
# Arabic - ligatures
for char in u"ﻻﻼ":
self.trans[char] = u"la"
self.trans[u"ﷲ"] = u"llah"
for char in u"إأ":
self.trans[char] = u"a'"
self.trans[u"ؤ"] = u"w'"
self.trans[u"ئ"] = u"y'"
for char in u"◌◌":
self.trans[char] = u"" # indicates absence of vowels
# Arabic vowels
self.trans[u"◌"] = u"a"
self.trans[u"◌"] = u"u"
self.trans[u"◌"] = u"i"
self.trans[u"◌"] = u"a"
self.trans[u"◌"] = u"ay"
self.trans[u"◌"] = u"ay"
self.trans[u"◌"] = u"u"
self.trans[u"◌"] = u"iy"
# Arab numerals
for char in u"٠۰":
self.trans[char] = u"0"
for char in u"١۱":
self.trans[char] = u"1"
for char in u"٢۲":
self.trans[char] = u"2"
for char in u"٣۳":
self.trans[char] = u"3"
for char in u"٤۴":
self.trans[char] = u"4"
for char in u"٥۵":
self.trans[char] = u"5"
for char in u"٦۶":
self.trans[char] = u"6"
for char in u"٧۷":
self.trans[char] = u"7"
for char in u"٨۸":
self.trans[char] = u"8"
for char in u"٩۹":
self.trans[char] = u"9"
# Perso-Arabic
for char in u"پﭙﭙپ":
self.trans[char] = u"p"
for char in u"چچچچ":
self.trans[char] = u"ch"
for char in u"ژژ":
self.trans[char] = u"zh"
for char in u"گﮔﮕﮓ":
self.trans[char] = u"g"
# Greek
self.trans.update({u"Α": u"A", u"α": u"a", u"Β": u"B", u"β": u"b", u"Γ": u"G",
u"γ": u"g", u"Δ": u"D", u"δ": u"d", u"Ε": u"E", u"ε": u"e",
u"Ζ": u"Z", u"ζ": u"z", u"Η": u"I", u"η": u"i", u"θ": u"th",
u"Θ": u"Th", u"Ι": u"I", u"ι": u"i", u"Κ": u"K", u"κ": u"k",
u"Λ": u"L", u"λ": u"l", u"Μ": u"M", u"μ": u"m", u"Ν": u"N",
u"ν": u"n", u"Ξ": u"X", u"ξ": u"x", u"Ο": u"O", u"ο": u"o",
u"Π": u"P", u"π": u"p", u"Ρ": u"R", u"ρ": u"r", u"Σ": u"S",
u"σ": u"s", u"ς": u"s", u"Τ": u"T", u"τ": u"t", u"Υ": u"Y",
u"υ": u"y", u"Φ": u"F", u"φ": u"f", u"Ψ": u"Ps", u"ψ": u"ps",
u"Ω": u"O", u"ω": u"o", u"ϗ": u"&", u"Ϛ": u"St", u"ϛ": u"st",
u"Ϙ": u"Q", u"Ϟ": u"Q", u"ϙ": u"q", u"ϟ": u"q", u"Ϻ": u"S",
u"ϻ": u"s", u"Ϡ": u"Ss", u"ϡ": u"ss", u"Ϸ": u"Sh", u"ϸ": u"sh",
u"·": u":", u"Ά": u"Á", u"ά": u"á", u"Έ": u"É", u"Ή": u"É",
u"έ": u"é", u"ή": u"é", u"Ί": u"Í", u"ί": u"í", u"Ϊ": u"Ï",
u"ϊ": u"ï", u"ΐ": u"ï", u"Ό": u"Ó", u"ό": u"ó", u"Ύ": u"Ý",
u"ύ": u"ý", u"Ϋ": u"Y", u"ϋ": u"ÿ", u"ΰ": u"ÿ", u"Ώ": u"Ó",
u"ώ": u"ó"})
# Japanese (katakana and hiragana)
for char in u"アァあ":
self.trans[char] = u"a"
for char in u"イィい":
self.trans[char] = u"i"
for char in u"ウう":
self.trans[char] = u"u"
for char in u"エェえ":
self.trans[char] = u"e"
for char in u"オォお":
self.trans[char] = u"o"
for char in u"ャや":
self.trans[char] = u"ya"
for char in u"ュゆ":
self.trans[char] = u"yu"
for char in u"ョよ":
self.trans[char] = u"yo"
for char in u"カか":
self.trans[char] = u"ka"
for char in u"キき":
self.trans[char] = u"ki"
for char in u"クく":
self.trans[char] = u"ku"
for char in u"ケけ":
self.trans[char] = u"ke"
for char in u"コこ":
self.trans[char] = u"ko"
for char in u"サさ":
self.trans[char] = u"sa"
for char in u"シし":
self.trans[char] = u"shi"
for char in u"スす":
self.trans[char] = u"su"
for char in u"セせ":
self.trans[char] = u"se"
for char in u"ソそ":
self.trans[char] = u"so"
for char in u"タた":
self.trans[char] = u"ta"
for char in u"チち":
self.trans[char] = u"chi"
for char in u"ツつ":
self.trans[char] = u"tsu"
for char in u"テて":
self.trans[char] = u"te"
for char in u"トと":
self.trans[char] = u"to"
for char in u"ナな":
self.trans[char] = u"na"
for char in u"ニに":
self.trans[char] = u"ni"
for char in u"ヌぬ":
self.trans[char] = u"nu"
for char in u"ネね":
self.trans[char] = u"ne"
for char in u"ノの":
self.trans[char] = u"no"
for char in u"ハは":
self.trans[char] = u"ha"
for char in u"ヒひ":
self.trans[char] = u"hi"
for char in u"フふ":
self.trans[char] = u"fu"
for char in u"ヘへ":
self.trans[char] = u"he"
for char in u"ホほ":
self.trans[char] = u"ho"
for char in u"マま":
self.trans[char] = u"ma"
for char in u"ミみ":
self.trans[char] = u"mi"
for char in u"ムむ":
self.trans[char] = u"mu"
for char in u"メめ":
self.trans[char] = u"me"
for char in u"モも":
self.trans[char] = u"mo"
for char in u"ラら":
self.trans[char] = u"ra"
for char in u"リり":
self.trans[char] = u"ri"
for char in u"ルる":
self.trans[char] = u"ru"
for char in u"レれ":
self.trans[char] = u"re"
for char in u"ロろ":
self.trans[char] = u"ro"
for char in u"ワわ":
self.trans[char] = u"wa"
for char in u"ヰゐ":
self.trans[char] = u"wi"
for char in u"ヱゑ":
self.trans[char] = u"we"
for char in u"ヲを":
self.trans[char] = u"wo"
for char in u"ンん":
self.trans[char] = u"n"
for char in u"ガが":
self.trans[char] = u"ga"
for char in u"ギぎ":
self.trans[char] = u"gi"
for char in u"グぐ":
self.trans[char] = u"gu"
for char in u"ゲげ":
self.trans[char] = u"ge"
for char in u"ゴご":
self.trans[char] = u"go"
for char in u"ザざ":
self.trans[char] = u"za"
for char in u"ジじ":
self.trans[char] = u"ji"
for char in u"ズず":
self.trans[char] = u"zu"
for char in u"ゼぜ":
self.trans[char] = u"ze"
for char in u"ゾぞ":
self.trans[char] = u"zo"
for char in u"ダだ":
self.trans[char] = u"da"
for char in u"ヂぢ":
self.trans[char] = u"dji"
for char in u"ヅづ":
self.trans[char] = u"dzu"
for char in u"デで":
self.trans[char] = u"de"
for char in u"ドど":
self.trans[char] = u"do"
for char in u"バば":
self.trans[char] = u"ba"
for char in u"ビび":
self.trans[char] = u"bi"
for char in u"ブぶ":
self.trans[char] = u"bu"
for char in u"ベべ":
self.trans[char] = u"be"
for char in u"ボぼ":
self.trans[char] = u"bo"
for char in u"パぱ":
self.trans[char] = u"pa"
for char in u"ピぴ":
self.trans[char] = u"pi"
for char in u"プぷ":
self.trans[char] = u"pu"
for char in u"ペぺ":
self.trans[char] = u"pe"
for char in u"ポぽ":
self.trans[char] = u"po"
for char in u"ヴゔ":
self.trans[char] = u"vu"
self.trans[u"ヷ"] = u"va"
self.trans[u"ヸ"] = u"vi"
self.trans[u"ヹ"] = u"ve"
self.trans[u"ヺ"] = u"vo"
# Japanese and Chinese punctuation and typography
for char in u"・·":
self.trans[char] = u" "
for char in u"〃『』《》":
self.trans[char] = u'"'
for char in u"「」〈〉〘〙〚〛":
self.trans[char] = u"'"
for char in u"(〔":
self.trans[char] = u"("
for char in u")〕":
self.trans[char] = u")"
for char in u"[【〖":
self.trans[char] = u"["
for char in u"]】〗":
self.trans[char] = u"]"
for char in u"{":
self.trans[char] = u"{"
for char in u"}":
self.trans[char] = u"}"
for char in u"っ":
self.trans[char] = u":"
for char in u"ー":
self.trans[char] = u"h"
for char in u"゛":
self.trans[char] = u"'"
for char in u"゜":
self.trans[char] = u"p"
for char in u"。":
self.trans[char] = u". "
for char in u"、":
self.trans[char] = u", "
for char in u"・":
self.trans[char] = u" "
for char in u"〆":
self.trans[char] = u"shime"
for char in u"〜":
self.trans[char] = u"-"
for char in u"…":
self.trans[char] = u"..."
for char in u"‥":
self.trans[char] = u".."
for char in u"ヶ":
self.trans[char] = u"months"
for char in u"•◦":
self.trans[char] = u"_"
for char in u"※*":
self.trans[char] = u"*"
for char in u"Ⓧ":
self.trans[char] = u"(X)"
for char in u"Ⓨ":
self.trans[char] = u"(Y)"
for char in u"!":
self.trans[char] = u"!"
for char in u"?":
self.trans[char] = u"?"
for char in u";":
self.trans[char] = u";"
for char in u":":
self.trans[char] = u":"
for char in u"。":
self.trans[char] = u"."
for char in u",、":
self.trans[char] = u","
# Georgian
for char in u"ა":
self.trans[char] = u"a"
for char in u"ბ":
self.trans[char] = u"b"
for char in u"გ":
self.trans[char] = u"g"
for char in u"დ":
self.trans[char] = u"d"
for char in u"ეჱ":
self.trans[char] = u"e"
for char in u"ვ":
self.trans[char] = u"v"
for char in u"ზ":
self.trans[char] = u"z"
for char in u"თ":
self.trans[char] = u"th"
for char in u"ი":
self.trans[char] = u"i"
for char in u"კ":
self.trans[char] = u"k"
for char in u"ლ":
self.trans[char] = u"l"
for char in u"მ":
self.trans[char] = u"m"
for char in u"ნ":
self.trans[char] = u"n"
for char in u"ო":
self.trans[char] = u"o"
for char in u"პ":
self.trans[char] = u"p"
for char in u"ჟ":
self.trans[char] = u"zh"
for char in u"რ":
self.trans[char] = u"r"
for char in u"ს":
self.trans[char] = u"s"
for char in u"ტ":
self.trans[char] = u"t"
for char in u"უ":
self.trans[char] = u"u"
for char in u"ფ":
self.trans[char] = u"ph"
for char in u"ქ":
self.trans[char] = u"q"
for char in u"ღ":
self.trans[char] = u"gh"
for char in u"ყ":
self.trans[char] = u"q'"
for char in u"შ":
self.trans[char] = u"sh"
for char in u"ჩ":
self.trans[char] = u"ch"
for char in u"ც":
self.trans[char] = u"ts"
for char in u"ძ":
self.trans[char] = u"dz"
for char in u"წ":
self.trans[char] = u"ts'"
for char in u"ჭ":
self.trans[char] = u"ch'"
for char in u"ხ":
self.trans[char] = u"kh"
for char in u"ჯ":
self.trans[char] = u"j"
for char in u"ჰ":
self.trans[char] = u"h"
for char in u"ჳ":
self.trans[char] = u"w"
for char in u"ჵ":
self.trans[char] = u"o"
for char in u"ჶ":
self.trans[char] = u"f"
# Devanagari
for char in u"पप":
self.trans[char] = u"p"
for char in u"अ":
self.trans[char] = u"a"
for char in u"आा":
self.trans[char] = u"aa"
for char in u"प":
self.trans[char] = u"pa"
for char in u"इि":
self.trans[char] = u"i"
for char in u"ईी":
self.trans[char] = u"ii"
for char in u"उु":
self.trans[char] = u"u"
for char in u"ऊू":
self.trans[char] = u"uu"
for char in u"एे":
self.trans[char] = u"e"
for char in u"ऐै":
self.trans[char] = u"ai"
for char in u"ओो":
self.trans[char] = u"o"
for char in u"औौ":
self.trans[char] = u"au"
for char in u"ऋृर":
self.trans[char] = u"r"
for char in u"ॠॄ":
self.trans[char] = u"rr"
for char in u"ऌॢल":
self.trans[char] = u"l"
for char in u"ॡॣ":
self.trans[char] = u"ll"
for char in u"क":
self.trans[char] = u"k"
for char in u"ख":
self.trans[char] = u"kh"
for char in u"ग":
self.trans[char] = u"g"
for char in u"घ":
self.trans[char] = u"gh"
for char in u"ङ":
self.trans[char] = u"ng"
for char in u"च":
self.trans[char] = u"c"
for char in u"छ":
self.trans[char] = u"ch"
for char in u"ज":
self.trans[char] = u"j"
for char in u"झ":
self.trans[char] = u"jh"
for char in u"ञ":
self.trans[char] = u"ñ"
for char in u"टत":
self.trans[char] = u"t"
for char in u"ठथ":
self.trans[char] = u"th"
for char in u"डद":
self.trans[char] = u"d"
for char in u"ढध":
self.trans[char] = u"dh"
for char in u"णन":
self.trans[char] = u"n"
for char in u"फ":
self.trans[char] = u"ph"
for char in u"ब":
self.trans[char] = u"b"
for char in u"भ":
self.trans[char] = u"bh"
for char in u"म":
self.trans[char] = u"m"
for char in u"य":
self.trans[char] = u"y"
for char in u"व":
self.trans[char] = u"v"
for char in u"श":
self.trans[char] = u"sh"
for char in u"षस":
self.trans[char] = u"s"
for char in u"ह":
self.trans[char] = u"h"
for char in u"क":
self.trans[char] = u"x"
for char in u"त":
self.trans[char] = u"tr"
for char in u"ज":
self.trans[char] = u"gj"
for char in u"क़":
self.trans[char] = u"q"
for char in u"फ":
self.trans[char] = u"f"
for char in u"ख":
self.trans[char] = u"hh"
for char in u"H":
self.trans[char] = u"gh"
for char in u"ज":
self.trans[char] = u"z"
for char in u"डढ":
self.trans[char] = u"r"
# Devanagari ligatures (possibly incomplete and/or incorrect)
for char in u"ख्":
self.trans[char] = u"khn"
for char in u"त":
self.trans[char] = u"tn"
for char in u"द्":
self.trans[char] = u"dn"
for char in u"श":
self.trans[char] = u"cn"
for char in u"ह्":
self.trans[char] = u"fn"
for char in u"अँ":
self.trans[char] = u"m"
for char in u"॒॑":
self.trans[char] = u""
for char in u"०":
self.trans[char] = u"0"
for char in u"१":
self.trans[char] = u"1"
for char in u"२":
self.trans[char] = u"2"
for char in u"३":
self.trans[char] = u"3"
for char in u"४":
self.trans[char] = u"4"
for char in u"५":
self.trans[char] = u"5"
for char in u"६":
self.trans[char] = u"6"
for char in u"७":
self.trans[char] = u"7"
for char in u"८":
self.trans[char] = u"8"
for char in u"९":
self.trans[char] = u"9"
# Armenian
for char in u"Ա":
self.trans[char] = u"A"
for char in u"ա":
self.trans[char] = u"a"
for char in u"Բ":
self.trans[char] = u"B"
for char in u"բ":
self.trans[char] = u"b"
for char in u"Գ":
self.trans[char] = u"G"
for char in u"գ":
self.trans[char] = u"g"
for char in u"Դ":
self.trans[char] = u"D"
for char in u"դ":
self.trans[char] = u"d"
for char in u"Ե":
self.trans[char] = u"Je"
for char in u"ե":
self.trans[char] = u"e"
for char in u"Զ":
self.trans[char] = u"Z"
for char in u"զ":
self.trans[char] = u"z"
for char in u"Է":
self.trans[char] = u"É"
for char in u"է":
self.trans[char] = u"é"
for char in u"Ը":
self.trans[char] = u"Ë"
for char in u"ը":
self.trans[char] = u"ë"
for char in u"Թ":
self.trans[char] = u"Th"
for char in u"թ":
self.trans[char] = u"th"
for char in u"Ժ":
self.trans[char] = u"Zh"
for char in u"ժ":
self.trans[char] = u"zh"
for char in u"Ի":
self.trans[char] = u"I"
for char in u"ի":
self.trans[char] = u"i"
for char in u"Լ":
self.trans[char] = u"L"
for char in u"լ":
self.trans[char] = u"l"
for char in u"Խ":
self.trans[char] = u"Ch"
for char in u"խ":
self.trans[char] = u"ch"
for char in u"Ծ":
self.trans[char] = u"Ts"
for char in u"ծ":
self.trans[char] = u"ts"
for char in u"Կ":
self.trans[char] = u"K"
for char in u"կ":
self.trans[char] = u"k"
for char in u"Հ":
self.trans[char] = u"H"
for char in u"հ":
self.trans[char] = u"h"
for char in u"Ձ":
self.trans[char] = u"Dz"
for char in u"ձ":
self.trans[char] = u"dz"
for char in u"Ղ":
self.trans[char] = u"R"
for char in u"ղ":
self.trans[char] = u"r"
for char in u"Ճ":
self.trans[char] = u"Cz"
for char in u"ճ":
self.trans[char] = u"cz"
for char in u"Մ":
self.trans[char] = u"M"
for char in u"մ":
self.trans[char] = u"m"
for char in u"Յ":
self.trans[char] = u"J"
for char in u"յ":
self.trans[char] = u"j"
for char in u"Ն":
self.trans[char] = u"N"
for char in u"ն":
self.trans[char] = u"n"
for char in u"Շ":
self.trans[char] = u"S"
for char in u"շ":
self.trans[char] = u"s"
for char in u"Շ":
self.trans[char] = u"Vo"
for char in u"շ":
self.trans[char] = u"o"
for char in u"Չ":
self.trans[char] = u"Tsh"
for char in u"չ":
self.trans[char] = u"tsh"
for char in u"Պ":
self.trans[char] = u"P"
for char in u"պ":
self.trans[char] = u"p"
for char in u"Ջ":
self.trans[char] = u"Dz"
for char in u"ջ":
self.trans[char] = u"dz"
for char in u"Ռ":
self.trans[char] = u"R"
for char in u"ռ":
self.trans[char] = u"r"
for char in u"Ս":
self.trans[char] = u"S"
for char in u"ս":
self.trans[char] = u"s"
for char in u"Վ":
self.trans[char] = u"V"
for char in u"վ":
self.trans[char] = u"v"
for char in u"Տ":
self.trans[char] = u"T'"
for char in u"տ":
self.trans[char] = u"t'"
for char in u"Ր":
self.trans[char] = u"R"
for char in u"ր":
self.trans[char] = u"r"
for char in u"Ց":
self.trans[char] = u"Tsh"
for char in u"ց":
self.trans[char] = u"tsh"
for char in u"Ւ":
self.trans[char] = u"V"
for char in u"ւ":
self.trans[char] = u"v"
for char in u"Փ":
self.trans[char] = u"Ph"
for char in u"փ":
self.trans[char] = u"ph"
for char in u"Ք":
self.trans[char] = u"Kh"
for char in u"ք":
self.trans[char] = u"kh"
for char in u"Օ":
self.trans[char] = u"O"
for char in u"օ":
self.trans[char] = u"o"
for char in u"Ֆ":
self.trans[char] = u"F"
for char in u"ֆ":
self.trans[char] = u"f"
for char in u"և":
self.trans[char] = u"&"
for char in u"՟":
self.trans[char] = u"."
for char in u"՞":
self.trans[char] = u"?"
for char in u"՝":
self.trans[char] = u";"
for char in u"՛":
self.trans[char] = u""
# Tamil
for char in u"க்":
self.trans[char] = u"k"
for char in u"ஙண்ந்ன்":
self.trans[char] = u"n"
for char in u"ச":
self.trans[char] = u"c"
for char in u"ஞ்":
self.trans[char] = u"ñ"
for char in u"ட்":
self.trans[char] = u"th"
for char in u"த":
self.trans[char] = u"t"
for char in u"ப":
self.trans[char] = u"p"
for char in u"ம்":
self.trans[char] = u"m"
for char in u"ய்":
self.trans[char] = u"y"
for char in u"ர்ழ்ற":
self.trans[char] = u"r"
for char in u"ல்ள":
self.trans[char] = u"l"
for char in u"வ்":
self.trans[char] = u"v"
for char in u"ஜ":
self.trans[char] = u"j"
for char in u"ஷ":
self.trans[char] = u"sh"
for char in u"ஸ":
self.trans[char] = u"s"
for char in u"ஹ":
self.trans[char] = u"h"
for char in u"க்ஷ":
self.trans[char] = u"x"
for char in u"அ":
self.trans[char] = u"a"
for char in u"ஆ":
self.trans[char] = u"aa"
for char in u"இ":
self.trans[char] = u"i"
for char in u"ஈ":
self.trans[char] = u"ii"
for char in u"உ":
self.trans[char] = u"u"
for char in u"ஊ":
self.trans[char] = u"uu"
for char in u"எ":
self.trans[char] = u"e"
for char in u"ஏ":
self.trans[char] = u"ee"
for char in u"ஐ":
self.trans[char] = u"ai"
for char in u"ஒ":
self.trans[char] = u"o"
for char in u"ஓ":
self.trans[char] = u"oo"
for char in u"ஔ":
self.trans[char] = u"au"
for char in u"ஃ":
self.trans[char] = ""
# Bengali
for char in u"অ":
self.trans[char] = u"ô"
for char in u"আা":
self.trans[char] = u"a"
for char in u"ইিঈী":
self.trans[char] = u"i"
for char in u"উুঊূ":
self.trans[char] = u"u"
for char in u"ঋৃ":
self.trans[char] = u"ri"
for char in u"এেয়":
self.trans[char] = u"e"
for char in u"ঐৈ":
self.trans[char] = u"oi"
for char in u"ওো":
self.trans[char] = u"o"
for char in u"ঔৌ":
self.trans[char] = "ou"
for char in u"্":
self.trans[char] = u""
for char in u"ৎ":
self.trans[char] = u"t"
for char in u"ং":
self.trans[char] = u"n"
for char in u"ঃ":
self.trans[char] = u"h"
for char in u"ঁ":
self.trans[char] = u"ñ"
for char in u"ক":
self.trans[char] = u"k"
for char in u"খ":
self.trans[char] = u"kh"
for char in u"গ":
self.trans[char] = u"g"
for char in u"ঘ":
self.trans[char] = u"gh"
for char in u"ঙ":
self.trans[char] = u"ng"
for char in u"চ":
self.trans[char] = u"ch"
for char in u"ছ":
self.trans[char] = u"chh"
for char in u"জ":
self.trans[char] = u"j"
for char in u"ঝ":
self.trans[char] = u"jh"
for char in u"ঞ":
self.trans[char] = u"n"
for char in u"টত":
self.trans[char] = u"t"
for char in u"ঠথ":
self.trans[char] = u"th"
for char in u"ডদ":
self.trans[char] = u"d"
for char in u"ঢধ":
self.trans[char] = u"dh"
for char in u"ণন":
self.trans[char] = u"n"
for char in u"প":
self.trans[char] = u"p"
for char in u"ফ":
self.trans[char] = u"ph"
for char in u"ব":
self.trans[char] = u"b"
for char in u"ভ":
self.trans[char] = u"bh"
for char in u"ম":
self.trans[char] = u"m"
for char in u"য":
self.trans[char] = u"dzh"
for char in u"র":
self.trans[char] = u"r"
for char in u"ল":
self.trans[char] = u"l"
for char in u"শ":
self.trans[char] = u"s"
for char in u"হ":
self.trans[char] = u"h"
for char in u"য়":
self.trans[char] = u"-"
for char in u"ড়":
self.trans[char] = u"r"
for char in u"ঢ":
self.trans[char] = u"rh"
for char in u"০":
self.trans[char] = u"0"
for char in u"১":
self.trans[char] = u"1"
for char in u"২":
self.trans[char] = u"2"
for char in u"৩":
self.trans[char] = u"3"
for char in u"৪":
self.trans[char] = u"4"
for char in u"৫":
self.trans[char] = u"5"
for char in u"৬":
self.trans[char] = u"6"
for char in u"৭":
self.trans[char] = u"7"
for char in u"৮":
self.trans[char] = u"8"
for char in u"৯":
self.trans[char] = u"9"
        # Thai (because of complications of the alphabet, transliterations
        # are very imprecise here)
for char in u"ก":
self.trans[char] = u"k"
for char in u"ขฃคฅฆ":
self.trans[char] = u"kh"
for char in u"ง":
self.trans[char] = u"ng"
for char in u"จฉชฌ":
self.trans[char] = u"ch"
for char in u"ซศษส":
self.trans[char] = u"s"
for char in u"ญย":
self.trans[char] = u"y"
for char in u"ฎด":
self.trans[char] = u"d"
for char in u"ฏต":
self.trans[char] = u"t"
for char in u"ฐฑฒถทธ":
self.trans[char] = u"th"
for char in u"ณน":
self.trans[char] = u"n"
for char in u"บ":
self.trans[char] = u"b"
for char in u"ป":
self.trans[char] = u"p"
for char in u"ผพภ":
self.trans[char] = u"ph"
for char in u"ฝฟ":
self.trans[char] = u"f"
for char in u"ม":
self.trans[char] = u"m"
for char in u"ร":
self.trans[char] = u"r"
for char in u"ฤ":
self.trans[char] = u"rue"
for char in u"ๅ":
self.trans[char] = u":"
for char in u"ลฬ":
self.trans[char] = u"l"
for char in u"ฦ":
self.trans[char] = u"lue"
for char in u"ว":
self.trans[char] = u"w"
for char in u"หฮ":
self.trans[char] = u"h"
for char in u"อ":
self.trans[char] = u""
for char in u"ร":
self.trans[char] = u"ü"
for char in u"ว":
self.trans[char] = u"ua"
for char in u"อวโิ":
self.trans[char] = u"o"
for char in u"ะัา":
self.trans[char] = u"a"
for char in u"ว":
self.trans[char] = u"u"
for char in u"ำ":
self.trans[char] = u"am"
for char in u"ิ":
self.trans[char] = u"i"
for char in u"ี":
self.trans[char] = u"i:"
for char in u"ึ":
self.trans[char] = u"ue"
for char in u"ื":
self.trans[char] = u"ue:"
for char in u"ุ":
self.trans[char] = u"u"
for char in u"ู":
self.trans[char] = u"u:"
for char in u"เ็":
self.trans[char] = u"e"
for char in u"แ":
self.trans[char] = u"ae"
for char in u"ใไ":
self.trans[char] = u"ai"
for char in u"่้๊๋็์":
self.trans[char] = u""
for char in u"ฯ":
self.trans[char] = u"."
for char in u"ๆ":
self.trans[char] = u"(2)"
        # Korean (Revised Romanization system where possible; incomplete)
for char in u"국":
self.trans[char] = u"guk"
for char in u"명":
self.trans[char] = u"myeong"
for char in u"검":
self.trans[char] = u"geom"
for char in u"타":
self.trans[char] = u"ta"
for char in u"분":
self.trans[char] = u"bun"
for char in u"사":
self.trans[char] = u"sa"
for char in u"류":
self.trans[char] = u"ryu"
for char in u"포":
self.trans[char] = u"po"
for char in u"르":
self.trans[char] = u"reu"
for char in u"투":
self.trans[char] = u"tu"
for char in u"갈":
self.trans[char] = u"gal"
for char in u"어":
self.trans[char] = u"eo"
for char in u"노":
self.trans[char] = u"no"
for char in u"웨":
self.trans[char] = u"we"
for char in u"이":
self.trans[char] = u"i"
for char in u"라":
self.trans[char] = u"ra"
for char in u"틴":
self.trans[char] = u"tin"
for char in u"루":
self.trans[char] = u"ru"
for char in u"마":
self.trans[char] = u"ma"
for char in u"니":
self.trans[char] = u"ni"
for char in u"아":
self.trans[char] = u"a"
for char in u"독":
self.trans[char] = u"dok"
for char in u"일":
self.trans[char] = u"il"
for char in u"모":
self.trans[char] = u"mo"
for char in u"크":
self.trans[char] = u"keu"
for char in u"샤":
self.trans[char] = u"sya"
for char in u"영":
self.trans[char] = u"yeong"
for char in u"불":
self.trans[char] = u"bul"
for char in u"가":
self.trans[char] = u"ga"
for char in u"리":
self.trans[char] = u"ri"
for char in u"그":
self.trans[char] = u"geu"
for char in u"지":
self.trans[char] = u"ji"
for char in u"야":
self.trans[char] = u"ya"
for char in u"바":
self.trans[char] = u"ba"
for char in u"슈":
self.trans[char] = u"syu"
for char in u"키":
self.trans[char] = u"ki"
for char in u"프":
self.trans[char] = u"peu"
for char in u"랑":
self.trans[char] = u"rang"
for char in u"스":
self.trans[char] = u"seu"
for char in u"로":
self.trans[char] = u"ro"
for char in u"메":
self.trans[char] = u"me"
for char in u"역":
self.trans[char] = u"yeok"
for char in u"도":
self.trans[char] = u"do"
# Kannada
self.trans[u"ಅ"] = u"a"
for char in u"ಆಾ":
self.trans[char] = u"aa"
for char in u"ಇಿ":
self.trans[char] = u"i"
for char in u"ಈೀ":
self.trans[char] = u"ii"
for char in u"ಉು":
self.trans[char] = u"u"
for char in u"ಊೂ":
self.trans[char] = u"uu"
for char in u"ಋೂ":
self.trans[char] = u"r'"
for char in u"ಎೆ":
self.trans[char] = u"e"
for char in u"ಏೇ":
self.trans[char] = u"ee"
for char in u"ಐೈ":
self.trans[char] = u"ai"
for char in u"ಒೊ":
self.trans[char] = u"o"
for char in u"ಓೋ":
self.trans[char] = u"oo"
for char in u"ಔೌ":
self.trans[char] = u"au"
self.trans[u"ಂ"] = u"m'"
self.trans[u"ಃ"] = u"h'"
self.trans[u"ಕ"] = u"k"
self.trans[u"ಖ"] = u"kh"
self.trans[u"ಗ"] = u"g"
self.trans[u"ಘ"] = u"gh"
self.trans[u"ಙ"] = u"ng"
self.trans[u"ಚ"] = u"c"
self.trans[u"ಛ"] = u"ch"
self.trans[u"ಜ"] = u"j"
self.trans[u"ಝ"] = u"ny"
self.trans[u"ಟ"] = u"tt"
self.trans[u"ಠ"] = u"tth"
self.trans[u"ಡ"] = u"dd"
self.trans[u"ಢ"] = u"ddh"
self.trans[u"ಣ"] = u"nn"
self.trans[u"ತ"] = u"t"
self.trans[u"ಥ"] = u"th"
self.trans[u"ದ"] = u"d"
self.trans[u"ಧ"] = u"dh"
self.trans[u"ನ"] = u"n"
self.trans[u"ಪ"] = u"p"
self.trans[u"ಫ"] = u"ph"
self.trans[u"ಬ"] = u"b"
self.trans[u"ಭ"] = u"bh"
self.trans[u"ಮ"] = u"m"
self.trans[u"ಯ"] = u"y"
self.trans[u"ರ"] = u"r"
self.trans[u"ಲ"] = u"l"
self.trans[u"ವ"] = u"v"
self.trans[u"ಶ"] = u"sh"
self.trans[u"ಷ"] = u"ss"
self.trans[u"ಸ"] = u"s"
self.trans[u"ಹ"] = u"h"
self.trans[u"ಳ"] = u"ll"
self.trans[u"೦"] = u"0"
self.trans[u"೧"] = u"1"
self.trans[u"೨"] = u"2"
self.trans[u"೩"] = u"3"
self.trans[u"೪"] = u"4"
self.trans[u"೫"] = u"5"
self.trans[u"೬"] = u"6"
self.trans[u"೭"] = u"7"
self.trans[u"೮"] = u"8"
self.trans[u"೯"] = u"9"
# Telugu
for char in u"అ":
self.trans[char] = u"a"
for char in u"ఆా":
self.trans[char] = u"aa"
for char in u"ఇి":
self.trans[char] = u"i"
for char in u"ఈీ":
self.trans[char] = u"ii"
for char in u"ఉు":
self.trans[char] = u"u"
for char in u"ఊూ":
self.trans[char] = u"uu"
for char in u"ఋృ":
self.trans[char] = u"r'"
for char in u"ౠౄ":
self.trans[char] = u'r"'
self.trans[u"ఌ"] = u"l'"
self.trans[u"ౡ"] = u'l"'
for char in u"ఎె":
self.trans[char] = u"e"
for char in u"ఏే":
self.trans[char] = u"ee"
for char in u"ఐై":
self.trans[char] = u"ai"
for char in u"ఒొ":
self.trans[char] = u"o"
for char in u"ఓో":
self.trans[char] = u"oo"
for char in u"ఔౌ":
self.trans[char] = u"au"
self.trans[u"ం"] = u"'"
self.trans[u"ః"] = u'"'
self.trans[u"క"] = u"k"
self.trans[u"ఖ"] = u"kh"
self.trans[u"గ"] = u"g"
self.trans[u"ఘ"] = u"gh"
self.trans[u"ఙ"] = u"ng"
self.trans[u"చ"] = u"ts"
self.trans[u"ఛ"] = u"tsh"
self.trans[u"జ"] = u"j"
self.trans[u"ఝ"] = u"jh"
self.trans[u"ఞ"] = u"ñ"
for char in u"టత":
self.trans[char] = u"t"
for char in u"ఠథ":
self.trans[char] = u"th"
for char in u"డద":
self.trans[char] = u"d"
for char in u"ఢధ":
self.trans[char] = u"dh"
for char in u"ణన":
self.trans[char] = u"n"
self.trans[u"ప"] = u"p"
self.trans[u"ఫ"] = u"ph"
self.trans[u"బ"] = u"b"
self.trans[u"భ"] = u"bh"
self.trans[u"మ"] = u"m"
self.trans[u"య"] = u"y"
for char in u"రఱ":
self.trans[char] = u"r"
for char in u"లళ":
self.trans[char] = u"l"
self.trans[u"వ"] = u"v"
self.trans[u"శ"] = u"sh"
for char in u"షస":
self.trans[char] = u"s"
self.trans[u"హ"] = u"h"
self.trans[u"్"] = ""
for char in u"ంఁ":
self.trans[char] = u"^"
self.trans[u"ః"] = u"-"
self.trans[u"౦"] = u"0"
self.trans[u"౧"] = u"1"
self.trans[u"౨"] = u"2"
self.trans[u"౩"] = u"3"
self.trans[u"౪"] = u"4"
self.trans[u"౫"] = u"5"
self.trans[u"౬"] = u"6"
self.trans[u"౭"] = u"7"
self.trans[u"౮"] = u"8"
self.trans[u"౯"] = u"9"
self.trans[u"౹"] = u"1/4"
self.trans[u"౺"] = u"1/2"
self.trans[u"౻"] = u"3/4"
self.trans[u"౼"] = u"1/16"
self.trans[u"౽"] = u"1/8"
self.trans[u"౾"] = u"3/16"
        # Lao - note: pronunciation in initial position is used;
        # different pronunciation in final position is ignored
self.trans[u"ກ"] = "k"
for char in u"ຂຄ":
self.trans[char] = "kh"
self.trans[u"ງ"] = "ng"
self.trans[u"ຈ"] = "ch"
for char in u"ສຊ":
self.trans[char] = "s"
self.trans[u"ຍ"] = "ny"
self.trans[u"ດ"] = "d"
self.trans[u"ຕ"] = "t"
for char in u"ຖທ":
self.trans[char] = "th"
self.trans[u"ນ"] = "n"
self.trans[u"ບ"] = "b"
self.trans[u"ປ"] = "p"
for char in u"ຜພ":
self.trans[char] = "ph"
for char in u"ຝຟ":
self.trans[char] = "f"
for char in u"ມໝ":
self.trans[char] = "m"
self.trans[u"ຢ"] = "y"
for char in u"ຣຼ":
self.trans[char] = "r"
for char in u"ລຼ":
self.trans[char] = "l"
self.trans[u"ວ"] = "v"
for char in u"ຮ":
self.trans[char] = "h"
self.trans[u"ອ"] = "'"
for char in u"ະັ":
self.trans[char] = "a"
self.trans[u"ິ"] = "i"
self.trans[u"ຶ"] = "ue"
self.trans[u"ຸ"] = "u"
self.trans[u"ເ"] = u"é"
self.trans[u"ແ"] = u"è"
for char in u"ໂົາໍ":
self.trans[char] = "o"
self.trans[u"ຽ"] = "ia"
self.trans[u"ເຶ"] = "uea"
self.trans[u"ຍ"] = "i"
for char in u"ໄໃ":
self.trans[char] = "ai"
self.trans[u"ຳ"] = "am"
self.trans[u"າ"] = "aa"
self.trans[u"ີ"] = "ii"
self.trans[u"ື"] = "yy"
self.trans[u"ູ"] = "uu"
self.trans[u"ເ"] = "e"
self.trans[u"ແ"] = "ei"
self.trans[u"໐"] = "0"
self.trans[u"໑"] = "1"
self.trans[u"໒"] = "2"
self.trans[u"໓"] = "3"
self.trans[u"໔"] = "4"
self.trans[u"໕"] = "5"
self.trans[u"໖"] = "6"
self.trans[u"໗"] = "7"
self.trans[u"໘"] = "8"
self.trans[u"໙"] = "9"
# Chinese -- note: incomplete
for char in u"埃挨哎唉哀皑癌蔼矮艾碍爱隘":
self.trans[char] = u"ai"
for char in u"鞍氨安俺按暗岸胺案":
self.trans[char] = u"an"
for char in u"肮昂盎":
self.trans[char] = u"ang"
for char in u"凹敖熬翱袄傲奥懊澳":
self.trans[char] = u"ao"
for char in u"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸":
self.trans[char] = u"ba"
for char in u"白柏百摆佰败拜稗":
self.trans[char] = u"bai"
for char in u"斑班搬扳般颁板版扮拌伴瓣半办绊":
self.trans[char] = u"ban"
for char in u"邦帮梆榜膀绑棒磅蚌镑傍谤":
self.trans[char] = u"bang"
for char in u"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆":
self.trans[char] = u"bao"
for char in u"杯碑悲卑北辈背贝钡倍狈备惫焙被":
self.trans[char] = u"bei"
for char in u"奔苯本笨":
self.trans[char] = u"ben"
for char in u"崩绷甭泵蹦迸":
self.trans[char] = u"beng"
for char in u"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛":
self.trans[char] = u"bi"
for char in u"鞭边编贬扁便变卞辨辩辫遍":
self.trans[char] = u"bian"
for char in u"标彪膘表":
self.trans[char] = u"biao"
for char in u"鳖憋别瘪":
self.trans[char] = u"bie"
for char in u"彬斌濒滨宾摈":
self.trans[char] = u"bin"
for char in u"兵冰柄丙秉饼炳病并":
self.trans[char] = u"bing"
for char in u"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳":
self.trans[char] = u"bo"
for char in u"哺补埠不布步簿部怖":
self.trans[char] = u"bu"
for char in u"猜裁材才财睬踩采彩菜蔡":
self.trans[char] = u"cai"
for char in u"餐参蚕残惭惨灿":
self.trans[char] = u"can"
for char in u"苍舱仓沧藏":
self.trans[char] = u"cang"
for char in u"操糙槽曹草":
self.trans[char] = u"cao"
for char in u"厕策侧册测":
self.trans[char] = u"ce"
for char in u"层蹭":
self.trans[char] = u"ceng"
for char in u"插叉茬茶查碴搽察岔差诧":
self.trans[char] = u"cha"
for char in u"拆柴豺":
self.trans[char] = u"chai"
for char in u"搀掺蝉馋谗缠铲产阐颤":
self.trans[char] = u"chan"
for char in u"昌猖场尝常长偿肠厂敞畅唱倡":
self.trans[char] = u"chang"
for char in u"超抄钞朝嘲潮巢吵炒":
self.trans[char] = u"chao"
for char in u"车扯撤掣彻澈":
self.trans[char] = u"che"
for char in u"郴臣辰尘晨忱沉陈趁衬":
self.trans[char] = u"chen"
for char in u"撑称城橙成呈乘程惩澄诚承逞骋秤":
self.trans[char] = u"cheng"
for char in u"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽":
self.trans[char] = u"chi"
for char in u"充冲虫崇宠":
self.trans[char] = u"chong"
for char in u"抽酬畴踌稠愁筹仇绸瞅丑臭":
self.trans[char] = u"chou"
for char in u"初出橱厨躇锄雏滁除楚储矗搐触处":
self.trans[char] = u"chu"
for char in u"揣":
self.trans[char] = u"chuai"
for char in u"川穿椽传船喘串":
self.trans[char] = u"chuan"
for char in u"疮窗幢床闯创":
self.trans[char] = u"chuang"
for char in u"吹炊捶锤垂":
self.trans[char] = u"chui"
for char in u"春椿醇唇淳纯蠢":
self.trans[char] = u"chun"
for char in u"戳绰":
self.trans[char] = u"chuo"
for char in u"疵茨磁雌辞慈瓷词此刺赐次":
self.trans[char] = u"ci"
for char in u"聪葱囱匆从丛":
self.trans[char] = u"cong"
for char in u"凑":
self.trans[char] = u"cou"
for char in u"粗醋簇促":
self.trans[char] = u"cu"
for char in u"蹿篡窜":
self.trans[char] = u"cuan"
for char in u"摧崔催脆瘁粹淬翠":
self.trans[char] = u"cui"
for char in u"村存寸":
self.trans[char] = u"cun"
for char in u"磋撮搓措挫错":
self.trans[char] = u"cuo"
for char in u"搭达答瘩打大":
self.trans[char] = u"da"
for char in u"呆歹傣戴带殆代贷袋待逮怠":
self.trans[char] = u"dai"
for char in u"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋":
self.trans[char] = u"dan"
for char in u"当挡党荡档":
self.trans[char] = u"dang"
for char in u"刀捣蹈倒岛祷导到稻悼道盗":
self.trans[char] = u"dao"
for char in u"德得的":
self.trans[char] = u"de"
for char in u"蹬灯登等瞪凳邓":
self.trans[char] = u"deng"
for char in u"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔":
self.trans[char] = u"di"
for char in u"颠掂滇碘点典靛垫电佃甸店惦奠淀殿":
self.trans[char] = u"dian"
for char in u"碉叼雕凋刁掉吊钓调":
self.trans[char] = u"diao"
for char in u"跌爹碟蝶迭谍叠":
self.trans[char] = u"die"
for char in u"丁盯叮钉顶鼎锭定订":
self.trans[char] = u"ding"
for char in u"丢":
self.trans[char] = u"diu"
for char in u"东冬董懂动栋侗恫冻洞":
self.trans[char] = u"dong"
for char in u"兜抖斗陡豆逗痘":
self.trans[char] = u"dou"
for char in u"都督毒犊独读堵睹赌杜镀肚度渡妒":
self.trans[char] = u"du"
for char in u"端短锻段断缎":
self.trans[char] = u"duan"
for char in u"堆兑队对":
self.trans[char] = u"dui"
for char in u"墩吨蹲敦顿囤钝盾遁":
self.trans[char] = u"dun"
for char in u"掇哆多夺垛躲朵跺舵剁惰堕":
self.trans[char] = u"duo"
for char in u"蛾峨鹅俄额讹娥恶厄扼遏鄂饿":
self.trans[char] = u"e"
for char in u"恩嗯":
self.trans[char] = u"en"
for char in u"而儿耳尔饵洱二贰":
self.trans[char] = u"er"
for char in u"发罚筏伐乏阀法珐":
self.trans[char] = u"fa"
for char in u"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛":
self.trans[char] = u"fan"
for char in u"坊芳方肪房防妨仿访纺放":
self.trans[char] = u"fang"
for char in u"菲非啡飞肥匪诽吠肺废沸费":
self.trans[char] = u"fei"
for char in u"芬酚吩氛分纷坟焚汾粉奋份忿愤粪":
self.trans[char] = u"fen"
for char in u"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤":
self.trans[char] = u"feng"
for char in u"佛":
self.trans[char] = u"fo"
for char in u"否":
self.trans[char] = u"fou"
for char in u"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐":
self.trans[char] = u"fu"
for char in u"噶嘎":
self.trans[char] = u"ga"
for char in u"该改概钙盖溉":
self.trans[char] = u"gai"
for char in u"干甘杆柑竿肝赶感秆敢赣":
self.trans[char] = u"gan"
for char in u"冈刚钢缸肛纲岗港杠":
self.trans[char] = u"gang"
for char in u"篙皋高膏羔糕搞镐稿告":
self.trans[char] = u"gao"
for char in u"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各":
self.trans[char] = u"ge"
for char in u"给":
self.trans[char] = u"gei"
for char in u"根跟":
self.trans[char] = u"gen"
for char in u"耕更庚羹埂耿梗":
self.trans[char] = u"geng"
for char in u"工攻功恭龚供躬公宫弓巩汞拱贡共":
self.trans[char] = u"gong"
for char in u"钩勾沟苟狗垢构购够":
self.trans[char] = u"gou"
for char in u"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇":
self.trans[char] = u"gu"
for char in u"刮瓜剐寡挂褂":
self.trans[char] = u"gua"
for char in u"乖拐怪":
self.trans[char] = u"guai"
for char in u"棺关官冠观管馆罐惯灌贯":
self.trans[char] = u"guan"
for char in u"光广逛":
self.trans[char] = u"guang"
for char in u"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽":
self.trans[char] = u"gui"
for char in u"辊滚棍":
self.trans[char] = u"gun"
for char in u"锅郭国果裹过":
self.trans[char] = u"guo"
for char in u"哈":
self.trans[char] = u"ha"
for char in u"骸孩海氦亥害骇":
self.trans[char] = u"hai"
for char in u"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉":
self.trans[char] = u"han"
for char in u"夯杭航":
self.trans[char] = u"hang"
for char in u"壕嚎豪毫郝好耗号浩":
self.trans[char] = u"hao"
for char in u"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺":
self.trans[char] = u"he"
for char in u"嘿黑":
self.trans[char] = u"hei"
for char in u"痕很狠恨":
self.trans[char] = u"hen"
for char in u"哼亨横衡恒":
self.trans[char] = u"heng"
for char in u"轰哄烘虹鸿洪宏弘红":
self.trans[char] = u"hong"
for char in u"喉侯猴吼厚候后":
self.trans[char] = u"hou"
for char in u"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户":
self.trans[char] = u"hu"
for char in u"花哗华猾滑画划化话":
self.trans[char] = u"hua"
for char in u"槐徊怀淮坏":
self.trans[char] = u"huai"
for char in u"欢环桓还缓换患唤痪豢焕涣宦幻":
self.trans[char] = u"huan"
for char in u"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎":
self.trans[char] = u"huang"
for char in u"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘":
self.trans[char] = u"hui"
for char in u"荤昏婚魂浑混":
self.trans[char] = u"hun"
for char in u"豁活伙火获或惑霍货祸":
self.trans[char] = u"huo"
for char in u"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪":
self.trans[char] = u"ji"
for char in u"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁":
self.trans[char] = u"jia"
for char in u"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建":
self.trans[char] = u"jian"
for char in u"僵姜将浆江疆蒋桨奖讲匠酱降":
self.trans[char] = u"jiang"
for char in u"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖":
self.trans[char] = u"jiao"
for char in u"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届":
self.trans[char] = u"jie"
for char in u"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲":
self.trans[char] = u"jin"
for char in u"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净":
self.trans[char] = u"jing"
for char in u"囧炯窘":
self.trans[char] = u"jiong"
for char in u"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚":
self.trans[char] = u"jiu"
for char in u"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧":
self.trans[char] = u"ju"
for char in u"捐鹃娟倦眷卷绢":
self.trans[char] = u"juan"
for char in u"撅攫抉掘倔爵觉决诀绝":
self.trans[char] = u"jue"
for char in u"均菌钧军君峻俊竣浚郡骏":
self.trans[char] = u"jun"
for char in u"喀咖卡咯":
self.trans[char] = u"ka"
for char in u"开揩楷凯慨":
self.trans[char] = u"kai"
for char in u"刊堪勘坎砍看":
self.trans[char] = u"kan"
for char in u"康慷糠扛抗亢炕":
self.trans[char] = u"kang"
for char in u"考拷烤靠":
self.trans[char] = u"kao"
for char in u"坷苛柯棵磕颗科壳咳可渴克刻客课":
self.trans[char] = u"ke"
for char in u"肯啃垦恳":
self.trans[char] = u"ken"
for char in u"坑吭":
self.trans[char] = u"keng"
for char in u"空恐孔控":
self.trans[char] = u"kong"
for char in u"抠口扣寇":
self.trans[char] = u"kou"
for char in u"枯哭窟苦酷库裤":
self.trans[char] = u"ku"
for char in u"夸垮挎跨胯":
self.trans[char] = u"kua"
for char in u"块筷侩快":
self.trans[char] = u"kuai"
for char in u"宽款":
self.trans[char] = u"kuan"
for char in u"匡筐狂框矿眶旷况":
self.trans[char] = u"kuang"
for char in u"亏盔岿窥葵奎魁傀馈愧溃":
self.trans[char] = u"kui"
for char in u"坤昆捆困":
self.trans[char] = u"kun"
for char in u"括扩廓阔":
self.trans[char] = u"kuo"
for char in u"垃拉喇蜡腊辣啦":
self.trans[char] = u"la"
for char in u"莱来赖":
self.trans[char] = u"lai"
for char in u"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥":
self.trans[char] = u"lan"
for char in u"琅榔狼廊郎朗浪":
self.trans[char] = u"lang"
for char in u"捞劳牢老佬姥酪烙涝":
self.trans[char] = u"lao"
for char in u"勒乐":
self.trans[char] = u"le"
for char in u"雷镭蕾磊累儡垒擂肋类泪":
self.trans[char] = u"lei"
for char in u"棱楞冷":
self.trans[char] = u"leng"
for char in u"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩":
self.trans[char] = u"li"
for char in u"俩":
self.trans[char] = u"lia"
for char in u"联莲连镰廉怜涟帘敛脸链恋炼练":
self.trans[char] = u"lian"
for char in u"粮凉梁粱良两辆量晾亮谅":
self.trans[char] = u"liang"
for char in u"撩聊僚疗燎寥辽潦了撂镣廖料":
self.trans[char] = u"liao"
for char in u"列裂烈劣猎":
self.trans[char] = u"lie"
for char in u"琳林磷霖临邻鳞淋凛赁吝拎":
self.trans[char] = u"lin"
for char in u"玲菱零龄铃伶羚凌灵陵岭领另令":
self.trans[char] = u"ling"
for char in u"溜琉榴硫馏留刘瘤流柳六":
self.trans[char] = u"liu"
for char in u"龙聋咙笼窿隆垄拢陇":
self.trans[char] = u"long"
for char in u"楼娄搂篓漏陋":
self.trans[char] = u"lou"
for char in u"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸":
self.trans[char] = u"lu"
for char in u"峦挛孪滦卵乱":
self.trans[char] = u"luan"
for char in u"掠略":
self.trans[char] = u"lue"
for char in u"抡轮伦仑沦纶论":
self.trans[char] = u"lun"
for char in u"萝螺罗逻锣箩骡裸落洛骆络漯":
self.trans[char] = u"luo"
for char in u"驴吕铝侣旅履屡缕虑氯律率滤绿":
self.trans[char] = u"lv"
for char in u"妈麻玛码蚂马骂嘛吗":
self.trans[char] = u"ma"
for char in u"埋买麦卖迈脉":
self.trans[char] = u"mai"
for char in u"瞒馒蛮满蔓曼慢漫谩":
self.trans[char] = u"man"
for char in u"芒茫盲氓忙莽":
self.trans[char] = u"mang"
for char in u"猫茅锚毛矛铆卯茂冒帽貌贸":
self.trans[char] = u"mao"
for char in u"么":
self.trans[char] = u"me"
for char in u"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚":
self.trans[char] = u"mei"
for char in u"门闷们":
self.trans[char] = u"men"
for char in u"萌蒙檬盟锰猛梦孟":
self.trans[char] = u"meng"
for char in u"眯醚靡糜迷谜弥米秘觅泌蜜密幂":
self.trans[char] = u"mi"
for char in u"棉眠绵冕免勉娩缅面":
self.trans[char] = u"mian"
for char in u"苗描瞄藐秒渺庙妙":
self.trans[char] = u"miao"
for char in u"蔑灭":
self.trans[char] = u"mie"
for char in u"民抿皿敏悯闽":
self.trans[char] = u"min"
for char in u"明螟鸣铭名命":
self.trans[char] = u"ming"
for char in u"谬":
self.trans[char] = u"miu"
for char in u"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌":
self.trans[char] = u"mo"
for char in u"谋牟某":
self.trans[char] = u"mou"
for char in u"拇牡亩姆母墓暮幕募慕木目睦牧穆":
self.trans[char] = u"mu"
for char in u"拿哪呐钠那娜纳":
self.trans[char] = u"na"
for char in u"氖乃奶耐奈":
self.trans[char] = u"nai"
for char in u"南男难":
self.trans[char] = u"nan"
for char in u"囊":
self.trans[char] = u"nang"
for char in u"挠脑恼闹淖":
self.trans[char] = u"nao"
for char in u"呢":
self.trans[char] = u"ne"
for char in u"馁内":
self.trans[char] = u"nei"
for char in u"嫩":
self.trans[char] = u"nen"
for char in u"能":
self.trans[char] = u"neng"
for char in u"妮霓倪泥尼拟你匿腻逆溺":
self.trans[char] = u"ni"
for char in u"蔫拈年碾撵捻念":
self.trans[char] = u"nian"
for char in u"娘酿":
self.trans[char] = u"niang"
for char in u"鸟尿":
self.trans[char] = u"niao"
for char in u"捏聂孽啮镊镍涅":
self.trans[char] = u"nie"
for char in u"您":
self.trans[char] = u"nin"
for char in u"柠狞凝宁拧泞":
self.trans[char] = u"ning"
for char in u"牛扭钮纽":
self.trans[char] = u"niu"
for char in u"脓浓农弄":
self.trans[char] = u"nong"
for char in u"奴努怒":
self.trans[char] = u"nu"
for char in u"暖":
self.trans[char] = u"nuan"
for char in u"虐疟":
self.trans[char] = u"nue"
for char in u"挪懦糯诺":
self.trans[char] = u"nuo"
for char in u"女":
self.trans[char] = u"nv"
for char in u"哦":
self.trans[char] = u"o"
for char in u"欧鸥殴藕呕偶沤":
self.trans[char] = u"ou"
for char in u"啪趴爬帕怕琶":
self.trans[char] = u"pa"
for char in u"拍排牌徘湃派":
self.trans[char] = u"pai"
for char in u"攀潘盘磐盼畔判叛":
self.trans[char] = u"pan"
for char in u"乓庞旁耪胖":
self.trans[char] = u"pang"
for char in u"抛咆刨炮袍跑泡":
self.trans[char] = u"pao"
for char in u"呸胚培裴赔陪配佩沛":
self.trans[char] = u"pei"
for char in u"喷盆":
self.trans[char] = u"pen"
for char in u"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰":
self.trans[char] = u"peng"
for char in u"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬":
self.trans[char] = u"pi"
for char in u"篇偏片骗":
self.trans[char] = u"pian"
for char in u"飘漂瓢票":
self.trans[char] = u"piao"
for char in u"撇瞥":
self.trans[char] = u"pie"
for char in u"拼频贫品聘":
self.trans[char] = u"pin"
for char in u"乒坪苹萍平凭瓶评屏":
self.trans[char] = u"ping"
for char in u"坡泼颇婆破魄迫粕剖":
self.trans[char] = u"po"
for char in u"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮":
self.trans[char] = u"pu"
for char in u"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫":
self.trans[char] = u"qi"
for char in u"掐恰洽":
self.trans[char] = u"qia"
for char in u"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉":
self.trans[char] = u"qian"
for char in u"枪呛腔羌墙蔷强抢":
self.trans[char] = u"qiang"
for char in u"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍":
self.trans[char] = u"qiao"
for char in u"切茄且怯窃":
self.trans[char] = u"qie"
for char in u"钦侵亲秦琴勤芹擒禽寝沁":
self.trans[char] = u"qin"
for char in u"青轻氢倾卿清擎晴氰情顷请庆":
self.trans[char] = u"qing"
for char in u"琼穷":
self.trans[char] = u"qiong"
for char in u"秋丘邱球求囚酋泅":
self.trans[char] = u"qiu"
for char in u"趋区蛆曲躯屈驱渠取娶龋趣去":
self.trans[char] = u"qu"
for char in u"圈颧权醛泉全痊拳犬券劝":
self.trans[char] = u"quan"
for char in u"缺炔瘸却鹊榷确雀":
self.trans[char] = u"que"
for char in u"裙群":
self.trans[char] = u"qun"
for char in u"然燃冉染":
self.trans[char] = u"ran"
for char in u"瓤壤攘嚷让":
self.trans[char] = u"rang"
for char in u"饶扰绕":
self.trans[char] = u"rao"
for char in u"惹热":
self.trans[char] = u"re"
for char in u"壬仁人忍韧任认刃妊纫":
self.trans[char] = u"ren"
for char in u"扔仍":
self.trans[char] = u"reng"
for char in u"日":
self.trans[char] = u"ri"
for char in u"戎茸蓉荣融熔溶容绒冗":
self.trans[char] = u"rong"
for char in u"揉柔肉":
self.trans[char] = u"rou"
for char in u"茹蠕儒孺如辱乳汝入褥":
self.trans[char] = u"ru"
for char in u"软阮":
self.trans[char] = u"ruan"
for char in u"蕊瑞锐":
self.trans[char] = u"rui"
for char in u"闰润":
self.trans[char] = u"run"
for char in u"若弱":
self.trans[char] = u"ruo"
for char in u"撒洒萨":
self.trans[char] = u"sa"
for char in u"腮鳃塞赛":
self.trans[char] = u"sai"
for char in u"三叁伞散":
self.trans[char] = u"san"
for char in u"桑嗓丧":
self.trans[char] = u"sang"
for char in u"搔骚扫嫂":
self.trans[char] = u"sao"
for char in u"瑟色涩":
self.trans[char] = u"se"
for char in u"森":
self.trans[char] = u"sen"
for char in u"僧":
self.trans[char] = u"seng"
for char in u"莎砂杀刹沙纱傻啥煞":
self.trans[char] = u"sha"
for char in u"筛晒":
self.trans[char] = u"shai"
for char in u"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮":
self.trans[char] = u"shan"
for char in u"墒伤商赏晌上尚裳":
self.trans[char] = u"shang"
for char in u"梢捎稍烧芍勺韶少哨邵绍":
self.trans[char] = u"shao"
for char in u"奢赊蛇舌舍赦摄射慑涉社设":
self.trans[char] = u"she"
for char in u"砷申呻伸身深娠绅神沈审婶甚肾慎渗":
self.trans[char] = u"shen"
for char in u"声生甥牲升绳省盛剩胜圣":
self.trans[char] = u"sheng"
for char in u"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试":
self.trans[char] = u"shi"
for char in u"收手首守寿授售受瘦兽":
self.trans[char] = u"shou"
for char in u"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕":
self.trans[char] = u"shu"
for char in u"刷耍":
self.trans[char] = u"shua"
for char in u"摔衰甩帅":
self.trans[char] = u"shuai"
for char in u"栓拴":
self.trans[char] = u"shuan"
for char in u"霜双爽":
self.trans[char] = u"shuang"
for char in u"谁水睡税":
self.trans[char] = u"shui"
for char in u"吮瞬顺舜":
self.trans[char] = u"shun"
for char in u"说硕朔烁":
self.trans[char] = u"shuo"
for char in u"斯撕嘶思私司丝死肆寺嗣四伺似饲巳":
self.trans[char] = u"si"
for char in u"松耸怂颂送宋讼诵":
self.trans[char] = u"song"
for char in u"搜艘擞":
self.trans[char] = u"sou"
for char in u"嗽苏酥俗素速粟僳塑溯宿诉肃":
self.trans[char] = u"su"
for char in u"酸蒜算":
self.trans[char] = u"suan"
for char in u"虽隋随绥髓碎岁穗遂隧祟":
self.trans[char] = u"sui"
for char in u"孙损笋":
self.trans[char] = u"sun"
for char in u"蓑梭唆缩琐索锁所":
self.trans[char] = u"suo"
for char in u"塌他它她塔獭挞蹋踏":
self.trans[char] = u"ta"
for char in u"胎苔抬台泰酞太态汰":
self.trans[char] = u"tai"
for char in u"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭":
self.trans[char] = u"tan"
for char in u"汤塘搪堂棠膛唐糖倘躺淌趟烫":
self.trans[char] = u"tang"
for char in u"掏涛滔绦萄桃逃淘陶讨套":
self.trans[char] = u"tao"
for char in u"特":
self.trans[char] = u"te"
for char in u"藤腾疼誊":
self.trans[char] = u"teng"
for char in u"梯剔踢锑提题蹄啼体替嚏惕涕剃屉":
self.trans[char] = u"ti"
for char in u"兲天添填田甜恬舔腆":
self.trans[char] = u"tian"
for char in u"挑条迢眺跳":
self.trans[char] = u"tiao"
for char in u"贴铁帖":
self.trans[char] = u"tie"
for char in u"厅听烃汀廷停亭庭挺艇":
self.trans[char] = u"ting"
for char in u"通桐酮瞳同铜彤童桶捅筒统痛":
self.trans[char] = u"tong"
for char in u"偷投头透":
self.trans[char] = u"tou"
for char in u"凸秃突图徒途涂屠土吐兔":
self.trans[char] = u"tu"
for char in u"湍团":
self.trans[char] = u"tuan"
for char in u"推颓腿蜕褪退":
self.trans[char] = u"tui"
for char in u"吞屯臀":
self.trans[char] = u"tun"
for char in u"拖托脱鸵陀驮驼椭妥拓唾":
self.trans[char] = u"tuo"
for char in u"挖哇蛙洼娃瓦袜":
self.trans[char] = u"wa"
for char in u"歪外":
self.trans[char] = u"wai"
for char in u"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞":
self.trans[char] = u"wan"
for char in u"汪王亡枉网往旺望忘妄":
self.trans[char] = u"wang"
for char in u"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫":
self.trans[char] = u"wei"
for char in u"瘟温蚊文闻纹吻稳紊问":
self.trans[char] = u"wen"
for char in u"嗡翁瓮":
self.trans[char] = u"weng"
for char in u"挝蜗涡窝我斡卧握沃":
self.trans[char] = u"wo"
for char in u"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误":
self.trans[char] = u"wu"
for char in u"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细":
self.trans[char] = u"xi"
for char in u"瞎虾匣霞辖暇峡侠狭下厦夏吓":
self.trans[char] = u"xia"
for char in u"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线":
self.trans[char] = u"xian"
for char in u"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象":
self.trans[char] = u"xiang"
for char in u"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效":
self.trans[char] = u"xiao"
for char in u"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑":
self.trans[char] = u"xie"
for char in u"薪芯锌欣辛新忻心信衅":
self.trans[char] = u"xin"
for char in u"星腥猩惺兴刑型形邢行醒幸杏性姓":
self.trans[char] = u"xing"
for char in u"兄凶胸匈汹雄熊":
self.trans[char] = u"xiong"
for char in u"休修羞朽嗅锈秀袖绣":
self.trans[char] = u"xiu"
for char in u"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续":
self.trans[char] = u"xu"
for char in u"轩喧宣悬旋玄选癣眩绚":
self.trans[char] = u"xuan"
for char in u"靴薛学穴雪血":
self.trans[char] = u"xue"
for char in u"勋熏循旬询寻驯巡殉汛训讯逊迅":
self.trans[char] = u"xun"
for char in u"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶":
self.trans[char] = u"ya"
for char in u"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验":
self.trans[char] = u"yan"
for char in u"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾":
self.trans[char] = u"yang"
for char in u"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀":
self.trans[char] = u"yao"
for char in u"椰噎耶爷野冶也页掖业叶曳腋夜液":
self.trans[char] = u"ye"
for char in u"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎":
self.trans[char] = u"yi"
for char in u"茵荫因殷音阴姻吟银淫寅饮尹引隐印":
self.trans[char] = u"yin"
for char in u"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映":
self.trans[char] = u"ying"
for char in u"哟":
self.trans[char] = u"yo"
for char in u"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用":
self.trans[char] = u"yong"
for char in u"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂":
self.trans[char] = u"you"
for char in u"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭":
self.trans[char] = u"yu"
for char in u"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院":
self.trans[char] = u"yuan"
for char in u"曰约越跃钥岳粤月悦阅":
self.trans[char] = u"yue"
for char in u"耘云郧匀陨允运蕴酝晕韵孕":
self.trans[char] = u"yun"
for char in u"匝砸杂":
self.trans[char] = u"za"
for char in u"栽哉灾宰载再在":
self.trans[char] = u"zai"
for char in u"咱攒暂赞":
self.trans[char] = u"zan"
for char in u"赃脏葬":
self.trans[char] = u"zang"
for char in u"遭糟凿藻枣早澡蚤躁噪造皂灶燥":
self.trans[char] = u"zao"
for char in u"责择则泽":
self.trans[char] = u"ze"
for char in u"贼":
self.trans[char] = u"zei"
for char in u"怎":
self.trans[char] = u"zen"
for char in u"增憎曾赠":
self.trans[char] = u"zeng"
for char in u"扎喳渣札轧铡闸眨栅榨咋乍炸诈":
self.trans[char] = u"zha"
for char in u"摘斋宅窄债寨":
self.trans[char] = u"zhai"
for char in u"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽":
self.trans[char] = u"zhan"
for char in u"樟章彰漳张掌涨杖丈帐账仗胀瘴障":
self.trans[char] = u"zhang"
for char in u"招昭找沼赵照罩兆肇召":
self.trans[char] = u"zhao"
for char in u"遮折哲蛰辙者锗蔗这浙":
self.trans[char] = u"zhe"
for char in u"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳":
self.trans[char] = u"zhen"
for char in u"蒸挣睁征狰争怔整拯正政帧症郑证":
self.trans[char] = u"zheng"
for char in u"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒":
self.trans[char] = u"zhi"
for char in u"中盅忠钟衷终种肿重仲众":
self.trans[char] = u"zhong"
for char in u"舟周州洲诌粥轴肘帚咒皱宙昼骤":
self.trans[char] = u"zhou"
for char in u"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻":
self.trans[char] = u"zhu"
for char in u"抓爪":
self.trans[char] = u"zhua"
for char in u"拽":
self.trans[char] = u"zhuai"
for char in u"专砖转撰赚篆":
self.trans[char] = u"zhuan"
for char in u"桩庄装妆撞壮状":
self.trans[char] = u"zhuang"
for char in u"椎锥追赘坠缀":
self.trans[char] = u"zhui"
for char in u"谆准":
self.trans[char] = u"zhun"
for char in u"捉拙卓桌琢茁酌啄着灼浊":
self.trans[char] = u"zhuo"
for char in u"兹咨资姿滋淄孜紫仔籽滓子自渍字":
self.trans[char] = u"zi"
for char in u"鬃棕踪宗综总纵":
self.trans[char] = u"zong"
for char in u"邹走奏揍":
self.trans[char] = u"zou"
for char in u"租足卒族祖诅阻组":
self.trans[char] = u"zu"
for char in u"钻纂":
self.trans[char] = u"zuan"
for char in u"嘴醉最罪":
self.trans[char] = u"zui"
for char in u"尊遵":
self.trans[char] = u"zun"
for char in u"昨左佐柞做作坐座":
self.trans[char] = u"zuo"
# from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js
self.trans[u"ଂ"] = "anusvara"
self.trans[u"ઇ"] = "i"
self.trans[u"എ"] = "e"
self.trans[u"ગ"] = "ga"
self.trans[u"ਜ"] = "ja"
self.trans[u"ഞ"] = "nya"
self.trans[u"ଢ"] = "ddha"
self.trans[u"ધ"] = "dha"
self.trans[u"ਬ"] = "ba"
self.trans[u"മ"] = "ma"
self.trans[u"ଲ"] = "la"
self.trans[u"ષ"] = "ssa"
self.trans[u"਼"] = "nukta"
self.trans[u"ാ"] = "aa"
self.trans[u"ୂ"] = "uu"
self.trans[u"ે"] = "e"
self.trans[u"ੌ"] = "au"
self.trans[u"ൎ"] = "reph"
self.trans[u"ੜ"] = "rra"
self.trans[u"՞"] = "?"
self.trans[u"ୢ"] = "l"
self.trans[u"૧"] = "1"
self.trans[u"੬"] = "6"
self.trans[u"൮"] = "8"
self.trans[u"୲"] = "quarter"
self.trans[u"ൾ"] = "ll"
self.trans[u"ਇ"] = "i"
self.trans[u"ഉ"] = "u"
self.trans[u"ઌ"] = "l"
self.trans[u"ਗ"] = "ga"
self.trans[u"ങ"] = "nga"
self.trans[u"ଝ"] = "jha"
self.trans[u"જ"] = "ja"
self.trans[u"؟"] = "?"
self.trans[u"ਧ"] = "dha"
self.trans[u"ഩ"] = "nnna"
self.trans[u"ଭ"] = "bha"
self.trans[u"બ"] = "ba"
self.trans[u"ഹ"] = "ha"
self.trans[u"ଽ"] = "avagraha"
self.trans[u"઼"] = "nukta"
self.trans[u"ੇ"] = "ee"
self.trans[u"୍"] = "virama"
self.trans[u"ૌ"] = "au"
self.trans[u"੧"] = "1"
self.trans[u"൩"] = "3"
self.trans[u"୭"] = "7"
self.trans[u"૬"] = "6"
self.trans[u"൹"] = "mark"
self.trans[u"ਖ਼"] = "khha"
self.trans[u"ਂ"] = "bindi"
self.trans[u"ഈ"] = "ii"
self.trans[u"ઍ"] = "e"
self.trans[u"ଌ"] = "l"
self.trans[u"ഘ"] = "gha"
self.trans[u"ઝ"] = "jha"
self.trans[u"ଡ଼"] = "rra"
self.trans[u"ਢ"] = "ddha"
self.trans[u"ന"] = "na"
self.trans[u"ભ"] = "bha"
self.trans[u"ବ"] = "ba"
self.trans[u"ਲ"] = "la"
self.trans[u"സ"] = "sa"
self.trans[u"ઽ"] = "avagraha"
self.trans[u"଼"] = "nukta"
self.trans[u"ੂ"] = "uu"
self.trans[u"ൈ"] = "ai"
self.trans[u"્"] = "virama"
self.trans[u"ୌ"] = "au"
self.trans[u"൨"] = "2"
self.trans[u"૭"] = "7"
self.trans[u"୬"] = "6"
self.trans[u"ੲ"] = "iri"
self.trans[u"ഃ"] = "visarga"
self.trans[u"ં"] = "anusvara"
self.trans[u"ଇ"] = "i"
self.trans[u"ഓ"] = "oo"
self.trans[u"ଗ"] = "ga"
self.trans[u"ਝ"] = "jha"
self.trans[u"?"] = "?"
self.trans[u"ണ"] = "nna"
self.trans[u"ઢ"] = "ddha"
self.trans[u"ଧ"] = "dha"
self.trans[u"ਭ"] = "bha"
self.trans[u"ള"] = "lla"
self.trans[u"લ"] = "la"
self.trans[u"ଷ"] = "ssa"
self.trans[u"ൃ"] = "r"
self.trans[u"ૂ"] = "uu"
self.trans[u"େ"] = "e"
self.trans[u"੍"] = "virama"
self.trans[u"ୗ"] = "mark"
self.trans[u"ൣ"] = "ll"
self.trans[u"ૢ"] = "l"
self.trans[u"୧"] = "1"
self.trans[u"੭"] = "7"
self.trans[u"൳"] = "1/4"
self.trans[u"୷"] = "sixteenths"
self.trans[u"ଆ"] = "aa"
self.trans[u"ઋ"] = "r"
self.trans[u"ഊ"] = "uu"
self.trans[u"ਐ"] = "ai"
self.trans[u"ଖ"] = "kha"
self.trans[u"છ"] = "cha"
self.trans[u"ച"] = "ca"
self.trans[u"ਠ"] = "ttha"
self.trans[u"ଦ"] = "da"
self.trans[u"ફ"] = "pha"
self.trans[u"പ"] = "pa"
self.trans[u"ਰ"] = "ra"
self.trans[u"ଶ"] = "sha"
self.trans[u"ഺ"] = "ttta"
self.trans[u"ੀ"] = "ii"
self.trans[u"ો"] = "o"
self.trans[u"ൊ"] = "o"
self.trans[u"ୖ"] = "mark"
self.trans[u"୦"] = "0"
self.trans[u"૫"] = "5"
self.trans[u"൪"] = "4"
self.trans[u"ੰ"] = "tippi"
self.trans[u"୶"] = "eighth"
self.trans[u"ൺ"] = "nn"
self.trans[u"ଁ"] = "candrabindu"
self.trans[u"അ"] = "a"
self.trans[u"ઐ"] = "ai"
self.trans[u"ക"] = "ka"
self.trans[u"ਸ਼"] = "sha"
self.trans[u"ਛ"] = "cha"
self.trans[u"ଡ"] = "dda"
self.trans[u"ઠ"] = "ttha"
self.trans[u"ഥ"] = "tha"
self.trans[u"ਫ"] = "pha"
self.trans[u"ર"] = "ra"
self.trans[u"വ"] = "va"
self.trans[u"ୁ"] = "u"
self.trans[u"ી"] = "ii"
self.trans[u"ੋ"] = "oo"
self.trans[u"ૐ"] = "om"
self.trans[u"ୡ"] = "ll"
self.trans[u"ૠ"] = "rr"
self.trans[u"੫"] = "5"
self.trans[u"ୱ"] = "wa"
self.trans[u"૰"] = "sign"
self.trans[u"൵"] = "quarters"
self.trans[u"ਫ਼"] = "fa"
self.trans[u"ઁ"] = "candrabindu"
self.trans[u"ਆ"] = "aa"
self.trans[u"ઑ"] = "o"
self.trans[u"ଐ"] = "ai"
self.trans[u"ഔ"] = "au"
self.trans[u"ਖ"] = "kha"
self.trans[u"ડ"] = "dda"
self.trans[u"ଠ"] = "ttha"
self.trans[u"ത"] = "ta"
self.trans[u"ਦ"] = "da"
self.trans[u"ର"] = "ra"
self.trans[u"ഴ"] = "llla"
self.trans[u"ુ"] = "u"
self.trans[u"ୀ"] = "ii"
self.trans[u"ൄ"] = "rr"
self.trans[u"ૡ"] = "ll"
self.trans[u"ୠ"] = "rr"
self.trans[u"੦"] = "0"
self.trans[u"૱"] = "sign"
self.trans[u"୰"] = "isshar"
self.trans[u"൴"] = "1/2"
self.trans[u"ਁ"] = "bindi"
self.trans[u"આ"] = "aa"
self.trans[u"ଋ"] = "r"
self.trans[u"ഏ"] = "ee"
self.trans[u"ખ"] = "kha"
self.trans[u"ଛ"] = "cha"
self.trans[u"ട"] = "tta"
self.trans[u"ਡ"] = "dda"
self.trans[u"દ"] = "da"
self.trans[u"ଫ"] = "pha"
self.trans[u"യ"] = "ya"
self.trans[u"શ"] = "sha"
self.trans[u"ി"] = "i"
self.trans[u"ੁ"] = "u"
self.trans[u"ୋ"] = "o"
self.trans[u"ੑ"] = "udaat"
self.trans[u"૦"] = "0"
self.trans[u"୫"] = "5"
self.trans[u"൯"] = "9"
self.trans[u"ੱ"] = "addak"
self.trans[u"ൿ"] = "k"
self.trans[u"ആ"] = "aa"
self.trans[u"ଊ"] = "uu"
self.trans[u"એ"] = "e"
self.trans[u"ਔ"] = "au"
self.trans[u"ഖ"] = "kha"
self.trans[u"ଚ"] = "ca"
self.trans[u"ટ"] = "tta"
self.trans[u"ਤ"] = "ta"
self.trans[u"ദ"] = "da"
self.trans[u"ପ"] = "pa"
self.trans[u"ય"] = "ya"
self.trans[u"ശ"] = "sha"
self.trans[u"િ"] = "i"
self.trans[u"െ"] = "e"
self.trans[u"൦"] = "0"
self.trans[u"୪"] = "4"
self.trans[u"૯"] = "9"
self.trans[u"ੴ"] = "onkar"
self.trans[u"ଅ"] = "a"
self.trans[u"ਏ"] = "ee"
self.trans[u"କ"] = "ka"
self.trans[u"ઔ"] = "au"
self.trans[u"ਟ"] = "tta"
self.trans[u"ഡ"] = "dda"
self.trans[u"ଥ"] = "tha"
self.trans[u"ત"] = "ta"
self.trans[u"ਯ"] = "ya"
self.trans[u"റ"] = "rra"
self.trans[u"ଵ"] = "va"
self.trans[u"ਿ"] = "i"
self.trans[u"ു"] = "u"
self.trans[u"ૄ"] = "rr"
self.trans[u"ൡ"] = "ll"
self.trans[u"੯"] = "9"
self.trans[u"൱"] = "100"
self.trans[u"୵"] = "sixteenth"
self.trans[u"અ"] = "a"
self.trans[u"ਊ"] = "uu"
self.trans[u"ഐ"] = "ai"
self.trans[u"ક"] = "ka"
self.trans[u"ଔ"] = "au"
self.trans[u"ਚ"] = "ca"
self.trans[u"ഠ"] = "ttha"
self.trans[u"થ"] = "tha"
self.trans[u"ତ"] = "ta"
self.trans[u"ਪ"] = "pa"
self.trans[u"ര"] = "ra"
self.trans[u"વ"] = "va"
self.trans[u"ീ"] = "ii"
self.trans[u"ૅ"] = "e"
self.trans[u"ୄ"] = "rr"
self.trans[u"ൠ"] = "rr"
self.trans[u"ਜ਼"] = "za"
self.trans[u"੪"] = "4"
self.trans[u"൰"] = "10"
self.trans[u"୴"] = "quarters"
self.trans[u"ਅ"] = "a"
self.trans[u"ഋ"] = "r"
self.trans[u"ઊ"] = "uu"
self.trans[u"ଏ"] = "e"
self.trans[u"ਕ"] = "ka"
self.trans[u"ഛ"] = "cha"
self.trans[u"ચ"] = "ca"
self.trans[u"ଟ"] = "tta"
self.trans[u"ਥ"] = "tha"
self.trans[u"ഫ"] = "pha"
self.trans[u"પ"] = "pa"
self.trans[u"ଯ"] = "ya"
self.trans[u"ਵ"] = "va"
self.trans[u"ି"] = "i"
self.trans[u"ോ"] = "oo"
self.trans[u"ୟ"] = "yya"
self.trans[u"൫"] = "5"
self.trans[u"૪"] = "4"
self.trans[u"୯"] = "9"
self.trans[u"ੵ"] = "yakash"
self.trans[u"ൻ"] = "n"
self.trans[u"ઃ"] = "visarga"
self.trans[u"ം"] = "anusvara"
self.trans[u"ਈ"] = "ii"
self.trans[u"ઓ"] = "o"
self.trans[u"ഒ"] = "o"
self.trans[u"ਘ"] = "gha"
self.trans[u"ଞ"] = "nya"
self.trans[u"ણ"] = "nna"
self.trans[u"ഢ"] = "ddha"
self.trans[u"ਲ਼"] = "lla"
self.trans[u"ਨ"] = "na"
self.trans[u"ମ"] = "ma"
self.trans[u"ળ"] = "lla"
self.trans[u"ല"] = "la"
self.trans[u"ਸ"] = "sa"
self.trans[u"¿"] = "?"
self.trans[u"ା"] = "aa"
self.trans[u"ૃ"] = "r"
self.trans[u"ൂ"] = "uu"
self.trans[u"ੈ"] = "ai"
self.trans[u"ૣ"] = "ll"
self.trans[u"ൢ"] = "l"
self.trans[u"੨"] = "2"
self.trans[u"୮"] = "8"
self.trans[u"൲"] = "1000"
self.trans[u"ਃ"] = "visarga"
self.trans[u"ଉ"] = "u"
self.trans[u"ઈ"] = "ii"
self.trans[u"ਓ"] = "oo"
self.trans[u"ଙ"] = "nga"
self.trans[u"ઘ"] = "gha"
self.trans[u"ഝ"] = "jha"
self.trans[u"ਣ"] = "nna"
self.trans[u"ન"] = "na"
self.trans[u"ഭ"] = "bha"
self.trans[u"ଜ"] = "ja"
self.trans[u"ହ"] = "ha"
self.trans[u"સ"] = "sa"
self.trans[u"ഽ"] = "avagraha"
self.trans[u"ૈ"] = "ai"
self.trans[u"്"] = "virama"
self.trans[u"୩"] = "3"
self.trans[u"૨"] = "2"
self.trans[u"൭"] = "7"
self.trans[u"ੳ"] = "ura"
self.trans[u"ൽ"] = "l"
self.trans[u"ઉ"] = "u"
self.trans[u"ଈ"] = "ii"
self.trans[u"ഌ"] = "l"
self.trans[u"ઙ"] = "nga"
self.trans[u"ଘ"] = "gha"
self.trans[u"ജ"] = "ja"
self.trans[u"ਞ"] = "nya"
self.trans[u"ନ"] = "na"
self.trans[u"ബ"] = "ba"
self.trans[u"ਮ"] = "ma"
self.trans[u"હ"] = "ha"
self.trans[u"ସ"] = "sa"
self.trans[u"ਾ"] = "aa"
self.trans[u"ૉ"] = "o"
self.trans[u"ୈ"] = "ai"
self.trans[u"ൌ"] = "au"
self.trans[u"૩"] = "3"
self.trans[u"୨"] = "2"
self.trans[u"൬"] = "6"
self.trans[u"੮"] = "8"
self.trans[u"ർ"] = "rr"
self.trans[u"ଃ"] = "visarga"
self.trans[u"ഇ"] = "i"
self.trans[u"ਉ"] = "u"
self.trans[u"ଓ"] = "o"
self.trans[u"ഗ"] = "ga"
self.trans[u"ਙ"] = "nga"
self.trans[u"ઞ"] = "nya"
self.trans[u"ଣ"] = "nna"
self.trans[u"ധ"] = "dha"
self.trans[u"મ"] = "ma"
self.trans[u"ଳ"] = "lla"
self.trans[u"ഷ"] = "ssa"
self.trans[u"ਹ"] = "ha"
self.trans[u"ਗ਼"] = "ghha"
self.trans[u"ા"] = "aa"
self.trans[u"ୃ"] = "r"
self.trans[u"േ"] = "ee"
self.trans[u"ൗ"] = "mark"
self.trans[u"ଢ଼"] = "rha"
self.trans[u"ୣ"] = "ll"
self.trans[u"൧"] = "1"
self.trans[u"੩"] = "3"
self.trans[u"૮"] = "8"
self.trans[u"୳"] = "half"
for char in self.trans:
value = self.trans[char]
if value == "?":
continue
while value.encode(encoding, 'replace').decode(encoding) == "?" and value in self.trans:
assert value != self.trans[value], "%r == self.trans[%r]!" % (value, value)
value = self.trans[value]
self.trans[char] = value
def transliterate(self, char, default="?", prev="-", next="-"):
"""
Transliterate the character.
@param char: The character to transliterate.
@type char: str
@param default: The character used when there is no transliteration.
@type default: str
@param prev: The previous character
@type prev: str
@param next: The next character
@type next: str
@return: The transliterated character which may be an empty string
@rtype: str
"""
if char in self.trans:
return self.trans[char]
# Arabic
if char == u"◌":
return prev
# Japanese
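# The small tsu (ッ) marks gemination of the following consonant; this is
# approximated by returning the first letter of the next character's
# transliteration.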
if char == u"ッ":
return self.transliterate(next)[0]
if char in u"々仝ヽヾゝゞ〱〲〳〵〴〵":
return prev
# Lao
if char == u"ຫ":
if next in u"ງຍນຣລຼຼວ":
return ""
else:
return "h"
return default
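# Illustrative usage (a hypothetical sketch, not part of the original file),
# assuming `t` is an instance of this transliteration class:
#     t.transliterate(u"ਗ")            # -> "ga"  (direct table lookup)
#     t.transliterate(u"◌", prev="a")  # -> "a"   (combining-mark placeholder returns the previous character)
# Characters without any mapping fall back to `default` (by default "?").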
```
#### File: pywikibot-core/scripts/image.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: b99f28d7bcaa362f192a3abec1aea87d5ee4d740 $'
#
import re
import pywikibot
from pywikibot import i18n, pagegenerators, Bot
from scripts.replace import ReplaceRobot as ReplaceBot
class ImageRobot(ReplaceBot):
"""This bot will replace or remove all occurrences of an old image."""
# Summary messages for replacing images
msg_replace = {
'ar': u'روبوت - استبدال الصورة %s مع %s',
'de': u'Bot: Ersetze Bild %s durch %s',
'en': u'Bot: Replacing image %s with %s',
'es': u'Robot - Reemplazando imagen %s por %s',
'fa': u'ربات: جایگزین کردن تصویر %s با %s',
'fr': u'Bot: Remplace image %s par %s',
'he': u'בוט: מחליף את התמונה %s בתמונה %s',
'it': u"Bot: Sostituisco l'immagine %s con %s",
'ja': u'ロボットによる:画像置き換え %s から %s へ',
'ko': u'로봇 - 그림 %s을 %s로 치환',
'lt': u'robotas: vaizdas %s keičiamas į %s',
'nn': u'robot: erstatta biletet %s med %s',
'no': u'robot: erstatter bildet %s med %s',
'nl': u'Bot: afbeelding %s vervangen door %s',
'pl': u'Robot zamienia obraz %s na %s',
'pt': u'Bot: Alterando imagem %s para %s',
'ru': u'Бот: Замена файла %s на %s',
'zh': u'機器人:取代圖像 %s 至 %s',
}
# Summary messages for removing images
msg_remove = {
'ar': u'روبوت - إزالة الصورة %s',
'de': u'Bot: Entferne Bild %s',
'en': u'Robot: Removing image %s',
'es': u'Robot - Retirando imagen %s',
'fa': u'ربات: برداشتن تصویر %s',
'fr': u'Bot: Enleve image %s',
'he': u'בוט: מסיר את התמונה %s',
'it': u"Bot: Rimuovo l'immagine %s",
'ja': u'ロボットによる:画像削除 %s',
'ko': u'로봇 - %s 그림을 제거',
'lt': u'robotas: Šalinamas vaizdas %s',
'nl': u'Bot: afbeelding %s verwijderd',
'no': u'robot: fjerner bildet %s',
'nn': u'robot: fjerna biletet %s',
'pl': u'Robot usuwa obraz %s',
'pt': u'Bot: Alterando imagem %s',
'ru': u'Бот: удалил файл %s',
'zh': u'機器人:移除圖像 %s',
}
def __init__(self, generator, old_image, new_image=None, **kwargs):
"""
Constructor.
@param generator: the pages to work on
@type generator: iterable
@param old_image: the title of the old image (without namespace)
@type old_image: unicode
@param new_image: the title of the new image (without namespace), or
None if you want to remove the image
@type new_image: unicode or None
"""
self.availableOptions.update({
'summary': None,
'loose': False,
})
Bot.__init__(self, generator=generator, **kwargs)
self.old_image = old_image
self.new_image = new_image
if not self.getOption('summary'):
self.options['summary'] = i18n.translate(
self.site, self.msg_replace,
(self.old_image, self.new_image) if self.new_image
else self.old_image,
fallback=True)
# regular expression to find the old image link.
# [[File:...]] and the File namespace aliases (e.g. [[Image:...]]) are
# matched; for first-letter-case namespaces the first letter of the file
# name is matched case-insensitively.
# The group 'parameters' will either match the parameters, or an
# empty string if there are none.
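# Purely illustrative (hypothetical file name): with old_image = "Example.jpg"
# the pattern built below would match "[[File:Example.jpg|thumb|Caption]]" as
# well as "[[Image:example.jpg]]"; the named group 'parameters' captures
# "|thumb|Caption" or an empty string.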
replacements = []
namespace = self.site.namespaces[6]
if namespace.case == 'first-letter':
case = re.escape(self.old_image[0].upper() +
self.old_image[0].lower())
escaped = '[' + case + ']' + re.escape(self.old_image[1:])
else:
escaped = re.escape(self.old_image)
# Be careful, spaces and _ have been converted to '\ ' and '\_'
escaped = re.sub('\\\\[_ ]', '[_ ]', escaped)
if not self.getOption('loose') or not self.new_image:
image_regex = re.compile(
r'\[\[ *(?:%s)\s*:\s*%s *(?P<parameters>\|[^\n]+|) *\]\]'
% ('|'.join(namespace), escaped))
else:
image_regex = re.compile(r'' + escaped)
if self.new_image:
if not self.getOption('loose'):
replacements.append((image_regex,
u'[[%s:%s\\g<parameters>]]'
% (self.site.namespaces.FILE,
self.new_image)))
else:
replacements.append((image_regex, self.new_image))
else:
replacements.append((image_regex, ''))
super(ImageRobot, self).__init__(self.generator, replacements,
always=self.getOption('always'),
summary=self.getOption('summary'))
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
old_image = None
new_image = None
options = {}
for arg in pywikibot.handle_args(args):
if arg == '-always':
options['always'] = True
elif arg == '-loose':
options['loose'] = True
elif arg.startswith('-summary'):
if len(arg) == len('-summary'):
options['summary'] = pywikibot.input(u'Choose an edit summary: ')
else:
options['summary'] = arg[len('-summary:'):]
elif old_image:
new_image = arg
else:
old_image = arg
if old_image:
site = pywikibot.Site()
old_imagepage = pywikibot.FilePage(site, old_image)
gen = pagegenerators.FileLinksGenerator(old_imagepage)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
bot = ImageRobot(preloadingGen, old_image, new_image, **options)
bot.run()
return True
else:
pywikibot.bot.suggest_help(missing_parameters=['old image'])
return False
if __name__ == "__main__":
main()
```
#### File: pywikibot-core/scripts/movepages.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 0a1174b1cde335df259e520369bf76f62f4a8da0 $'
#
import re
import pywikibot
from pywikibot.exceptions import ArgumentDeprecationWarning
from pywikibot.tools import issue_deprecation_warning
from pywikibot import i18n, pagegenerators
from pywikibot.bot import MultipleSitesBot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
class MovePagesBot(MultipleSitesBot):
"""Page move bot."""
def __init__(self, generator, **kwargs):
"""Constructor."""
self.availableOptions.update({
'prefix': None,
'noredirect': False,
'movetalkpage': True,
'skipredirects': False,
'summary': None,
})
super(MovePagesBot, self).__init__(**kwargs)
self.generator = generator
self.appendAll = False
self.regexAll = False
self.noNamespace = False
def moveOne(self, page, newPageTitle):
"""Move on page to newPageTitle."""
try:
msg = self.getOption('summary')
if not msg:
msg = i18n.twtranslate(page.site, 'movepages-moving')
pywikibot.output(u'Moving page %s to [[%s]]'
% (page.title(asLink=True),
newPageTitle))
page.move(newPageTitle, reason=msg, movetalkpage=self.getOption('movetalkpage'),
deleteAndMove=self.getOption('noredirect'))
except pywikibot.PageRelatedError as error:
pywikibot.output(error)
def treat(self, page):
"""Treat a single page."""
self.current_page = page
if self.getOption('skipredirects') and page.isRedirectPage():
pywikibot.output(u'Page %s is a redirect; skipping.' % page.title())
return
pagetitle = page.title(withNamespace=False)
namesp = page.site.namespace(page.namespace())
if self.appendAll:
newPageTitle = (u'%s%s%s'
% (self.pagestart, pagetitle, self.pageend))
if not self.noNamespace and namesp:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
elif self.regexAll:
newPageTitle = self.regex.sub(self.replacePattern, pagetitle)
if not self.noNamespace and namesp:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
if self.getOption('prefix'):
newPageTitle = (u'%s%s' % (self.getOption('prefix'), pagetitle))
if self.getOption('prefix') or self.appendAll or self.regexAll:
if self.user_confirm('Change the page title to "%s"?'
% newPageTitle):
self.moveOne(page, newPageTitle)
else:
choice = pywikibot.input_choice(u'What do you want to do?',
[('change page name', 'c'),
('append to page name', 'a'),
('use a regular expression', 'r'),
('next page', 'n')])
if choice == 'c':
newPageTitle = pywikibot.input(u'New page name:')
self.moveOne(page, newPageTitle)
elif choice == 'a':
self.pagestart = pywikibot.input(u'Append this to the start:')
self.pageend = pywikibot.input(u'Append this to the end:')
newPageTitle = (u'%s%s%s'
% (self.pagestart, pagetitle, self.pageend))
if namesp:
if pywikibot.input_yn(u'Do you want to remove the '
'namespace prefix "%s:"?' % namesp,
automatic_quit=False):
self.noNamespace = True
else:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
choice2 = pywikibot.input_choice(
u'Change the page title to "%s"?'
% newPageTitle, [('yes', 'y'), ('no', 'n'), ('all', 'a')])
if choice2 == 'y':
self.moveOne(page, newPageTitle)
elif choice2 == 'a':
self.appendAll = True
self.moveOne(page, newPageTitle)
elif choice == 'r':
searchPattern = pywikibot.input(u'Enter the search pattern:')
self.replacePattern = pywikibot.input(
u'Enter the replace pattern:')
self.regex = re.compile(searchPattern)
if page.title() == page.title(withNamespace=False):
newPageTitle = self.regex.sub(self.replacePattern,
page.title())
else:
if pywikibot.input_yn(u'Do you want to remove the '
'namespace prefix "%s:"?' % namesp,
automatic_quit=False):
newPageTitle = self.regex.sub(
self.replacePattern, page.title(withNamespace=False))
self.noNamespace = True
else:
newPageTitle = self.regex.sub(self.replacePattern,
page.title())
choice2 = pywikibot.input_choice(
u'Change the page title to "%s"?'
% newPageTitle, [('yes', 'y'), ('no', 'n'), ('all', 'a')])
if choice2 == 'y':
self.moveOne(page, newPageTitle)
elif choice2 == 'a':
self.regexAll = True
self.moveOne(page, newPageTitle)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
gen = None
oldName = None
options = {}
fromToPairs = []
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg.startswith('-pairs'):
issue_deprecation_warning(
'-pairs',
'-pairsfile',
2, ArgumentDeprecationWarning)
elif arg.startswith('-pairsfile'):
if len(arg) == len('-pairsfile'):
filename = pywikibot.input(
u'Enter the name of the file containing pairs:')
else:
filename = arg[len('-pairsfile:'):]
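# The pairs file is read as alternating titles: each title is paired with
# the one following it (old title first, new title second), as done by the
# loop below.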
oldName1 = None
for page in pagegenerators.TextfilePageGenerator(filename):
if oldName1:
fromToPairs.append([oldName1, page.title()])
oldName1 = None
else:
oldName1 = page.title()
if oldName1:
pywikibot.warning(
u'file %s contains odd number of links' % filename)
elif arg == '-noredirect':
options['noredirect'] = True
elif arg == '-notalkpage':
options['movetalkpage'] = False
elif arg == '-always':
options['always'] = True
elif arg == '-skipredirects':
options['skipredirects'] = True
elif arg.startswith('-from:'):
if oldName:
pywikibot.warning(u'-from:%s without -to:' % oldName)
oldName = arg[len('-from:'):]
elif arg.startswith('-to:'):
if oldName:
fromToPairs.append([oldName, arg[len('-to:'):]])
oldName = None
else:
pywikibot.warning(u'%s without -from' % arg)
elif arg.startswith('-prefix'):
if len(arg) == len('-prefix'):
options['prefix'] = pywikibot.input(u'Enter the prefix:')
else:
options['prefix'] = arg[8:]
elif arg.startswith('-summary'):
if len(arg) == len('-summary'):
options['summary'] = pywikibot.input(u'Enter the summary:')
else:
options['summary'] = arg[9:]
else:
genFactory.handleArg(arg)
if oldName:
pywikibot.warning(u'-from:%s without -to:' % oldName)
site = pywikibot.Site()
for pair in fromToPairs:
page = pywikibot.Page(site, pair[0])
bot = MovePagesBot(None, **options)
bot.moveOne(page, pair[1])
if not gen:
gen = genFactory.getCombinedGenerator()
if gen:
preloadingGen = pagegenerators.PreloadingGenerator(gen)
bot = MovePagesBot(preloadingGen, **options)
bot.run()
return True
if not fromToPairs:
pywikibot.bot.suggest_help(missing_generator=True)
return False
else:
return True
if __name__ == '__main__':
main()
```
#### File: pywikibot-core/tests/category_bot_tests.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 293679b0346c1ceead92c540a46a5229d06cc334 $'
from scripts.category import CategoryMoveRobot
from tests.aspects import unittest, DefaultSiteTestCase
class CfdActions(DefaultSiteTestCase):
"""Test CFD (Categories for deletion) actions."""
def test_strip_cfd_templates_does_nothing_when_no_templates(self):
"""Test that when there are no CFD templates, the page text is not changed."""
bot = CategoryMoveRobot(oldcat='Old', newcat='New')
bot.newcat.text = "Nothing should change.\n\nAnother line."
bot._strip_cfd_templates(commit=False)
self.assertEqual(bot.newcat.text,
"Nothing should change.\n\nAnother line.")
def test_strip_cfd_templates_with_spaces_in_comments(self):
"""Test that CFD templates with spaces in the syntax are removed properly."""
self._runtest_strip_cfd_templates('<!-- BEGIN CFD TEMPLATE -->',
'<!-- END CFD TEMPLATE -->')
def test_strip_cfd_templates_without_spaces_in_comments(self):
"""Test that CFD templates without spaces in the syntax are removed properly."""
self._runtest_strip_cfd_templates('<!--BEGIN CFD TEMPLATE-->',
'<!--END CFD TEMPLATE-->')
def _runtest_strip_cfd_templates(self, template_start, template_end):
"""Run a CFD template stripping test with the given CFD start/end templates."""
bot = CategoryMoveRobot(oldcat='Old', newcat='New')
bot.newcat.text = '\n'.join((
'Preamble',
template_start,
'Random text inside template',
'Even another template: {{cfr-speedy}}',
template_end,
'Footer stuff afterwards',
'',
'[[Category:Should remain]]'
))
expected = '\n'.join((
'Preamble',
'Footer stuff afterwards',
'',
'[[Category:Should remain]]'
))
bot._strip_cfd_templates(commit=False)
self.assertEqual(bot.newcat.text, expected)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
```
#### File: pywikibot-core/tests/tools_tests.py
```python
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: ed039b5863e3a6b5155667775080e2916e82da76 $'
import collections
import decimal
import inspect
import os.path
import subprocess
import tempfile
import warnings
from pywikibot import tools
from tests import join_xml_data_path
from tests.aspects import (
unittest, require_modules, DeprecationTestCase, TestCase, MetaTestCaseClass
)
from tests.utils import expected_failure_if, add_metaclass
class ContextManagerWrapperTestCase(TestCase):
"""Test that ContextManagerWrapper is working correctly."""
class DummyClass(object):
"""A dummy class which has some values and a close method."""
class_var = 42
def __init__(self):
"""Create instance with dummy values."""
self.instance_var = 1337
self.closed = False
def close(self):
"""Just store that it has been closed."""
self.closed = True
net = False
def test_wrapper(self):
"""Create a test instance and verify the wrapper redirects."""
obj = self.DummyClass()
wrapped = tools.ContextManagerWrapper(obj)
self.assertIs(wrapped.class_var, obj.class_var)
self.assertIs(wrapped.instance_var, obj.instance_var)
self.assertIs(wrapped._wrapped, obj)
self.assertFalse(obj.closed)
with wrapped as unwrapped:
self.assertFalse(obj.closed)
self.assertIs(unwrapped, obj)
unwrapped.class_var = 47
self.assertTrue(obj.closed)
self.assertEqual(wrapped.class_var, 47)
def test_exec_wrapper(self):
"""Check that the wrapper permits exceptions."""
wrapper = tools.ContextManagerWrapper(self.DummyClass())
self.assertFalse(wrapper.closed)
with self.assertRaises(ZeroDivisionError):
with wrapper:
1 / 0
self.assertTrue(wrapper.closed)
class OpenArchiveTestCase(TestCase):
"""
Unit test class for tools.
The tests for open_archive requires that article-pyrus.xml* contain all
the same content after extraction. The content itself is not important.
The file article-pyrus.xml_invalid.7z is not a valid 7z file and
open_archive will fail extracting it using 7za.
"""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _get_content(self, *args, **kwargs):
"""Use open_archive and return content using a with-statement."""
with tools.open_archive(*args, **kwargs) as f:
return f.read()
def test_open_archive_normal(self):
"""Test open_archive with no compression in the standard library."""
self.assertEqual(self._get_content(self.base_file), self.original_content)
def test_open_archive_bz2(self):
"""Test open_archive with bz2 compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.bz2'), self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2', use_extension=False),
self.original_content)
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
"""Test open_archive when bz2file library."""
old_bz2 = tools.bz2
try:
tools.bz2 = __import__('bz2file')
self.assertEqual(self._get_content(self.base_file + '.bz2'),
self.original_content)
self.assertEqual(self._get_content(self.base_file + '.bz2',
use_extension=False),
self.original_content)
finally:
tools.bz2 = old_bz2
def test_open_archive_without_bz2(self):
"""Test open_archive when bz2 and bz2file are not available."""
old_bz2 = tools.bz2
try:
tools.bz2 = ImportError()
self.assertRaises(ImportError, self._get_content, self.base_file + '.bz2')
finally:
tools.bz2 = old_bz2
def test_open_archive_gz(self):
"""Test open_archive with gz compressor in the standard library."""
self.assertEqual(self._get_content(self.base_file + '.gz'), self.original_content)
def test_open_archive_7z(self):
"""Test open_archive with 7za if installed."""
try:
subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
except OSError:
raise unittest.SkipTest('7za not installed')
self.assertEqual(self._get_content(self.base_file + '.7z'), self.original_content)
self.assertRaises(OSError, self._get_content, self.base_file + '_invalid.7z',
use_extension=True)
class OpenCompressedTestCase(OpenArchiveTestCase, DeprecationTestCase):
"""Test opening files with the deprecated open_compressed."""
net = False
def _get_content(self, *args, **kwargs):
"""Use open_compressed and return content using a with-statement."""
# open_archive's use_extension argument defaults to True while
# open_compressed's defaults to False; when the test explicitly passes the
# non-default False, flip it to True so open_compressed is also exercised
# with its non-default value.
if kwargs.get('use_extension') is False:
kwargs['use_extension'] = True
with tools.open_compressed(*args, **kwargs) as f:
content = f.read()
self.assertOneDeprecation(self.INSTEAD)
return content
class OpenArchiveWriteTestCase(TestCase):
"""Test writing with open_archive."""
net = False
@classmethod
def setUpClass(cls):
"""Define base_file and original_content."""
super(OpenArchiveWriteTestCase, cls).setUpClass()
cls.base_file = join_xml_data_path('article-pyrus.xml')
with open(cls.base_file, 'rb') as f:
cls.original_content = f.read()
def _write_content(self, suffix):
try:
fh, fn = tempfile.mkstemp(suffix)
with tools.open_archive(fn, 'wb') as f:
f.write(self.original_content)
with tools.open_archive(fn, 'rb') as f:
self.assertEqual(f.read(), self.original_content)
with open(fn, 'rb') as f:
return f.read()
finally:
os.close(fh)
os.remove(fn)
def test_invalid_modes(self):
"""Test various invalid mode configurations."""
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'ra') # two modes besides
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'rt') # text mode
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'br') # binary at front
self.assertRaises(ValueError, tools.open_archive,
'/dev/null', 'wb', False) # writing without extension
def test_binary_mode(self):
"""Test that it uses binary mode."""
with tools.open_archive(self.base_file, 'r') as f:
self.assertEqual(f.mode, 'rb')
self.assertIsInstance(f.read(), bytes)
def test_write_archive_bz2(self):
"""Test writing a bz2 archive."""
content = self._write_content('.bz2')
with open(self.base_file + '.bz2', 'rb') as f:
self.assertEqual(content, f.read())
def test_write_archive_gz(self):
"""Test writing a gz archive."""
content = self._write_content('.gz')
self.assertEqual(content[:3], b'\x1F\x8B\x08')
def test_write_archive_7z(self):
"""Test writing an archive as a 7z archive."""
self.assertRaises(NotImplementedError, tools.open_archive,
'/dev/null.7z', mode='wb')
class MergeUniqueDicts(TestCase):
"""Test merge_unique_dicts."""
net = False
dct1 = {'foo': 'bar', '42': 'answer'}
dct2 = {47: 'Star', 74: 'Trek'}
dct_both = dct1.copy()
dct_both.update(dct2)
def test_single(self):
"""Test that it returns the dict itself when there is only one."""
self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
def test_multiple(self):
"""Test that it actually merges dicts."""
self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
self.dct_both)
self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
self.dct_both)
def test_different_type(self):
"""Test that the keys can be different types."""
self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
{'1': 'str', 1: 'int'})
def test_conflict(self):
"""Test that it detects conflicts."""
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **{'42': 'bad'})
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, self.dct1)
self.assertRaisesRegex(
ValueError, '42', tools.merge_unique_dicts, self.dct1, **self.dct1)
def passthrough(x):
"""Return x."""
return x
class SkipList(set):
"""Container that ignores items."""
skip_list = [1, 3]
def __contains__(self, item):
"""Override to not process some items."""
if item in self.skip_list:
return True
else:
return super(SkipList, self).__contains__(item)
class ProcessAgainList(set):
"""Container that keeps processing certain items."""
process_again_list = [1, 3]
def add(self, item):
"""Override to not add some items."""
if item in self.process_again_list:
return
else:
return super(ProcessAgainList, self).add(item)
class ContainsStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def __contains__(self, item):
"""Override to stop on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
return super(ContainsStopList, self).__contains__(item)
class AddStopList(set):
"""Container that stops when encountering items."""
stop_list = []
def add(self, item):
"""Override to not continue on encountering items."""
if item in self.stop_list:
raise StopIteration
else:
super(AddStopList, self).add(item)
class TestFilterUnique(TestCase):
"""Test filter_unique."""
net = False
ints = [1, 3, 2, 1, 2, 1, 2, 4, 2]
strs = [str(i) for i in ints]
decs = [decimal.Decimal(i) for i in ints]
def _test_dedup_int(self, deduped, deduper, key=None):
"""Test filter_unique results for int."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), 1)
self.assertEqual(next(deduper), 3)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 3])
else:
self.assertEqual(deduped, set([1, 3]))
self.assertEqual(next(deduper), 2)
self.assertEqual(next(deduper), 4)
if key in (hash, passthrough):
if isinstance(deduped, tools.OrderedDict):
self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
elif isinstance(deduped, collections.Mapping):
self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
else:
self.assertEqual(deduped, set([1, 2, 3, 4]))
self.assertRaises(StopIteration, next, deduper)
def _test_dedup_str(self, deduped, deduper, key=None):
"""Test filter_unique results for str."""
if not key:
key = passthrough
self.assertEqual(len(deduped), 0)
self.assertEqual(next(deduper), '1')
self.assertEqual(next(deduper), '3')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key('1'), key('3')])
else:
self.assertEqual(deduped, set([key('1'), key('3')]))
self.assertEqual(next(deduper), '2')
self.assertEqual(next(deduper), '4')
if key in (hash, passthrough):
if isinstance(deduped, collections.Mapping):
self.assertEqual(deduped.keys(), [key(i) for i in self.strs])
else:
self.assertEqual(deduped, set(key(i) for i in self.strs))
self.assertRaises(StopIteration, next, deduper)
def test_set(self):
"""Test filter_unique with a set."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_dict(self):
"""Test filter_unique with a dict."""
deduped = dict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_OrderedDict(self):
"""Test filter_unique with a OrderedDict."""
deduped = tools.OrderedDict()
deduper = tools.filter_unique(self.ints, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_int_hash(self):
"""Test filter_unique with ints using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
def test_int_id(self):
"""Test filter_unique with ints using id as key."""
deduped = set()
deduper = tools.filter_unique(self.ints, container=deduped, key=id)
self._test_dedup_int(deduped, deduper, id)
def test_obj(self):
"""Test filter_unique with objects."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped)
self._test_dedup_int(deduped, deduper)
def test_obj_hash(self):
"""Test filter_unique with objects using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=hash)
self._test_dedup_int(deduped, deduper, hash)
@unittest.expectedFailure
def test_obj_id(self):
"""Test filter_unique with objects using id as key, which fails."""
# Two objects which may be equal do not have the same id.
deduped = set()
deduper = tools.filter_unique(self.decs, container=deduped, key=id)
self._test_dedup_int(deduped, deduper, id)
def test_str(self):
"""Test filter_unique with str."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped)
self._test_dedup_str(deduped, deduper)
def test_str_hash(self):
"""Test filter_unique with str using hash as key."""
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=hash)
self._test_dedup_str(deduped, deduper, hash)
@expected_failure_if(not tools.PY2)
def test_str_id(self):
"""Test str using id as key fails on Python 3."""
# str in Python 3 behave like objects.
deduped = set()
deduper = tools.filter_unique(self.strs, container=deduped, key=id)
self._test_dedup_str(deduped, deduper, id)
def test_for_resumable(self):
"""Test filter_unique is resumable after a for loop."""
gen2 = tools.filter_unique(self.ints)
deduped = []
for item in gen2:
deduped.append(item)
if len(deduped) == 3:
break
self.assertEqual(deduped, [1, 3, 2])
last = next(gen2)
self.assertEqual(last, 4)
self.assertRaises(StopIteration, next, gen2)
def test_skip(self):
"""Test filter_unique with a container that skips items."""
deduped = SkipList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([2, 4]))
def test_process_again(self):
"""Test filter_unique with an ignoring container."""
deduped = ProcessAgainList()
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4])
self.assertEqual(deduped, set([2, 4]))
def test_stop(self):
"""Test filter_unique with an ignoring container."""
deduped = ContainsStopList()
deduped.stop_list = [2]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
deduped = AddStopList()
deduped.stop_list = [4]
deduper = tools.filter_unique(self.ints, container=deduped)
deduped_out = list(deduper)
self.assertCountEqual(deduped, deduped_out)
self.assertEqual(deduped, set([1, 2, 3]))
# And it should not resume
self.assertRaises(StopIteration, next, deduper)
class MetaTestArgSpec(MetaTestCaseClass):
"""Metaclass to create dynamically the tests. Set the net flag to false."""
def __new__(cls, name, bases, dct):
"""Create a new test case class."""
def create_test(method):
def test_method(self):
"""Test getargspec."""
# all expect at least self and param
expected = method(1, 2)
returned = self.getargspec(method)
self.assertEqual(returned, expected)
self.assertIsInstance(returned, self.expected_class)
self.assertNoDeprecation()
return test_method
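# For each '_method_test_<suffix>' attribute defined on the class, generate a
# matching 'test_method_<suffix>' test that compares self.getargspec(method)
# with the expected ArgSpec tuple returned by calling the method itself.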
for attr, tested_method in list(dct.items()):
if attr.startswith('_method_test_'):
suffix = attr[len('_method_test_'):]
cls.add_method(dct, 'test_method_' + suffix,
create_test(tested_method),
doc_suffix='on {0}'.format(suffix))
dct['net'] = False
return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
@add_metaclass
class TestArgSpec(DeprecationTestCase):
"""Test getargspec and ArgSpec from tools."""
__metaclass__ = MetaTestArgSpec
expected_class = tools.ArgSpec
def _method_test_args(self, param):
"""Test method with two positional arguments."""
return (['self', 'param'], None, None, None)
def _method_test_kwargs(self, param=42):
"""Test method with one positional and one keyword argument."""
return (['self', 'param'], None, None, (42,))
def _method_test_varargs(self, param, *var):
"""Test method with two positional arguments and var args."""
return (['self', 'param'], 'var', None, None)
def _method_test_varkwargs(self, param, **var):
"""Test method with two positional arguments and var kwargs."""
return (['self', 'param'], None, 'var', None)
def _method_test_vars(self, param, *args, **kwargs):
"""Test method with two positional arguments and both var args."""
return (['self', 'param'], 'args', 'kwargs', None)
def getargspec(self, method):
"""Call tested getargspec function."""
return tools.getargspec(method)
@unittest.skipIf(tools.PYTHON_VERSION >= (3, 6), 'removed in Python 3.6')
class TestPythonArgSpec(TestArgSpec):
"""Test the same tests using Python's implementation."""
expected_class = inspect.ArgSpec
def getargspec(self, method):
"""Call inspect's getargspec function."""
with warnings.catch_warnings():
if tools.PYTHON_VERSION >= (3, 5):
warnings.simplefilter('ignore', DeprecationWarning)
return inspect.getargspec(method)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
``` |
{
"source": "jkjt/ezdxf",
"score": 2
} |
#### File: examples/render/dimension_arc.py
```python
from typing import Optional
import pathlib
import math
import ezdxf
from ezdxf.math import Vec3, UCS, ConstructionArc
import logging
# ========================================
# Setup logging
# ========================================
logging.basicConfig(level="WARNING")
# ========================================
# Setup your preferred output directory
# ========================================
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
if not OUTDIR.exists():
OUTDIR = pathlib.Path()
# ========================================
# Default text attributes
# ========================================
TEXT_ATTRIBS = {
"height": 0.25,
"style": ezdxf.options.default_dimension_text_style,
}
DIM_TEXT_STYLE = ezdxf.options.default_dimension_text_style
# =======================================================
# Discarding dimension rendering is possible
# for BricsCAD, but is incompatible with AutoCAD -> error
# =======================================================
BRICSCAD = False
DXFVERSION = "R2013"
def add_lines(
msp, center: Vec3, radius: float, start_angle: float, end_angle: float
):
attribs = {"color": 7}
start_point = center + Vec3.from_deg_angle(start_angle) * radius
end_point = center + Vec3.from_deg_angle(end_angle) * radius
msp.add_line(center, start_point, dxfattribs=attribs)
msp.add_line(center, end_point, dxfattribs=attribs)
def add_arc(
msp, center: Vec3, radius: float, start_angle: float, end_angle: float
):
attribs = {"color": 7}
msp.add_arc(center, radius, start_angle, end_angle, dxfattribs=attribs)
def arc_cra_default(
distance: float,
filename: str,
show_angle=True,
override: dict = None,
):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
radius = 5
data = [
[Vec3(0, 0), 60, 120],
[Vec3(10, 0), 300, 240],
[Vec3(20, 0), 240, 300],
[Vec3(30, 0), 300, 30],
]
if override is None:
override = dict()
for dimtad, offset in [(1, (0, 20)), (0, (0, 0)), (4, (0, -20))]:
for center, start_angle, end_angle in data:
center += Vec3(offset)
override["dimtad"] = dimtad
if show_angle:
add_lines(msp, center, radius, start_angle, end_angle)
add_arc(msp, center, radius, start_angle, end_angle)
# Default DimStyle EZ_CURVED:
# - angle units = degree
# - scale 1: 100
# - closed filled arrow, size = 0.25
# - text location above dimension line
# - arc symbol is disabled
#
# center:
# center of angle
# radius:
# distance from center to the start of the extension lines
# distance:
# distance from start of the extension lines to the dimension line
# start_angle:
# start angle in degrees
# end_angle:
# end angle in degrees
dim = msp.add_arc_dim_cra(
center=center,
radius=radius,
start_angle=start_angle,
end_angle=end_angle,
distance=distance,
override=override,
)
# Necessary second step to create the BLOCK entity with the DIMENSION
# geometry. Ezdxf supports DXF R2000 attributes for DXF R12 rendering,
# but they have to be applied by the DIMSTYLE override feature; these
# additional attributes are not stored in the XDATA section of the
# DIMENSION entity, they are just used to render the DIMENSION entity.
# The return value `dim` is not a DIMENSION entity but a
# DimStyleOverride object; the DIMENSION entity is stored as
# dim.dimension, see also the ezdxf.override.DimStyleOverride class.
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=70)
doc.saveas(OUTDIR / f"{filename}_{DXFVERSION}.dxf")
def arc_cra_default_outside():
"""Outside means: the dimension line is farther away from the center than
the extension line start points.
"""
arc_cra_default(
distance=2.0,
filename="dim_arc_cra_outside",
show_angle=True,
)
def arc_cra_default_outside_fixed_extension_length():
"""Outside means: the dimension line is farther away from the center than
the extension line start points.
"""
arc_cra_default(
distance=2.0,
filename="dim_arc_cra_outside_fxl",
show_angle=True,
override={
"dimfxlon": 1, # use fixed length extension lines
"dimexe": 0.5, # length "above" the dimension line
"dimfxl": 1.0, # length "below" the dimension line
},
)
def arc_cra_default_inside():
"""Inside means: the dimension line is closer to the center than
the extension line start points.
"""
arc_cra_default(
distance=-2.0,
filename="dim_arc_cra_inside",
show_angle=False,
)
def arc_cra_default_inside_fixed_extension_length():
"""Inside means: the dimension line is closer to the center than
the extension line start points.
"""
arc_cra_default(
distance=-2.0,
filename="dim_arc_cra_inside_fxl",
show_angle=False,
override={
"dimfxlon": 1, # use fixed length extension lines
"dimexe": 0.5, # length "above" the dimension line
"dimfxl": 1.0, # length "below" the dimension line
},
)
def arc_3p_default(distance: float = 2.0):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
radius = 5
data = [
[Vec3(0, 0), 60, 120],
[Vec3(10, 0), 300, 240],
[Vec3(20, 0), 240, 300],
]
for dimtad, offset in [(1, (0, 20)), (0, (0, 0)), (4, (0, -20))]:
for center, start_angle, end_angle in data:
center += Vec3(offset)
dir1 = Vec3.from_deg_angle(start_angle)
dir2 = Vec3.from_deg_angle(end_angle)
# calculate defpoints from parameters of the "cra" example:
p1 = center + dir1 * radius
p2 = center + dir2 * radius
base = center + dir1.lerp(dir2) * (radius + distance)
add_lines(msp, center, radius, start_angle, end_angle)
add_arc(msp, center, radius, start_angle, end_angle)
msp.add_arc_dim_3p(
base,
center,
p1,
p2,
override={
"dimtad": dimtad,
"dimtxt": 1,
"dimasz": 1,
"dimgap": 0.25,
},
).render(discard=BRICSCAD)
doc.set_modelspace_vport(height=70)
doc.saveas(OUTDIR / f"dim_arc_3p_{DXFVERSION}.dxf")
def dim_arc_3d():
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
for center, radius, sa, ea, distance in [
[Vec3(0, 0), 5, 60, 90, 2]
]:
arc = ConstructionArc(center, radius, sa, ea)
ucs = UCS(origin=center + (5, 5)).rotate_local_x(math.radians(45))
msp.add_line(arc.center, arc.start_point).transform(ucs.matrix)
msp.add_line(arc.center, arc.end_point).transform(ucs.matrix)
dim = msp.add_arc_dim_arc(
arc=arc,
distance=distance, dimstyle="EZ_CURVED"
)
dim.render(discard=BRICSCAD, ucs=ucs)
doc.set_modelspace_vport(height=30)
doc.saveas(OUTDIR / f"dim_arc_3d_{DXFVERSION}.dxf")
def arc_units_tol_limits():
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
radius = 5
distance = 2
data = [
[Vec3(0, 0), 60, 120, 0, 0],
[Vec3(10, 0), 300, 240, 0, 0],
[Vec3(20, 0), 240, 300, 1, 0], # tolerance
[Vec3(30, 0), 300, 30, 0, 1], # limits
]
for dimaunit, offset in [
[0, Vec3(0, 0)],
[1, Vec3(0, 20)],
[2, Vec3(0, 40)],
[3, Vec3(0, 60)],
]:
for center, start_angle, end_angle, dimtol, dimlim in data:
center += offset
add_lines(msp, center, radius, start_angle, end_angle)
dim = msp.add_arc_dim_cra(
center,
radius,
start_angle,
end_angle,
distance,
override={
"dimaunit": dimaunit,
"dimtol": dimtol,
"dimtp": 1.0,
"dimtm": 2.0,
"dimlim": dimlim,
},
)
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=70)
doc.saveas(OUTDIR / f"dim_arc_units_tol_limits_{DXFVERSION}.dxf")
def add_arc_dim(
msp,
center: Vec3,
angle: float,
delta: float,
radius: float,
distance: float,
text_rotation: Optional[float],
override: dict,
):
start_angle = angle - delta
end_angle = angle + delta
add_lines(msp, center, radius, start_angle, end_angle)
dim = msp.add_arc_dim_cra(
center,
radius,
start_angle,
end_angle,
distance,
text_rotation=text_rotation,
override=override,
)
return dim
def measure_fixed_angle(angle: float):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
x_dist = 15
for dimtad, y_dist in [[0, 0], [1, 20], [4, 40]]:
for count in range(8):
dim = add_arc_dim(
msp,
center=Vec3(x_dist * count, y_dist),
angle=45.0 * count,
delta=angle / 2.0,
radius=3.0,
distance=1.0,
text_rotation=None,
override={"dimtad": dimtad},
)
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=100, center=(x_dist * 4, 20))
doc.saveas(OUTDIR / f"dim_arc_deg_{angle:.0f}_{DXFVERSION}.dxf")
def usr_location_absolute(angle: float, rotation: Optional[float] = None):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
x_dist = 15
radius = 3.0
distance = 1.0
for dimtad, y_dist, leader in [
[0, 0, False],
[0, 20, True],
[4, 40, True],
]:
for count in range(8):
center = Vec3(x_dist * count, y_dist)
main_angle = 45.0 * count
dim = add_arc_dim(
msp,
center=center,
angle=main_angle,
delta=angle / 2.0,
radius=radius,
distance=distance,
text_rotation=rotation,
override={"dimtad": dimtad},
)
            # user location in WCS coordinates, absolute location:
usr_location = center + Vec3.from_deg_angle(
main_angle, radius + distance * 2.0
)
dim.set_location(usr_location, leader=leader)
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=100, center=(x_dist * 4, 40))
rstr = ""
if rotation is not None:
rstr = f"rot_{rotation}_"
doc.saveas(OUTDIR / f"dim_arc_usr_loc_absolute_{rstr}_{DXFVERSION}.dxf")
def usr_location_relative(angle: float, rotation: Optional[float] = None):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
x_dist = 10
radius = 3.0
distance = 1.0
for dimtad, y_dist, leader in [
[0, 0, False],
[0, 10, True],
[4, 20, True],
]:
for count in range(8):
center = Vec3(x_dist * count, y_dist)
main_angle = 45.0 * count
dim = add_arc_dim(
msp,
center=center,
angle=main_angle,
delta=angle / 2.0,
radius=radius,
distance=distance,
text_rotation=rotation,
override={"dimtad": dimtad},
)
# user location relative to center of dimension line:
usr_location = Vec3.from_deg_angle(main_angle, 2.0)
dim.set_location(usr_location, leader=leader, relative=True)
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=100, center=(x_dist * 4, 40))
rstr = ""
if rotation is not None:
rstr = f"rot_{rotation}_"
doc.saveas(OUTDIR / f"dim_arc_usr_loc_relative_{rstr}_{DXFVERSION}.dxf")
def show_all_arrow_heads():
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
x_dist = 4.0
y_dist = 5.0
for x, arrow_name in enumerate(sorted(ezdxf.ARROWS.__all_arrows__)):
for y, angle in enumerate((3.0, 30.0)):
center = Vec3(x * x_dist, y * y_dist)
dim = add_arc_dim(
msp,
center=center,
angle=90.0,
delta=angle / 2.0,
radius=3.0,
distance=1.0,
text_rotation=None,
override={"dimblk": arrow_name},
)
dim.render(discard=BRICSCAD)
doc.set_modelspace_vport(height=40, center=(50, 5))
doc.saveas(OUTDIR / f"dim_arc_all_arrows_{DXFVERSION}.dxf")
if __name__ == "__main__":
arc_cra_default_outside()
arc_cra_default_outside_fixed_extension_length()
arc_cra_default_inside()
arc_cra_default_inside_fixed_extension_length()
arc_3p_default()
dim_arc_3d()
arc_units_tol_limits()
measure_fixed_angle(3.0)
measure_fixed_angle(6.0)
measure_fixed_angle(9.0)
usr_location_absolute(6)
usr_location_absolute(6, rotation=15)
usr_location_relative(30)
usr_location_relative(30, rotation=345)
show_all_arrow_heads()
```
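For quick reference, here is a minimal sketch distilled from the script above, using only API calls that appear in it (`add_arc_dim_cra()` plus the mandatory `render()` step); the output filename is a placeholder.

```python
# Minimal sketch based on the example above: one arc dimension defined by
# center, radius and angles (cra), rendered with default settings.
import ezdxf
from ezdxf.math import Vec3

doc = ezdxf.new("R2013", setup=True)  # setup=True creates the required dimstyles
msp = doc.modelspace()
dim = msp.add_arc_dim_cra(
    Vec3(0, 0),  # arc center
    5,           # radius
    60,          # start angle in degrees
    120,         # end angle in degrees
    2,           # distance from the arc to the dimension line
)
dim.render()  # required second step: creates the dimension geometry block
doc.saveas("dim_arc_cra_minimal.dxf")  # placeholder output path
```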
#### File: examples/render/dimension_ordinate.py
```python
import pathlib
import ezdxf
from ezdxf.math import Vec3, UCS
import logging
# ========================================
# Setup logging
# ========================================
logging.basicConfig(level="WARNING")
# ========================================
# Setup your preferred output directory
# ========================================
OUTDIR = pathlib.Path("~/Desktop/Outbox").expanduser()
if not OUTDIR.exists():
OUTDIR = pathlib.Path()
# =======================================================
# Discarding dimension rendering is possible
# for BricsCAD, but is incompatible with AutoCAD -> error
# =======================================================
BRICSCAD = False
DXFVERSION = "R2013"
def add_x_and_y_type(
msp, feature_location: Vec3, offset: Vec3, rotate: float, override
):
# Default DimStyle EZDXF:
# - linear units = 1 drawing unit = 1 m
# - scale 1:100
# - closed filled arrow, size = 0.25
# - text location above dimension line
#
# feature_location:
# measured location - measurement is the x- or y-distance from the
# origin
# offset:
# offset from the feature location to the end of the leader as vector
# origin:
# defines the origin in the render UCS (=WCS by default),
# the default origin is (0, 0)
dim = msp.add_ordinate_x_dim(
feature_location=feature_location,
offset=offset,
rotation=rotate,
override=override,
)
    # Necessary second step to create the BLOCK entity with the DIMENSION
    # geometry. Ezdxf supports DXF R2000 attributes for DXF R12 rendering,
    # but they have to be applied by the DIMSTYLE override feature; these
    # additional attributes are not stored in the XDATA section of the
    # DIMENSION entity, they are just used to render the DIMENSION entity.
    # The return value `dim` is not a DIMENSION entity; instead a
    # DimStyleOverride object is returned, and the DIMENSION entity is stored
    # as dim.dimension, see also the ezdxf.override.DimStyleOverride class.
dim.render(discard=BRICSCAD)
# swap x, y axis of the offset for the y-type
offset = Vec3(offset.y, offset.x)
msp.add_ordinate_y_dim(
feature_location=feature_location,
offset=offset,
rotation=rotate,
override=override,
).render()
def ordinate_wcs(
filename: str,
rotate: float = 0.0,
override: dict = None,
):
doc = ezdxf.new(DXFVERSION, setup=True)
msp = doc.modelspace()
if override is None:
override = dict()
for dimtad, feature_location in [(1, (5, 20)), (0, (0, 0)), (4, (-5, -20))]:
override["dimtad"] = dimtad
add_x_and_y_type(
msp, Vec3(feature_location), Vec3(1, 3), rotate, override
)
add_x_and_y_type(
msp, Vec3(feature_location), Vec3(-1, 3), rotate, override
)
add_x_and_y_type(
msp, Vec3(feature_location), Vec3(1, -3), rotate, override
)
add_x_and_y_type(
msp, Vec3(feature_location), Vec3(-1, -3), rotate, override
)
doc.set_modelspace_vport(height=70)
doc.saveas(OUTDIR / f"{filename}_{DXFVERSION}.dxf")
def ordinate_ucs(
filename: str,
rotate: float = 30.0,
):
doc = ezdxf.new(DXFVERSION, setup=True)
dimstyle = doc.dimstyles.duplicate_entry("EZDXF", "ORD_CENTER")
dimstyle.dxf.dimtad = 0
msp = doc.modelspace()
for origin in [Vec3(5, 20), Vec3(0, 0), Vec3(-5, -20)]:
ucs = UCS(origin, ux=Vec3.from_deg_angle(rotate), uz=(0, 0, 1))
msp.add_ordinate_x_dim(
feature_location=(3, 2),
offset=(1, 2),
dimstyle="ORD_CENTER"
).render(ucs=ucs)
msp.add_ordinate_y_dim(
feature_location=(3, 2),
offset=(1, -2),
dimstyle="ORD_CENTER"
).render(ucs=ucs)
doc.set_modelspace_vport(height=70)
doc.saveas(OUTDIR / f"{filename}_{DXFVERSION}.dxf")
if __name__ == "__main__":
ordinate_wcs(filename="ordinate_wcs")
ordinate_wcs(filename="ordinate_rot_30_deg_wcs", rotate=30)
ordinate_ucs(filename="ordinate_ucs", rotate=30)
```
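As a quick recap of the ordinate dimension API used above (`add_ordinate_x_dim()`/`add_ordinate_y_dim()`), a minimal sketch; the output filename is a placeholder.

```python
# Minimal sketch based on the example above: one x-type and one y-type ordinate
# dimension, measured from the render UCS origin (0, 0).
import ezdxf
from ezdxf.math import Vec3

doc = ezdxf.new("R2013", setup=True)
msp = doc.modelspace()
feature_location = Vec3(3, 2)  # the measured point
msp.add_ordinate_x_dim(        # measures the x-distance from the origin
    feature_location=feature_location,
    offset=Vec3(1, 3),         # leader end point relative to the feature location
).render()
msp.add_ordinate_y_dim(        # measures the y-distance from the origin
    feature_location=feature_location,
    offset=Vec3(3, 1),
).render()
doc.saveas("dim_ordinate_minimal.dxf")  # placeholder output path
```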
#### File: ezdxf/examples/spline_interpolation_of_sine_wave.py
```python
from typing import Iterable
from pathlib import Path
import math
import ezdxf
from ezdxf import zoom
from ezdxf.math import (
Vec3,
estimate_tangents,
linspace,
estimate_end_tangent_magnitude,
)
from ezdxf.math import (
local_cubic_bspline_interpolation,
global_bspline_interpolation,
)
DIR = Path("~/Desktop/Outbox").expanduser()
def sine_wave(count: int, scale: float = 1.0) -> Iterable[Vec3]:
for t in linspace(0, math.tau, count):
yield Vec3(t * scale, math.sin(t) * scale)
doc = ezdxf.new()
msp = doc.modelspace()
# Calculate 8 points on sine wave as interpolation data
data = list(sine_wave(count=8, scale=2.0))
# Reference curve as fine approximation
msp.add_lwpolyline(
sine_wave(count=800, scale=2.0),
dxfattribs={"color": 1, "layer": "Reference curve (LWPolyline)"},
)
# AutoCAD/BricsCAD interpolation
msp.add_spline(data, dxfattribs={"layer": "BricsCAD B-spline", "color": 2})
# tangent estimation method
METHOD = "5-p"
# create not normalized tangents (default is normalized)
tangents = estimate_tangents(data, METHOD, normalize=False)
# show tangents
for p, t in zip(data, tangents):
msp.add_line(
p,
p + t,
dxfattribs={"color": 5, "layer": f"Estimated tangents ({METHOD})"},
)
# local interpolation: a normalized tangent vector for each data point is required,
s = local_cubic_bspline_interpolation(
data, tangents=[t.normalize() for t in tangents]
)
# or set argument 'method' for automatic tangent estimation, default method is
# '5-points' interpolation
# s = local_cubic_bspline_interpolation(data, method=METHOD)
msp.add_spline(
dxfattribs={"color": 3, "layer": f"Local interpolation ({METHOD})"}
).apply_construction_tool(s)
# global interpolation: take first and last vector from 'tangents' as start-
# and end tangent
m1, m2 = estimate_end_tangent_magnitude(data, method="chord")
s = global_bspline_interpolation(
data, tangents=(tangents[0].normalize(m1), tangents[-1].normalize(m2))
)
msp.add_spline(
dxfattribs={"color": 4, "layer": f"Global interpolation ({METHOD})"}
).apply_construction_tool(s)
zoom.extents(msp, factor=1.1)
doc.saveas(DIR / f"sine-wave-{METHOD}.dxf")
```
#### File: examples/tools/diff_entity_tags.py
```python
from typing import Optional, Iterable, Tuple
import sys
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.tagger import tag_compiler
from ezdxf.tools.rawloader import raw_structure_loader
from ezdxf.tools.difftags import diff_tags, print_diff
def main(filename1: str, filename2: str, handle: str):
doc1 = raw_structure_loader(filename1)
doc2 = raw_structure_loader(filename2)
try:
a, b = get_entities(doc1, doc2, handle)
except ValueError as e:
print(str(e))
sys.exit(1)
print_diff(a, b, diff_tags(a, b, ndigits=6))
def get_entities(doc1, doc2, handle: str) -> Tuple[Tags, Tags]:
a = entity_tags(doc1["ENTITIES"], handle)
b = entity_tags(doc2["ENTITIES"], handle)
if a is None or b is None:
raise ValueError(f"Entity #{handle} not present in both files")
return a, b
def entity_tags(entities: Iterable[Tags], handle: str) -> Optional[Tags]:
def get_handle(tags: Tags):
try:
return tags.get_handle()
except ValueError:
return "0"
for e in entities:
if get_handle(e) == handle:
return Tags(tag_compiler(iter(e)))
return None
FILE1 = r"C:\Users\manfred\Desktop\Outbox\bottom_line_R2013.dxf"
FILE2 = r"C:\Users\manfred\Desktop\Outbox\bottom_line_brics.dxf"
HANDLE = "8A"
if __name__ == "__main__":
main(FILE1, FILE2, HANDLE)
```
#### File: ezdxf/profiling/raw_data_reading.py
```python
import os
import time
from ezdxf import EZDXF_TEST_FILES
BIG_FILE = os.path.join(EZDXF_TEST_FILES, "CADKitSamples", "torso_uniform.dxf")
def load_ascii():
with open(BIG_FILE, "rt", encoding="cp1252") as fp:
while True:
line = fp.readline()
if not line:
break
def load_bytes():
with open(BIG_FILE, "rb") as fp:
while True:
line = fp.readline()
if not line:
break
def print_result(time, text):
print(f"Operation: {text} takes {time:.6f} s\n")
def run(func):
start = time.perf_counter()
func()
end = time.perf_counter()
return end - start
if __name__ == "__main__":
print_result(run(load_ascii), "ascii stream reader")
print_result(run(load_bytes), "byte stream reader")
```
#### File: ezdxf/profiling/setup_new_drawing.py
```python
from timeit import Timer
import ezdxf
SETUP = """
from __main__ import setup_drawing
"""
def setup_drawing():
doc = ezdxf.new()
_ = doc.dxfversion
def main(count):
t = Timer("setup_drawing()", SETUP)
time2 = t.timeit(count)
print_result(time2, f"setup {count} new style DXF")
def print_result(time, text):
print(f"Profiling: {text}; takes {time:.2f} seconds")
if __name__ == "__main__":
main(300)
```
#### File: ezdxf/profiling/stress.py
```python
import pytest
import sys
import argparse
import os
import glob
import time
import ezdxf
from ezdxf import recover
from ezdxf import EZDXF_TEST_FILES
from itertools import chain
DIRS = [
"AutodeskSamples/*.dxf",
"AutodeskProducts/*.dxf",
"CADKitSamples/*.dxf",
"*.dxf",
]
files = list(
chain(*[glob.glob(os.path.join(EZDXF_TEST_FILES, d)) for d in DIRS])
)
@pytest.mark.parametrize("filename", files)
def test_readfile(filename):
try:
recover.readfile(filename)
except ezdxf.DXFStructureError:
pytest.fail(f"{filename}: DXFStructureError in recover mode.")
else:
assert True
if __name__ == "__main__":
import logging
from ezdxf import bbox, print_config
from ezdxf.math import Vec3
import warnings
# Suppress Matplotlib font replacement warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser("stress")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="give more output",
)
parser.add_argument(
"-e",
"--extends",
action="store_true",
help="perform extends calculation",
)
parser.add_argument(
"-c",
"--cadkit",
action="store_true",
help="use only CADKit samples",
)
parser.add_argument(
"-l",
"--log",
action="store_true",
help="turn logging on",
)
args = parser.parse_args(sys.argv[1:])
print_config()
print("-" * 79)
if args.cadkit: # only CADKit samples
files = glob.glob(os.path.join(EZDXF_TEST_FILES, "CADKitSamples/*.dxf"))
if args.log:
logging.basicConfig(level=logging.WARNING)
for name in files:
print(f'Loading file: "{name}"')
try:
t_start = time.perf_counter()
doc = ezdxf.readfile(name)
t_read = time.perf_counter()
auditor = doc.audit()
t_audit = time.perf_counter()
except ezdxf.DXFStructureError:
if args.verbose:
print("Regular loading function failed, using recover mode.")
t_start = time.perf_counter()
doc, auditor = recover.readfile(name)
t_read = time.perf_counter()
t_audit = t_read
if auditor.has_errors and args.verbose:
print(f"Found {len(auditor.errors)} unrecoverable error(s).")
if auditor.has_fixes and args.verbose:
print(f"Fixed {len(auditor.fixes)} error(s).")
ex_run = 0
if args.extends:
ex_start = time.perf_counter()
extends = bbox.extents(doc.modelspace())
            ex_run = time.perf_counter() - ex_start
if args.verbose:
extmin = doc.header.get("$EXTMIN")
extmax = doc.header.get("$EXTMAX")
if extmin is not None:
e1 = Vec3(extmin).round(3)
e2 = Vec3(extmax).round(3)
print(f"Header var $EXTMIN/$EXTMAX: {e1}; {e2}")
if extends.has_data:
e1 = extends.extmin.round(3)
e2 = extends.extmax.round(3)
print(f"Calculated $EXTMIN/$EXTMAX: {e1}; {e2}")
if args.verbose:
print("Timing: ", end="")
t_run = t_read - t_start
print(f" loading: {t_run:.3f}s", end="")
if t_read != t_audit:
print(f" audit: {t_audit - t_read:.3f}s", end="")
if ex_run:
print(f" extends: {ex_run:.3f}s", end="")
print()
print("-" * 79)
```
#### File: ezdxf/profiling/tag_compiler.py
```python
import os
import time
from ezdxf.lldxf.tagger import ascii_tags_loader, tag_compiler
from ezdxf.recover import safe_tag_loader
from ezdxf import EZDXF_TEST_FILES
BIG_FILE = os.path.join(EZDXF_TEST_FILES, "CADKitSamples", "torso_uniform.dxf")
def load_ascii():
with open(BIG_FILE, "rt") as fp:
list(tag_compiler(iter(ascii_tags_loader(fp))))
def safe_load_bytes():
with open(BIG_FILE, "rb") as fp:
list(safe_tag_loader(fp))
def print_result(time, text):
print(f"Operation: {text} takes {time:.2f} s\n")
def run(func):
start = time.perf_counter()
func()
end = time.perf_counter()
return end - start
if __name__ == "__main__":
print_result(run(safe_load_bytes), "safe_tag_loader()")
print_result(run(load_ascii), "ascii_tag_compiler()")
```
#### File: jkjt/ezdxf/setup.py
```python
import os
import sys
from setuptools import setup, find_packages
from setuptools import Extension
# setuptools docs: https://setuptools.readthedocs.io/en/latest/setuptools.html
# All Cython accelerated modules are optional:
ext_modules = [
Extension(
"ezdxf.acc.vector",
[
"src/ezdxf/acc/vector.pyx",
],
optional=True,
language="c++",
),
Extension(
"ezdxf.acc.matrix44",
[
"src/ezdxf/acc/matrix44.pyx",
],
optional=True,
language="c++",
),
Extension(
"ezdxf.acc.bezier4p",
[
"src/ezdxf/acc/bezier4p.pyx",
"src/ezdxf/acc/_cpp_cubic_bezier.cpp",
],
optional=True,
language="c++",
),
Extension(
"ezdxf.acc.bezier3p",
[
"src/ezdxf/acc/bezier3p.pyx",
"src/ezdxf/acc/_cpp_quad_bezier.cpp",
],
optional=True,
language="c++",
),
Extension(
"ezdxf.acc.bspline",
[
"src/ezdxf/acc/bspline.pyx",
],
optional=True,
language="c++",
),
Extension(
"ezdxf.acc.construct",
[
"src/ezdxf/acc/construct.pyx",
],
optional=True,
language="c++",
),
]
try:
from Cython.Distutils import build_ext
commands = {"build_ext": build_ext}
except ImportError:
ext_modules = []
commands = {}
PYPY = hasattr(sys, "pypy_version_info")
if PYPY:
print(
"C-extensions are disabled for pypy, because JIT complied Python code "
"is much faster!"
)
ext_modules = []
commands = {}
def get_version():
v = {}
for line in open("./src/ezdxf/version.py").readlines():
if line.strip().startswith("__version__"):
            exec(line, v)
            return v["__version__"]
    raise IOError("__version__ string not found")
def read(fname, until=""):
def read_until(lines):
last_index = -1
for index, line in enumerate(lines):
if line.startswith(until):
last_index = index
break
return "".join(lines[:last_index])
try:
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return read_until(f.readlines()) if until else f.read()
except IOError:
return "File '%s' not found.\n" % fname
DRAW = ["matplotlib", "PySide6"]
DRAW5 = ["matplotlib", "PyQt5"]
TEST = ["pytest", "geomdl"]
DEV = ["setuptools", "wheel", "Cython"]
setup(
name="ezdxf",
version=get_version(),
description="A Python package to create/manipulate DXF drawings.",
author="<NAME>",
url="https://ezdxf.mozman.at",
download_url="https://pypi.org/project/ezdxf/",
author_email="<EMAIL>",
python_requires=">=3.7",
package_dir={"": "src"},
packages=find_packages("src"),
zip_safe=False,
package_data={
"ezdxf": [
"pp/*.html",
"pp/*.js",
"pp/*.css",
"tools/font_face_cache.json",
"tools/font_measurement_cache.json",
"resources/*.png",
"py.typed",
]
},
entry_points={
"console_scripts": [
"ezdxf = ezdxf.__main__:main", # ezdxf launcher
]
},
provides=["ezdxf"],
cmdclass=commands,
ext_modules=ext_modules,
install_requires=["pyparsing>=2.0.1", "typing_extensions"],
setup_requires=["wheel"],
tests_require=["pytest", "geomdl"],
extras_require={
"draw": DRAW,
"draw5": DRAW5,
"test": TEST,
"dev": DEV + TEST,
"all": DRAW + DEV + TEST,
"all5": DRAW5 + DEV + TEST,
},
keywords=["DXF", "CAD"],
long_description=read("README.md")
+ read("NEWS.md", until="Version 0.10.0"),
long_description_content_type="text/markdown",
platforms="OS Independent",
license="MIT License",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
# Development Status :: 3 - Alpha
# Development Status :: 4 - Beta
# Development Status :: 5 - Production/Stable
# Development Status :: 6 - Mature
# Development Status :: 7 - Inactive
```
#### File: addons/browser/data.py
```python
from typing import Optional, Dict, List, Tuple, Iterable, Any
from pathlib import Path
from ezdxf.lldxf.loader import SectionDict
from ezdxf.addons.browser.loader import load_section_dict
from ezdxf.lldxf.types import DXFVertex, tag_type
from ezdxf.lldxf.tags import Tags
__all__ = [
"DXFDocument",
"IndexEntry",
"get_row_from_line_number",
"dxfstr",
"EntityHistory",
"SearchIndex",
]
class DXFDocument:
def __init__(self, sections: SectionDict = None):
# Important: the section dict has to store the raw string tags
# else an association of line numbers to entities is not possible.
# Comment tags (999) are ignored, because the load_section_dict()
# function can not handle and store comments.
        # Therefore comments cause incorrect results for the line number
# associations and should be stripped off before processing for precise
# debugging of DXF files (-b for backup):
# ezdxf strip -b <your.dxf>
self.sections: SectionDict = dict()
self.entity_index: Optional[EntityIndex] = None
self.valid_handles = None
self.filename = ""
if sections:
self.update(sections)
@property
def filepath(self):
return Path(self.filename)
@property
def max_line_number(self) -> int:
if self.entity_index:
return self.entity_index.max_line_number
else:
return 1
def load(self, filename: str):
self.filename = filename
self.update(load_section_dict(filename))
def update(self, sections: SectionDict):
self.sections = sections
self.entity_index = EntityIndex(self.sections)
def absolute_filepath(self):
return self.filepath.absolute()
def get_section(self, name: str) -> List[Tags]:
return self.sections.get(name) # type: ignore
def get_entity(self, handle: str) -> Optional[Tags]:
if self.entity_index:
return self.entity_index.get(handle)
return None
def get_line_number(self, entity: Tags, offset: int = 0) -> int:
if self.entity_index:
return (
self.entity_index.get_start_line_for_entity(entity) + offset * 2
)
return 0
def get_entity_at_line(self, number: int) -> Optional[Tags]:
if self.entity_index:
return self.entity_index.get_entity_at_line(number)
return None
def next_entity(self, entity: Tags) -> Optional[Tags]:
return self.entity_index.next_entity(entity) # type: ignore
def previous_entity(self, entity: Tags) -> Optional[Tags]:
return self.entity_index.previous_entity(entity) # type: ignore
def get_handle(self, entity) -> Optional[str]:
return self.entity_index.get_handle(entity) # type: ignore
class IndexEntry:
def __init__(self, tags: Tags, line: int = 0):
self.tags: Tags = tags
self.start_line_number: int = line
self.prev: Optional["IndexEntry"] = None
self.next: Optional["IndexEntry"] = None
class EntityIndex:
def __init__(self, sections: SectionDict):
        # dict entries preserve their insertion order (guaranteed since Python 3.7)!
# Therefore _index.values() returns the DXF entities in file order!
self._index: Dict[str, IndexEntry] = dict()
# Index dummy handle of entities without handles by the id of the
# first tag for faster retrieval of the dummy handle from tags:
# dict items: (id, handle)
self._dummy_handle_index: Dict[int, str] = dict()
self._max_line_number: int = 0
self._build(sections)
def _build(self, sections: SectionDict) -> None:
start_line_number = 1
dummy_handle = 1
entity_index: Dict[str, IndexEntry] = dict()
dummy_handle_index: Dict[int, str] = dict()
prev_entry: Optional[IndexEntry] = None
for section in sections.values():
for tags in section:
assert isinstance(tags, Tags), "expected class Tags"
assert len(tags) > 0, "empty tags should not be possible"
try:
handle = tags.get_handle().upper()
except ValueError:
handle = f"*{dummy_handle:X}"
# index dummy handle by id of the first tag:
dummy_handle_index[id(tags[0])] = handle
dummy_handle += 1
next_entry = IndexEntry(tags, start_line_number)
if prev_entry is not None:
next_entry.prev = prev_entry
prev_entry.next = next_entry
entity_index[handle] = next_entry
prev_entry = next_entry
# calculate next start line number:
# add 2 lines for each tag: group code, value
start_line_number += len(tags) * 2
start_line_number += 2 # for removed ENDSEC tag
# subtract 1 and 2 for the last ENDSEC tag!
self._max_line_number = start_line_number - 3
self._index = entity_index
self._dummy_handle_index = dummy_handle_index
def __contains__(self, handle: str) -> bool:
return handle.upper() in self._index
@property
def max_line_number(self) -> int:
return self._max_line_number
def get(self, handle: str) -> Optional[Tags]:
index_entry = self._index.get(handle.upper())
if index_entry is not None:
return index_entry.tags
else:
return None
def get_handle(self, entity: Tags) -> Optional[str]:
if not len(entity):
return None
try:
return entity.get_handle()
except ValueError:
# fast retrieval of dummy handle which isn't stored in tags:
return self._dummy_handle_index.get(id(entity[0]))
def next_entity(self, entity: Tags) -> Tags:
handle = self.get_handle(entity)
if handle:
index_entry = self._index.get(handle)
next_entry = index_entry.next # type: ignore
# next of last entity is None!
if next_entry:
return next_entry.tags
return entity
def previous_entity(self, entity: Tags) -> Tags:
handle = self.get_handle(entity)
if handle:
index_entry = self._index.get(handle)
prev_entry = index_entry.prev # type: ignore
# prev of first entity is None!
if prev_entry:
return prev_entry.tags
return entity
def get_start_line_for_entity(self, entity: Tags) -> int:
handle = self.get_handle(entity)
if handle:
index_entry = self._index.get(handle)
if index_entry:
return index_entry.start_line_number
return 0
def get_entity_at_line(self, number: int) -> Optional[Tags]:
tags = None
for index_entry in self._index.values():
if index_entry.start_line_number > number:
return tags # tags of previous entry!
tags = index_entry.tags
return tags
def get_row_from_line_number(
entity: Tags, start_line_number: int, select_line_number: int
) -> int:
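    # Maps an absolute DXF file line number back to a row of the tag table:
    # regular tags occupy two lines (group code + value), DXFVertex tags
    # occupy two lines per vertex component.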
count = select_line_number - start_line_number
lines = 0
row = 0
for tag in entity:
if lines >= count:
return row
if isinstance(tag, DXFVertex):
lines += len(tag.value) * 2
else:
lines += 2
row += 1
return row
def dxfstr(tags: Tags) -> str:
return "".join(tag.dxfstr() for tag in tags)
class EntityHistory:
def __init__(self):
self._history: List[Tags] = list()
self._index: int = 0
self._time_travel: List[Tags] = list()
def __len__(self):
return len(self._history)
@property
def index(self):
return self._index
def clear(self):
self._history.clear()
self._time_travel.clear()
self._index = 0
def append(self, entity: Tags):
if self._time_travel:
self._history.extend(self._time_travel)
self._time_travel.clear()
count = len(self._history)
if count:
# only append if different to last entity
if self._history[-1] is entity:
return
self._index = count
self._history.append(entity)
def back(self) -> Optional[Tags]:
entity = None
if self._history:
index = self._index - 1
if index >= 0:
entity = self._time_wrap(index)
else:
entity = self._history[0]
return entity
def forward(self) -> Tags:
entity = None
history = self._history
if history:
index = self._index + 1
if index < len(history):
entity = self._time_wrap(index)
else:
entity = history[-1]
return entity # type: ignore
def _time_wrap(self, index) -> Tags:
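        # Moving back/forward records the visited entity in the time-travel
        # buffer; append() merges this buffer back into the history before
        # adding a new entity.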
self._index = index
entity = self._history[index]
self._time_travel.append(entity)
return entity
def content(self) -> List[Tags]:
return list(self._history)
class SearchIndex:
NOT_FOUND = None, -1
def __init__(self, entities: Iterable[Tags]):
self.entities: List[Tags] = list(entities)
self._current_entity_index: int = 0
self._current_tag_index: int = 0
self._search_term: Optional[str] = None
self._search_term_lower: Optional[str] = None
self._backward = False
self._end_of_index = not bool(self.entities)
self.case_insensitive = True
self.whole_words = False
self.numbers = False
self.regex = False # False = normal mode
@property
def is_end_of_index(self) -> bool:
return self._end_of_index
@property
def search_term(self) -> Optional[str]:
return self._search_term
def set_current_entity(self, entity: Tags, tag_index: int = 0):
self._current_tag_index = tag_index
try:
self._current_entity_index = self.entities.index(entity)
except ValueError:
self.reset_cursor()
def update_entities(self, entities: List[Tags]):
current_entity, index = self.current_entity()
self.entities = entities
if current_entity:
self.set_current_entity(current_entity, index)
def current_entity(self) -> Tuple[Optional[Tags], int]:
if self.entities and not self._end_of_index:
return (
self.entities[self._current_entity_index],
self._current_tag_index,
)
return self.NOT_FOUND
def reset_cursor(self, backward: bool = False):
self._current_entity_index = 0
self._current_tag_index = 0
count = len(self.entities)
if count:
self._end_of_index = False
if backward:
self._current_entity_index = count - 1
entity = self.entities[-1]
self._current_tag_index = len(entity) - 1
else:
self._end_of_index = True
def cursor(self) -> Tuple[int, int]:
return self._current_entity_index, self._current_tag_index
def move_cursor_forward(self) -> None:
if self.entities:
entity: Tags = self.entities[self._current_entity_index]
tag_index = self._current_tag_index + 1
if tag_index >= len(entity):
entity_index = self._current_entity_index + 1
if entity_index < len(self.entities):
self._current_entity_index = entity_index
self._current_tag_index = 0
else:
self._end_of_index = True
else:
self._current_tag_index = tag_index
def move_cursor_backward(self) -> None:
if self.entities:
tag_index = self._current_tag_index - 1
if tag_index < 0:
entity_index = self._current_entity_index - 1
if entity_index >= 0:
self._current_entity_index = entity_index
self._current_tag_index = (
len(self.entities[entity_index]) - 1
)
else:
self._end_of_index = True
else:
self._current_tag_index = tag_index
def reset_search_term(self, term: str) -> None:
self._search_term = str(term)
self._search_term_lower = self._search_term.lower()
def find(
self, term: str, backward: bool = False, reset_index: bool = True
) -> Tuple[Optional[Tags], int]:
self.reset_search_term(term)
if reset_index:
self.reset_cursor(backward)
if len(self.entities) and not self._end_of_index:
if backward:
return self.find_backwards()
else:
return self.find_forward()
else:
return self.NOT_FOUND
def find_forward(self) -> Tuple[Optional[Tags], int]:
return self._find(self.move_cursor_forward)
def find_backwards(self) -> Tuple[Optional[Tags], int]:
return self._find(self.move_cursor_backward)
def _find(self, move_cursor) -> Tuple[Optional[Tags], int]:
if self.entities and self._search_term and not self._end_of_index:
while not self._end_of_index:
entity, tag_index = self.current_entity()
move_cursor()
if self._match(*entity[tag_index]): # type: ignore
return entity, tag_index
return self.NOT_FOUND
def _match(self, code: int, value: Any) -> bool:
if tag_type(code) is not str:
if not self.numbers:
return False
value = str(value)
if self.case_insensitive:
search_term = self._search_term_lower
value = value.lower()
else:
search_term = self._search_term
if self.whole_words:
return any(search_term == word for word in value.split())
else:
return search_term in value
```
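A small usage sketch for the `SearchIndex` class defined above; it assumes the module is importable as `ezdxf.addons.browser.data` (per the file header) and that `Tags.from_text()` is available to build raw tag lists.

```python
# Usage sketch for SearchIndex (assumed import path, see file header above).
from ezdxf.lldxf.tags import Tags
from ezdxf.addons.browser.data import SearchIndex

line = Tags.from_text("0\nLINE\n8\nWalls\n")      # one entity as raw tags
circle = Tags.from_text("0\nCIRCLE\n8\nWalls\n")
index = SearchIndex([line, circle])
entity, tag_index = index.find("circle")          # case-insensitive by default
assert entity is circle and tag_index == 0
```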
#### File: addons/browser/model.py
```python
from typing import Any, List, Dict, Optional
import textwrap
from ezdxf.lldxf.types import (
render_tag,
DXFVertex,
GROUP_MARKERS,
POINTER_CODES,
)
from ezdxf.addons.xqt import QModelIndex, QAbstractTableModel, Qt
from ezdxf.addons.xqt import QStandardItemModel, QStandardItem, QColor
from .tags import compile_tags, Tags
__all__ = [
"DXFTagsModel",
"DXFStructureModel",
"EntityContainer",
"Entity",
"DXFTagsRole",
]
DXFTagsRole = Qt.UserRole + 1
def name_fmt(handle, name: str) -> str:
if handle is None:
return name
else:
return f"<{handle}> {name}"
HEADER_LABELS = ["Group Code", "Data Type", "Content", "4", "5"]
def calc_line_numbers(start: int, tags: Tags) -> List[int]:
numbers = [start]
index = start
for tag in tags:
if isinstance(tag, DXFVertex):
index += len(tag.value) * 2
else:
index += 2
numbers.append(index)
return numbers
class DXFTagsModel(QAbstractTableModel):
def __init__(
self, tags: Tags, start_line_number: int = 1, valid_handles=None
):
super().__init__()
self._tags = compile_tags(tags)
self._line_numbers = calc_line_numbers(start_line_number, self._tags)
self._valid_handles = valid_handles or set()
def data(self, index: QModelIndex, role: int = ...) -> Any: # type: ignore
def is_invalid_handle(tag):
if (
tag.code in POINTER_CODES
and not tag.value.upper() in self._valid_handles
):
return True
return False
if role == Qt.DisplayRole:
tag = self._tags[index.row()]
return render_tag(tag, index.column())
elif role == Qt.ForegroundRole:
tag = self._tags[index.row()]
if tag.code in GROUP_MARKERS:
return QColor("blue")
elif is_invalid_handle(tag):
return QColor("red")
elif role == DXFTagsRole:
return self._tags[index.row()]
elif role == Qt.ToolTipRole:
code, value = self._tags[index.row()]
if index.column() == 0: # group code column
return GROUP_CODE_TOOLTIPS_DICT.get(code)
code, value = self._tags[index.row()]
if code in POINTER_CODES:
if value.upper() in self._valid_handles:
return f"Double click to go to the referenced entity"
else:
return f"Handle does not exist"
elif code == 0:
return f"Double click to go to the DXF reference provided by Autodesk"
def headerData(
self, section: int, orientation: Qt.Orientation, role: int = ... # type: ignore
) -> Any:
if orientation == Qt.Horizontal:
if role == Qt.DisplayRole:
return HEADER_LABELS[section]
elif role == Qt.TextAlignmentRole:
return Qt.AlignLeft
elif orientation == Qt.Vertical:
if role == Qt.DisplayRole:
return self._line_numbers[section]
elif role == Qt.ToolTipRole:
return "Line number in DXF file"
def rowCount(self, parent: QModelIndex = ...) -> int: # type: ignore
return len(self._tags)
def columnCount(self, parent: QModelIndex = ...) -> int: # type: ignore
return 3
def compiled_tags(self) -> Tags:
"""Returns the compiled tags. Only points codes are compiled, group
code 10, ...
"""
return self._tags
def line_number(self, row: int) -> int:
"""Return the DXF file line number of the widget-row."""
try:
return self._line_numbers[row]
except IndexError:
return 0
class EntityContainer(QStandardItem):
def __init__(self, name: str, entities: List[Tags]):
super().__init__()
self.setEditable(False)
self.setText(name + f" ({len(entities)})")
self.setup_content(entities)
def setup_content(self, entities):
self.appendRows([Entity(e) for e in entities])
class Classes(EntityContainer):
def setup_content(self, entities):
self.appendRows([Class(e) for e in entities])
class AcDsData(EntityContainer):
def setup_content(self, entities):
self.appendRows([AcDsEntry(e) for e in entities])
class NamedEntityContainer(EntityContainer):
def setup_content(self, entities):
self.appendRows([NamedEntity(e) for e in entities])
class Tables(EntityContainer):
def setup_content(self, entities):
container = []
name = ""
for e in entities:
container.append(e)
dxftype = e.dxftype()
if dxftype == "TABLE":
try:
handle = e.get_handle()
except ValueError:
handle = None
name = e.get_first_value(2, default="UNDEFINED")
name = name_fmt(handle, name)
elif dxftype == "ENDTAB":
if container:
container.pop() # remove ENDTAB
self.appendRow(NamedEntityContainer(name, container))
container.clear()
class Blocks(EntityContainer):
def setup_content(self, entities):
container = []
name = "UNDEFINED"
for e in entities:
container.append(e)
dxftype = e.dxftype()
if dxftype == "BLOCK":
try:
handle = e.get_handle()
except ValueError:
handle = None
name = e.get_first_value(2, default="UNDEFINED")
name = name_fmt(handle, name)
elif dxftype == "ENDBLK":
if container:
self.appendRow(EntityContainer(name, container))
container.clear()
def get_section_name(section: List[Tags]) -> str:
if len(section) > 0:
header = section[0]
if len(header) > 1 and header[0].code == 0 and header[1].code == 2:
return header[1].value
return "INVALID SECTION HEADER!"
class Entity(QStandardItem):
def __init__(self, tags: Tags):
super().__init__()
self.setEditable(False)
self._tags = tags
self._handle: Optional[str]
try:
self._handle = tags.get_handle()
except ValueError:
self._handle = None
self.setText(self.entity_name())
def entity_name(self):
name = "INVALID ENTITY!"
tags = self._tags
if tags and tags[0].code == 0:
name = name_fmt(self._handle, tags[0].value)
return name
def data(self, role: int = ...) -> Any: # type: ignore
if role == DXFTagsRole:
return self._tags
else:
return super().data(role)
class Header(Entity):
def entity_name(self):
return "HEADER"
class ThumbnailImage(Entity):
def entity_name(self):
return "THUMBNAILIMAGE"
class NamedEntity(Entity):
def entity_name(self):
name = self._tags.get_first_value(2, "<noname>")
return name_fmt(str(self._handle), name)
class Class(Entity):
def entity_name(self):
tags = self._tags
name = "INVALID CLASS!"
if len(tags) > 1 and tags[0].code == 0 and tags[1].code == 1:
name = tags[1].value
return name
class AcDsEntry(Entity):
def entity_name(self):
return self._tags[0].value
class DXFStructureModel(QStandardItemModel):
def __init__(self, filename: str, doc):
super().__init__()
root = QStandardItem(filename)
root.setEditable(False)
self.appendRow(root)
row: Any
for section in doc.sections.values():
name = get_section_name(section)
if name == "HEADER":
row = Header(section[0])
elif name == "THUMBNAILIMAGE":
row = ThumbnailImage(section[0])
elif name == "CLASSES":
row = Classes(name, section[1:])
elif name == "TABLES":
row = Tables(name, section[1:])
elif name == "BLOCKS":
row = Blocks(name, section[1:])
elif name == "ACDSDATA":
row = AcDsData(name, section[1:])
else:
row = EntityContainer(name, section[1:])
root.appendRow(row)
def index_of_entity(self, entity: Tags) -> QModelIndex:
root = self.item(0, 0)
index = find_index(root, entity)
if index is None:
return root.index()
else:
return index
def find_index(item: QStandardItem, entity: Tags) -> Optional[QModelIndex]:
def _find(sub_item: QStandardItem):
for index in range(sub_item.rowCount()):
child = sub_item.child(index, 0)
tags = child.data(DXFTagsRole)
if tags and tags is entity:
return child.index()
if child.rowCount() > 0:
index2 = _find(child)
if index2 is not None:
return index2
return None
return _find(item)
GROUP_CODE_TOOLTIPS = [
(0, "Text string indicating the entity type (fixed)"),
(1, "Primary text value for an entity"),
(2, "Name (attribute tag, block name, and so on)"),
((3, 4), "Other text or name values"),
(5, "Entity handle; text string of up to 16 hexadecimal digits (fixed)"),
(6, "Linetype name (fixed)"),
(7, "Text style name (fixed)"),
(8, "Layer name (fixed)"),
(
9,
"DXF: variable name identifier (used only in HEADER section of the DXF file)",
),
(
10,
"Primary point; this is the start point of a line or text entity, center "
"of a circle, and so on DXF: X value of the primary point (followed by Y "
"and Z value codes 20 and 30) APP: 3D point (list of three reals)",
),
(
(11, 18),
"Other points DXF: X value of other points (followed by Y value codes "
"21-28 and Z value codes 31-38) APP: 3D point (list of three reals)",
),
(20, "DXF: Y value of the primary point"),
(30, "DXF: Z value of the primary point"),
((21, 28), "DXF: Y values of other points"),
((31, 37), "DXF: Z values of other points"),
(38, "DXF: entity's elevation if nonzero"),
(39, "Entity's thickness if nonzero (fixed)"),
(
(40, 47),
"Double-precision floating-point values (text height, scale factors, and so on)",
),
(48, "Linetype scale; default value is defined for all entity types"),
(
49,
"Multiple 49 groups may appear in one entity for variable-length tables "
"(such as the dash lengths in the LTYPE table). A 7x group always appears "
"before the first 49 group to specify the table length",
),
(
(50, 58),
"Angles (output in degrees to DXF files and radians through AutoLISP and ObjectARX applications)",
),
(
60,
"Entity visibility; absence or 0 indicates visibility; 1 indicates invisibility",
),
(62, "Color number (fixed)"),
(66, "Entities follow flag (fixed)"),
(67, "0 for model space or 1 for paper space (fixed)"),
(
68,
"APP: identifies whether viewport is on but fully off screen; is not active or is off",
),
(69, "APP: viewport identification number"),
((70, 79), "Integer values, such as repeat counts, flag bits, or modes"),
((90, 99), "32-bit integer values"),
(
100,
"Subclass data marker (with derived class name as a string). "
"Required for all objects and entity classes that are derived from "
"another concrete class. The subclass data marker segregates data defined by different "
"classes in the inheritance chain for the same object. This is in addition "
"to the requirement for DXF names for each distinct concrete class derived "
"from ObjectARX (see Subclass Markers)",
),
(101, "Embedded object marker"),
(
102,
"Control string, followed by '{arbitrary name' or '}'. Similar to the "
"xdata 1002 group code, except that when the string begins with '{', it "
"can be followed by an arbitrary string whose interpretation is up to the "
"application. The only other control string allowed is '}' as a group "
"terminator. AutoCAD does not interpret these strings except during d"
"rawing audit operations. They are for application use.",
),
(105, "Object handle for DIMVAR symbol table entry"),
(
110,
"UCS origin (appears only if code 72 is set to 1); DXF: X value; APP: 3D point",
),
(
111,
"UCS Y-axis (appears only if code 72 is set to 1); DXF: Y value; APP: 3D vector",
),
(
112,
"UCS Z-axis (appears only if code 72 is set to 1); DXF: Z value; APP: 3D vector",
),
((120, 122), "DXF: Y value of UCS origin, UCS X-axis, and UCS Y-axis"),
((130, 132), "DXF: Z value of UCS origin, UCS X-axis, and UCS Y-axis"),
(
(140, 149),
"Double-precision floating-point values (points, elevation, and DIMSTYLE settings, for example)",
),
(
(170, 179),
"16-bit integer values, such as flag bits representing DIMSTYLE settings",
),
(
210,
"Extrusion direction (fixed) "
+ "DXF: X value of extrusion direction "
+ "APP: 3D extrusion direction vector",
),
(220, "DXF: Y value of the extrusion direction"),
(230, "DXF: Z value of the extrusion direction"),
((270, 279), "16-bit integer values"),
((280, 289), "16-bit integer value"),
((290, 299), "Boolean flag value; 0 = False; 1 = True"),
((300, 309), "Arbitrary text strings"),
(
(310, 319),
"Arbitrary binary chunks with same representation and limits as 1004 "
"group codes: hexadecimal strings of up to 254 characters represent data "
"chunks of up to 127 bytes",
),
(
(320, 329),
"Arbitrary object handles; handle values that are taken 'as is'. They "
"are not translated during INSERT and XREF operations",
),
(
(330, 339),
"Soft-pointer handle; arbitrary soft pointers to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(340, 349),
"Hard-pointer handle; arbitrary hard pointers to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(350, 359),
"Soft-owner handle; arbitrary soft ownership links to other objects "
"within same DXF file or drawing. Translated during INSERT and XREF "
"operations",
),
(
(360, 369),
"Hard-owner handle; arbitrary hard ownership links to other objects within "
"same DXF file or drawing. Translated during INSERT and XREF operations",
),
(
(370, 379),
"Lineweight enum value (AcDb::LineWeight). Stored and moved around as a 16-bit integer. "
"Custom non-entity objects may use the full range, but entity classes only use 371-379 DXF "
"group codes in their representation, because AutoCAD and AutoLISP both always assume a 370 "
"group code is the entity's lineweight. This allows 370 to behave like other 'common' entity fields",
),
(
(380, 389),
"PlotStyleName type enum (AcDb::PlotStyleNameType). Stored and moved around as a 16-bit integer. "
"Custom non-entity objects may use the full range, but entity classes only use 381-389 "
"DXF group codes in their representation, for the same reason as the lineweight range",
),
(
(390, 399),
"String representing handle value of the PlotStyleName object, basically a hard pointer, but has "
"a different range to make backward compatibility easier to deal with. Stored and moved around "
"as an object ID (a handle in DXF files) and a special type in AutoLISP. Custom non-entity objects "
"may use the full range, but entity classes only use 391-399 DXF group codes in their representation, "
"for the same reason as the lineweight range",
),
((400, 409), "16-bit integers"),
((410, 419), "String"),
(
(420, 427),
"32-bit integer value. When used with True Color; a 32-bit integer representing a 24-bit color value. "
"The high-order byte (8 bits) is 0, the low-order byte an unsigned char holding the Blue value (0-255), "
"then the Green value, and the next-to-high order byte is the Red Value. Converting this integer value to "
"hexadecimal yields the following bit mask: 0x00RRGGBB. "
"For example, a true color with Red==200, Green==100 and Blue==50 is 0x00C86432, and in DXF, in decimal, 13132850",
),
(
(430, 437),
"String; when used for True Color, a string representing the name of the color",
),
(
(440, 447),
"32-bit integer value. When used for True Color, the transparency value",
),
((450, 459), "Long"),
((460, 469), "Double-precision floating-point value"),
((470, 479), "String"),
(
(480, 481),
"Hard-pointer handle; arbitrary hard pointers to other objects within same DXF file or drawing. "
"Translated during INSERT and XREF operations",
),
(
999,
"DXF: The 999 group code indicates that the line following it is a comment string. SAVEAS does "
"not include such groups in a DXF output file, but OPEN honors them and ignores the comments. "
"You can use the 999 group to include comments in a DXF file that you have edited",
),
(1000, "ASCII string (up to 255 bytes long) in extended data"),
(
1001,
"Registered application name (ASCII string up to 31 bytes long) for extended data",
),
(1002, "Extended data control string ('{' or '}')"),
(1003, "Extended data layer name"),
(1004, "Chunk of bytes (up to 127 bytes long) in extended data"),
(
1005,
"Entity handle in extended data; text string of up to 16 hexadecimal digits",
),
(
1010,
"A point in extended data; DXF: X value (followed by 1020 and 1030 groups); APP: 3D point",
),
(1020, "DXF: Y values of a point"),
(1030, "DXF: Z values of a point"),
(
1011,
"A 3D world space position in extended data "
"DXF: X value (followed by 1021 and 1031 groups) "
"APP: 3D point",
),
(1021, "DXF: Y value of a world space position"),
(1031, "DXF: Z value of a world space position"),
(
1012,
"A 3D world space displacement in extended data "
"DXF: X value (followed by 1022 and 1032 groups) "
"APP: 3D vector",
),
(1022, "DXF: Y value of a world space displacement"),
(1032, "DXF: Z value of a world space displacement"),
(
1013,
"A 3D world space direction in extended data "
"DXF: X value (followed by 1022 and 1032 groups) "
"APP: 3D vector",
),
(1023, "DXF: Y value of a world space direction"),
(1033, "DXF: Z value of a world space direction"),
(1040, "Extended data double-precision floating-point value"),
(1041, "Extended data distance value"),
(1042, "Extended data scale factor"),
(1070, "Extended data 16-bit signed integer"),
(1071, "Extended data 32-bit signed long"),
]
def build_group_code_tooltip_dict() -> Dict[int, str]:
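    # Expands the GROUP_CODE_TOOLTIPS table into a flat dict: integer keys map
    # directly, (start, end) tuples are expanded to one entry for each group
    # code in the inclusive range.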
tooltips = dict()
for code, tooltip in GROUP_CODE_TOOLTIPS:
tooltip = "\n".join(textwrap.wrap(tooltip, width=80))
if isinstance(code, int):
tooltips[code] = tooltip
elif isinstance(code, tuple):
s, e = code
for group_code in range(s, e + 1):
tooltips[group_code] = tooltip
else:
raise ValueError(type(code))
return tooltips
GROUP_CODE_TOOLTIPS_DICT = build_group_code_tooltip_dict()
```
#### File: addons/drawing/properties.py
```python
import re
from typing import (
TYPE_CHECKING,
Dict,
Optional,
Tuple,
Union,
List,
Set,
cast,
Sequence,
)
from ezdxf.addons import acadctb
from ezdxf.addons.drawing.config import Configuration
from ezdxf.addons.drawing.type_hints import Color, RGB
from ezdxf.colors import luminance, DXF_DEFAULT_COLORS, int2rgb
from ezdxf.entities import Attrib, Insert, Face3d, Linetype
from ezdxf.entities.ltype import CONTINUOUS_PATTERN
from ezdxf.entities.polygon import DXFPolygon
from ezdxf.enums import InsertUnits, Measurement
from ezdxf.lldxf import const
from ezdxf.lldxf.validator import make_table_key as layer_key
from ezdxf.tools import fonts
from ezdxf.tools.pattern import scale_pattern, HatchPatternType
if TYPE_CHECKING:
from ezdxf.eztypes import (
DXFGraphic,
Layout,
Table,
Layer,
Drawing,
Textstyle,
)
__all__ = [
"Properties",
"LayerProperties",
"LayoutProperties",
"RenderContext",
"layer_key",
"is_valid_color",
"rgb_to_hex",
"hex_to_rgb",
"MODEL_SPACE_BG_COLOR",
"PAPER_SPACE_BG_COLOR",
"VIEWPORT_COLOR",
"OLE2FRAME_COLOR",
"set_color_alpha",
"Filling",
]
table_key = layer_key
MODEL_SPACE_BG_COLOR = "#212830"
PAPER_SPACE_BG_COLOR = "#ffffff"
VIEWPORT_COLOR = "#aaaaaa" # arbitrary choice
OLE2FRAME_COLOR = "#89adba" # arbitrary choice
def is_dark_color(color: Color, dark: float = 0.2) -> bool:
luma = luminance(hex_to_rgb(color[:7]))
return luma <= dark
class Filling:
SOLID = 0
PATTERN = 1
GRADIENT = 2
def __init__(self):
# Solid fill color is stored in Properties.color attribute
self.type = Filling.SOLID
# Gradient- or pattern name
self.name: str = "SOLID"
# Gradient- or pattern angle
self.angle: float = 0.0 # in degrees
self.gradient_color1: Optional[Color] = None
self.gradient_color2: Optional[Color] = None
self.gradient_centered: float = 0.0 # todo: what's the meaning?
self.pattern_scale: float = 1.0
# Regular HATCH pattern definition:
self.pattern: HatchPatternType = []
class Properties:
"""An implementation agnostic representation of entity properties like
color and linetype.
"""
def __init__(self):
self.color: str = "#ffffff" # format #RRGGBB or #RRGGBBAA
        # Color names should be resolved into an actual color value
# Store linetype name for backends which don't have the ability to use
# user-defined linetypes, but have some predefined linetypes, maybe
# matching most common AutoCAD linetypes is possible.
# Store linetype names in UPPERCASE.
self.linetype_name: str = "CONTINUOUS"
# Linetypes: Complex DXF linetypes are not supported:
# 1. Don't know if there are any backends which can use linetypes
# including text or shapes
# 2. No decoder for SHX files available, which are the source for
# shapes in linetypes
# 3. SHX files are copyrighted - including in ezdxf not possible
#
# Simplified DXF linetype definition:
# all line elements >= 0.0, 0.0 = point
# all gap elements > 0.0
# Usage as alternating line - gap sequence: line-gap-line-gap ....
# (line could be a point 0.0), line-line or gap-gap - makes no sense
# Examples:
# DXF: ("DASHED", "Dashed __ __ __ __ __ __ __ __ __ __ __ __ __ _",
# [0.6, 0.5, -0.1])
# first entry 0.6 is the total pattern length = sum(linetype_pattern)
# linetype_pattern: [0.5, 0.1] = line-gap
# DXF: ("DASHDOTX2", "Dash dot (2x) ____ . ____ . ____ . ____",
# [2.4, 2.0, -0.2, 0.0, -0.2])
# linetype_pattern: [2.0, 0.2, 0.0, 0.2] = line-gap-point-gap
# Stored as tuple, so pattern could be used as key for caching.
# SVG dash-pattern does not support points, so a minimal line length
# (maybe inferred from linewidth?) has to be used, which may alter the
# overall line appearance - but linetype mapping will never be perfect.
# The continuous pattern is an empty tuple ()
self.linetype_pattern: Sequence[float] = CONTINUOUS_PATTERN
self.linetype_scale: float = 1.0
# line weight in mm, todo: default lineweight is 0.25?
self.lineweight: float = 0.25
self.is_visible = True
# The 'layer' attribute stores the resolved layer of an entity:
# Entities inside of a block references get properties from the layer
# of the INSERT entity, if they reside on the layer '0'.
# To get the "real" layer of an entity, you have to use `entity.dxf.layer`
self.layer: str = "0"
# Font definition object for text entities:
# `None` is for the default font
self.font: Optional[fonts.FontFace] = None
# Filling properties: Solid, Pattern, Gradient
self.filling: Optional[Filling] = None
self.units = InsertUnits.Unitless
def __str__(self):
return (
f"({self.color}, {self.linetype_name}, {self.lineweight}, "
f'"{self.layer}")'
)
@property
def rgb(self) -> RGB:
"""Returns color as RGB tuple."""
return hex_to_rgb(self.color[:7]) # ignore alpha if present
@property
def luminance(self) -> float:
"""Returns perceived color luminance in range [0, 1] from dark to light."""
return luminance(self.rgb)
class LayerProperties(Properties):
"""Modified attribute meaning:
is_visible: Whether entities belonging to this layer should be drawn
layer: Stores real layer name (mixed case)
"""
def __init__(self):
super().__init__()
self.has_aci_color_7 = False
def get_entity_color_from_layer(self, fg: Color) -> Color:
"""Returns the layer color or if layer color is ACI color 7 the
given layout default foreground color `fg`.
"""
if self.has_aci_color_7:
return fg
else:
return self.color
DEFAULT_LAYER_PROPERTIES = LayerProperties()
class LayoutProperties:
# The LAYOUT, BLOCK and BLOCK_RECORD entities do not have
# explicit graphic properties.
def __init__(
self,
name: str,
background_color: Color,
foreground_color: Optional[Color] = None,
units=InsertUnits.Unitless,
dark_background: Optional[bool] = None,
):
"""
Args:
name: tab/display name
units: enum :class:`ezdxf.enums.InsertUnits`
"""
self.name = name
self.units: InsertUnits = units
self._background_color = ""
self._default_color = ""
self._has_dark_background = False
self.set_colors(background_color, foreground_color)
if dark_background is not None:
self._has_dark_background = dark_background
@property
def background_color(self) -> Color:
"""Returns the default layout background color."""
return self._background_color
@property
def default_color(self) -> Color:
"""Returns the default layout foreground color."""
return self._default_color
@property
def has_dark_background(self) -> bool:
"""Returns ``True`` if the actual background-color is "dark"."""
return self._has_dark_background
@staticmethod
def modelspace(units=InsertUnits.Unitless) -> "LayoutProperties":
return LayoutProperties("Model", MODEL_SPACE_BG_COLOR, units=units)
@staticmethod
def paperspace(
name: str = "", units=InsertUnits.Unitless
) -> "LayoutProperties":
return LayoutProperties(name, PAPER_SPACE_BG_COLOR, units=units)
@staticmethod
def from_layout(
layout: "Layout", units: Optional[int] = None
) -> "LayoutProperties":
"""Setup default layout properties."""
if layout.name == "Model":
bg = MODEL_SPACE_BG_COLOR
else:
bg = PAPER_SPACE_BG_COLOR
if units is None:
units = layout.units
return LayoutProperties(layout.name, bg, units=units)
def set_colors(self, bg: Color, fg: Color = None) -> None:
"""Setup default layout colors.
Required color format "#RRGGBB" or including alpha transparency
"#RRGGBBAA".
"""
if not is_valid_color(bg):
raise ValueError(f"Invalid background color: {bg}")
self._background_color = bg
if len(bg) == 9: # including transparency
bg = bg[:7]
self._has_dark_background = is_dark_color(bg)
if fg is not None:
if not is_valid_color(fg):
raise ValueError(f"Invalid foreground color: {fg}")
self._default_color = fg
else:
self._default_color = (
"#ffffff" if self._has_dark_background else "#000000"
)
class RenderContext:
def __init__(
self,
doc: Optional["Drawing"] = None,
*,
ctb: str = "",
export_mode: bool = False,
):
"""Represents the render context for the DXF document `doc`.
A given `ctb` file (plot style file) overrides the default properties.
Args:
doc: The document that is being drawn
ctb: A path to a plot style table to use
export_mode: Whether to render the document as it would look when
exported (plotted) by a CAD application to a file such as pdf,
or whether to render the document as it would appear inside a
CAD application.
"""
self._saved_states: List[Properties] = []
self.line_pattern: Dict[str, Sequence[float]] = (
_load_line_pattern(doc.linetypes) if doc else dict()
)
self.current_layout_properties = LayoutProperties.modelspace()
self.current_block_reference_properties: Optional[Properties] = None
self.plot_styles = self._load_plot_style_table(ctb)
self.export_mode = export_mode
# Always consider: entity layer may not exist
# Layer name as key is normalized, most likely name.lower(), but may
# change in the future.
self.layers: Dict[str, LayerProperties] = dict()
# Text-style -> font mapping
self.fonts: Dict[str, fonts.FontFace] = dict()
self.units = InsertUnits.Unitless
self.linetype_scale: float = 1.0 # overall modelspace linetype scaling
self.measurement = Measurement.Imperial
self.pdsize = 0
self.pdmode = 0
if doc:
self.linetype_scale = doc.header.get("$LTSCALE", 1.0)
try:
self.units = InsertUnits(doc.header.get("$INSUNITS", 0))
except ValueError:
self.units = InsertUnits.Unitless
try:
self.measurement = Measurement(
doc.header.get("$MEASUREMENT", 0)
)
except ValueError:
self.measurement = Measurement.Imperial
self.pdsize = doc.header.get("$PDSIZE", 1.0)
self.pdmode = doc.header.get("$PDMODE", 0)
self._setup_layers(doc)
self._setup_text_styles(doc)
if self.units == InsertUnits.Unitless:
if self.measurement == Measurement.Metric:
self.units = InsertUnits.Meters
else:
self.units = InsertUnits.Inches
self.current_layout_properties.units = self.units
self._hatch_pattern_cache: Dict[str, HatchPatternType] = dict()
def update_configuration(self, config: Configuration) -> Configuration:
"""Where the user has not specified a value, populate configuration
fields based on the dxf header values
"""
changes = {}
if config.pdsize is None:
changes["pdsize"] = self.pdsize
if config.pdmode is None:
changes["pdmode"] = self.pdmode
if config.measurement is None:
changes["measurement"] = self.measurement
return config.with_changes(**changes)
def _setup_layers(self, doc: "Drawing"):
for layer in doc.layers:
self.add_layer(cast("Layer", layer))
def _setup_text_styles(self, doc: "Drawing"):
for text_style in doc.styles:
self.add_text_style(cast("Textstyle", text_style))
def add_layer(self, layer: "Layer") -> None:
"""Setup layer properties."""
properties = LayerProperties()
name = layer_key(layer.dxf.name)
# Store real layer name (mixed case):
properties.layer = layer.dxf.name
properties.color = self._true_layer_color(layer)
# set layer transparency
alpha = transparency_to_alpha(layer.transparency)
if alpha < 255:
properties.color = set_color_alpha(properties.color, alpha)
        # Should the layer ACI color depend on the layout background color?
# True color overrides ACI color and layers with only true color set
# have default ACI color 7!
if not layer.has_dxf_attrib("true_color"):
properties.has_aci_color_7 = layer.dxf.color == 7
# Normalize linetype names to UPPERCASE:
properties.linetype_name = str(layer.dxf.linetype).upper()
properties.linetype_pattern = self.line_pattern.get(
properties.linetype_name, CONTINUOUS_PATTERN
)
properties.lineweight = self._true_layer_lineweight(
layer.dxf.lineweight
)
properties.is_visible = layer.is_on() and not layer.is_frozen()
if self.export_mode:
properties.is_visible &= bool(layer.dxf.plot)
self.layers[name] = properties
def add_text_style(self, text_style: "Textstyle"):
"""Setup text style properties."""
name = table_key(text_style.dxf.name)
font_file = text_style.dxf.font
font_face = None
if font_file == "": # Font family stored in XDATA?
family, italic, bold = text_style.get_extended_font_data()
if family:
font_face = fonts.find_font_face_by_family(family, italic, bold)
else:
font_face = fonts.get_font_face(font_file, map_shx=True)
if font_face is None: # fall back to default font
font_face = fonts.FontFace()
self.fonts[name] = font_face
def _true_layer_color(self, layer: "Layer") -> Color:
if layer.dxf.hasattr("true_color"):
return rgb_to_hex(layer.rgb) # type: ignore
else:
# Don't use layer.dxf.color: color < 0 is layer state off
aci = layer.color
# aci: 0=BYBLOCK, 256=BYLAYER, 257=BYOBJECT
if aci < 1 or aci > 255:
aci = 7 # default layer color
return self._aci_to_true_color(aci)
def _true_layer_lineweight(self, lineweight: int) -> float:
if lineweight < 0:
return self.default_lineweight()
else:
return float(lineweight) / 100.0
@staticmethod
def _load_plot_style_table(filename: str):
# Each layout can have a different plot style table stored in
# Layout.dxf.current_style_sheet.
# HEADER var $STYLESHEET stores the default ctb-file name.
try:
ctb = acadctb.load(filename)
except IOError:
ctb = acadctb.new_ctb()
        # Colors in CTB files can be RGB colors but don't have to be,
        # therefore initialize colors without RGB values from the
        # default AutoCAD palette:
for aci in range(1, 256):
entry = ctb[aci] # type: ignore
if entry.has_object_color():
# initialize with default AutoCAD palette
entry.color = int2rgb(DXF_DEFAULT_COLORS[aci])
return ctb
def set_layers_state(self, layers: Set[str], state=True):
"""Set layer state of `layers` to on/off.
Args:
layers: set of layer names
            state: `True` turns these `layers` on and all others off,
                `False` turns these `layers` off and all others on
"""
layers = {layer_key(name) for name in layers}
for name, layer in self.layers.items():
if name in layers:
layer.is_visible = state
else:
layer.is_visible = not state
def set_current_layout(self, layout: "Layout"):
self.current_layout_properties = LayoutProperties.from_layout(
layout, units=self.units
)
@property
def inside_block_reference(self) -> bool:
"""Returns ``True`` if current processing state is inside of a block
reference (INSERT).
"""
return bool(self.current_block_reference_properties)
def push_state(self, block_reference: Properties) -> None:
self._saved_states.append(self.current_block_reference_properties) # type: ignore
self.current_block_reference_properties = block_reference
def pop_state(self) -> None:
self.current_block_reference_properties = self._saved_states.pop()
def resolve_all(self, entity: "DXFGraphic") -> Properties:
"""Resolve all properties of `entity`."""
p = Properties()
p.layer = self.resolve_layer(entity)
resolved_layer = layer_key(p.layer)
p.units = self.resolve_units()
p.color = self.resolve_color(entity, resolved_layer=resolved_layer)
p.linetype_name, p.linetype_pattern = self.resolve_linetype(
entity, resolved_layer=resolved_layer
)
p.lineweight = self.resolve_lineweight(
entity, resolved_layer=resolved_layer
)
p.linetype_scale = self.resolve_linetype_scale(entity)
p.is_visible = self.resolve_visible(
entity, resolved_layer=resolved_layer
)
if entity.is_supported_dxf_attrib("style"):
p.font = self.resolve_font(entity)
if isinstance(entity, DXFPolygon):
p.filling = self.resolve_filling(entity)
return p
def resolve_units(self) -> InsertUnits:
return self.current_layout_properties.units
def resolve_linetype_scale(self, entity: "DXFGraphic") -> float:
return entity.dxf.ltscale * self.linetype_scale
def resolve_visible(
self, entity: "DXFGraphic", *, resolved_layer: Optional[str] = None
) -> bool:
"""Resolve the visibility state of `entity`. Returns ``True`` if
`entity` is visible.
"""
if isinstance(entity, Insert):
# depends only on the invisible flag, the layer state has no effect!
return not bool(entity.dxf.invisible)
elif isinstance(entity, Face3d):
return any(entity.get_edges_visibility())
entity_layer = resolved_layer or layer_key(self.resolve_layer(entity))
layer_properties = self.layers.get(entity_layer)
if layer_properties and not layer_properties.is_visible:
return False
elif isinstance(entity, Attrib):
return not bool(entity.dxf.invisible) and not entity.is_invisible
else:
return not bool(entity.dxf.invisible)
def resolve_layer(self, entity: "DXFGraphic") -> str:
"""Resolve the layer of `entity`, this is only relevant for entities
inside of block references.
"""
layer = entity.dxf.layer
if layer == "0" and self.inside_block_reference:
layer = self.current_block_reference_properties.layer # type: ignore
return layer
def resolve_color(
self, entity: "DXFGraphic", *, resolved_layer: Optional[str] = None
) -> Color:
"""Resolve the rgb-color of `entity` as hex color string:
"#RRGGBB" or "#RRGGBBAA".
"""
if entity.dxf.hasattr("true_color"):
# An existing true color value always overrides ACI color!
# Do not default to BYLAYER or BYBLOCK, this ACI value is ignored!
aci = 7
else:
aci = entity.dxf.color # defaults to BYLAYER
entity_layer = resolved_layer or layer_key(self.resolve_layer(entity))
layer_properties = self.layers.get(
entity_layer, DEFAULT_LAYER_PROPERTIES
)
if aci == const.BYLAYER:
color = layer_properties.get_entity_color_from_layer(
self.current_layout_properties.default_color
)
elif aci == const.BYBLOCK:
if not self.inside_block_reference:
color = self.current_layout_properties.default_color
else:
color = self.current_block_reference_properties.color # type: ignore
else: # BYOBJECT
color = self._true_entity_color(entity.rgb, aci)
alpha = self._entity_alpha_str(
entity.dxf.get("transparency"), layer_properties.color
)
return color[:7] + alpha
def _entity_alpha_str(
self, raw_transparency: Optional[int], layer_color: Color
) -> str:
"""Returns the alpha value as hex string "xx" or empty string if opaque."""
# alpha 0 = fully transparent
# alpha 255 = opaque
# DXF Transparency 0 = fully transparent
# DXF Transparency 255 = opaque
if raw_transparency == const.TRANSPARENCY_BYBLOCK:
if self.inside_block_reference:
return self.current_block_reference_properties.color[7:] # type: ignore
# else: entity is not in a block
# There is no default transparency value for layouts, AutoCAD and
# BricsCAD shows opaque entities!
return ""
# No transparency attribute means "by layer"
elif raw_transparency is None:
return layer_color[7:]
alpha = raw_transparency & 0xFF
if alpha < 255:
return f"{alpha:02x}"
return ""
def resolve_aci_color(self, aci: int, resolved_layer: str) -> Color:
"""Resolve the `aci` color as hex color string: "#RRGGBB" """
if aci == const.BYLAYER:
layer = self.layers.get(
layer_key(resolved_layer), DEFAULT_LAYER_PROPERTIES
)
color = layer.get_entity_color_from_layer(
self.current_layout_properties.default_color
)
elif aci == const.BYBLOCK:
if not self.inside_block_reference:
color = self.current_layout_properties.default_color
else:
color = self.current_block_reference_properties.color # type: ignore
else: # BYOBJECT
color = self._true_entity_color(None, aci)
return color
def _true_entity_color(
self, true_color: Optional[Tuple[int, int, int]], aci: int
) -> Color:
"""Returns rgb color in hex format: "#RRGGBB".
`true_color` has higher priority than `aci`.
"""
if true_color is not None:
return rgb_to_hex(true_color)
elif 0 < aci < 256:
return self._aci_to_true_color(aci)
else:
return (
self.current_layout_properties.default_color
) # unknown / invalid
def _aci_to_true_color(self, aci: int) -> Color:
"""Returns the `aci` value (AutoCAD Color Index) as rgb value in
hex format: "#RRGGBB".
"""
if aci == 7: # black/white; todo: this bypasses the plot style table
if self.current_layout_properties.has_dark_background:
return "#ffffff"
else:
return "#000000"
else:
return rgb_to_hex(self.plot_styles[aci].color)
def resolve_linetype(
self, entity: "DXFGraphic", *, resolved_layer: str = None
) -> Tuple[str, Sequence[float]]:
"""Resolve the linetype of `entity`. Returns a tuple of the linetype
name as upper-case string and the simplified linetype pattern as tuple
of floats.
"""
aci = entity.dxf.color
# Not sure if plotstyle table overrides actual entity setting?
if (0 < aci < 256) and self.plot_styles[
aci
].linetype != acadctb.OBJECT_LINETYPE:
# todo: return special line types - overriding linetypes by
# plotstyle table
pass
name = entity.dxf.linetype.upper() # default is 'BYLAYER'
if name == "BYLAYER":
entity_layer = resolved_layer or layer_key(
self.resolve_layer(entity)
)
layer = self.layers.get(entity_layer, DEFAULT_LAYER_PROPERTIES)
name = layer.linetype_name
pattern = layer.linetype_pattern
elif name == "BYBLOCK":
if self.inside_block_reference:
name = self.current_block_reference_properties.linetype_name # type: ignore
pattern = (
self.current_block_reference_properties.linetype_pattern # type: ignore
)
else:
# There is no default layout linetype
name = "STANDARD"
pattern = CONTINUOUS_PATTERN
else:
pattern = self.line_pattern.get(name, CONTINUOUS_PATTERN)
return name, pattern
def resolve_lineweight(
self, entity: "DXFGraphic", *, resolved_layer: str = None
) -> float:
"""Resolve the lineweight of `entity` in mm.
DXF stores the lineweight in mm times 100 (e.g. 0.13mm = 13).
The smallest line weight is 0 and the biggest line weight is 211.
The DXF/DWG format is limited to a fixed value table,
see: :attr:`ezdxf.lldxf.const.VALID_DXF_LINEWEIGHTS`
        CAD applications draw lineweight 0mm as an undefined small value; to
        prevent backends from drawing nothing for lineweight 0mm, the smallest
        return value is 0.01mm.
"""
def lineweight():
aci = entity.dxf.color
# Not sure if plotstyle table overrides actual entity setting?
if (0 < aci < 256) and self.plot_styles[
aci
].lineweight != acadctb.OBJECT_LINEWEIGHT:
# overriding lineweight by plotstyle table
return self.plot_styles.get_lineweight(aci)
lineweight = entity.dxf.lineweight # default is BYLAYER
if lineweight == const.LINEWEIGHT_BYLAYER:
entity_layer = resolved_layer or layer_key(
self.resolve_layer(entity)
)
return self.layers.get(
entity_layer, DEFAULT_LAYER_PROPERTIES
).lineweight
elif lineweight == const.LINEWEIGHT_BYBLOCK:
if self.inside_block_reference:
return self.current_block_reference_properties.lineweight
else:
# There is no default layout lineweight
return self.default_lineweight()
elif lineweight == const.LINEWEIGHT_DEFAULT:
return self.default_lineweight()
else:
return float(lineweight) / 100.0
return max(0.01, lineweight())
def default_lineweight(self):
"""Returns the default lineweight of the document."""
# todo: is this value stored anywhere (e.g. HEADER section)?
return 0.25
def resolve_font(self, entity: "DXFGraphic") -> Optional[fonts.FontFace]:
"""Resolve the text style of `entity` to a font name.
Returns ``None`` for the default font.
"""
# todo: extended font data
style = entity.dxf.get("style", "Standard")
return self.fonts.get(table_key(style))
def resolve_filling(self, entity: "DXFGraphic") -> Optional[Filling]:
"""Resolve filling properties (SOLID, GRADIENT, PATTERN) of `entity`."""
def setup_gradient():
filling.type = Filling.GRADIENT
filling.name = gradient.name.upper()
# todo: no idea when to use aci1 and aci2
filling.gradient_color1 = rgb_to_hex(gradient.color1)
if gradient.one_color:
c = round(gradient.tint * 255) # channel value
filling.gradient_color2 = rgb_to_hex((c, c, c))
else:
filling.gradient_color2 = rgb_to_hex(gradient.color2)
filling.angle = gradient.rotation
filling.gradient_centered = gradient.centered
def setup_pattern():
filling.type = Filling.PATTERN
filling.name = polygon.dxf.pattern_name.upper()
filling.pattern_scale = polygon.dxf.pattern_scale
filling.angle = polygon.dxf.pattern_angle
if polygon.dxf.pattern_double:
# This value is not editable by CAD-App-GUI:
filling.pattern_scale *= 2 # todo: is this correct?
filling.pattern = self._hatch_pattern_cache.get(filling.name)
if filling.pattern:
return
pattern = polygon.pattern
if not pattern:
return
# DXF stores the hatch pattern already rotated and scaled,
# pattern_scale and pattern_rotation are just hints for the CAD
# application to modify the pattern if required.
# It's better to revert the scaling and rotation, because in general
# back-ends do not handle pattern that way, they need a base-pattern
# and separated scaling and rotation attributes and these
# base-pattern could be cached by their name.
#
# There is no advantage of simplifying the hatch line pattern and
# this format is required by the PatternAnalyser():
filling.pattern = scale_pattern(
pattern.as_list(), 1.0 / filling.pattern_scale, -filling.angle
)
self._hatch_pattern_cache[filling.name] = filling.pattern
if not isinstance(entity, DXFPolygon):
return None
polygon = cast(DXFPolygon, entity)
filling = Filling()
if polygon.dxf.solid_fill:
gradient = polygon.gradient
if gradient is None:
filling.type = Filling.SOLID
else:
if gradient.kind == 0: # Solid
filling.type = Filling.SOLID
filling.gradient_color1 = rgb_to_hex(gradient.color1)
else:
setup_gradient()
else:
setup_pattern()
return filling
COLOR_PATTERN = re.compile("#[0-9A-Fa-f]{6,8}")
def is_valid_color(color: Color) -> bool:
if type(color) is not Color:
raise TypeError(f"Invalid argument type: {type(color)}.")
if len(color) in (7, 9):
return bool(COLOR_PATTERN.fullmatch(color))
return False
def rgb_to_hex(
rgb: Union[Tuple[int, int, int], Tuple[float, float, float]]
) -> Color:
"""Returns color in hex format: "#RRGGBB"."""
assert all(0 <= x <= 255 for x in rgb), f"invalid RGB color: {rgb}"
r, g, b = rgb
return f"#{r:02x}{g:02x}{b:02x}"
def hex_to_rgb(hex_string: Color) -> RGB:
"""Returns hex string color as (r, g, b) tuple."""
hex_string = hex_string.lstrip("#")
assert len(hex_string) == 6
r = int(hex_string[0:2], 16)
g = int(hex_string[2:4], 16)
b = int(hex_string[4:6], 16)
return r, g, b
def set_color_alpha(color: Color, alpha: int) -> Color:
"""Returns `color` including the new `alpha` channel in hex format:
"#RRGGBBAA".
Args:
color: may be an RGB or RGBA hex color string
alpha: the new alpha value (0-255)
"""
assert color.startswith("#") and len(color) in (
7,
9,
), f'invalid RGB color: "{color}"'
assert 0 <= alpha < 256, f"alpha out of range: {alpha}"
return f"{color[:7]}{alpha:02x}"
def transparency_to_alpha(value: float) -> int:
# clamp into range [0, 1]
value = min(max(0.0, value), 1.0)
return int(round((1.0 - value) * 255))
def _load_line_pattern(linetypes: "Table") -> Dict[str, Sequence[float]]:
"""Load linetypes defined in a DXF document into as dictionary,
key is the upper case linetype name, value is the simplified line pattern,
see :func:`compile_line_pattern`.
"""
pattern: Dict[str, Sequence[float]] = dict()
for linetype in linetypes:
assert isinstance(linetype, Linetype)
name = linetype.dxf.name.upper()
pattern[name] = linetype.pattern_tags.compile()
return pattern
```
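A minimal usage sketch for the render context above. The import path `ezdxf.addons.drawing.properties` and the file name "sample.dxf" are assumptions, not guaranteed by the source shown here.
```python
import ezdxf
from ezdxf.addons.drawing.properties import RenderContext  # assumed module path

doc = ezdxf.readfile("sample.dxf")   # hypothetical input file
msp = doc.modelspace()
ctx = RenderContext(doc)             # loads layers, text styles, linetypes, CTB defaults
ctx.set_current_layout(msp)          # resolve against modelspace background and units
for entity in msp:
    props = ctx.resolve_all(entity)  # layer, color, linetype, lineweight, visibility, ...
    print(entity.dxftype(), props.color, props.lineweight, props.is_visible)
```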
#### File: addons/dwg/crc.py
```python
__all__ = ["crc8", "crc32"]
from .const import Bytes
def crc8(data: Bytes, seed: int = 0) -> int:
for byte in data:
index = byte ^ (seed & 0xFF)
seed = (seed >> 8) & 0xFF
seed ^= CRC8_TABLE[index & 0xFF]
return seed
def crc32(data: Bytes, seed: int = 0) -> int:
inverted_crc = ~seed
for byte in data:
inverted_crc = (inverted_crc >> 8) ^ CRC32_TABLE[
(inverted_crc ^ byte) & 0xFF
]
return ~inverted_crc
# fmt: off
# Source: Open Design Specification for .dwg
CRC8_TABLE = [
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1,
0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40,
0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1,
0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1,
0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40,
0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1,
0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0,
0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740,
0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0,
0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1,
0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140,
0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0,
0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0,
0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341,
0x4100, 0x81C1, 0x8081, 0x4040,
]
# Source: Open Design Specification for .dwg
CRC32_TABLE = [
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3, 0x0edb8832,
0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, 0x136c9856, 0x646ba8c0, 0xfd62f97a,
0x8a65c9ec, 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3,
0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, 0x2f6f7c87, 0x58684c11, 0xc1611dab,
0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4,
0x1c6c6162, 0x856530d8, 0xf262004e, 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, 0x4db26158, 0x3ab551ce, 0xa3bc0074,
0xd4bb30e2, 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086, 0x5768b525,
0x206f85b3, 0xb966d409, 0xce61e49f, 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, 0xead54739, 0x9dd277af, 0x04db2615,
0x73dc1683, 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7, 0xfed41b76,
0x89d32be0, 0x10da7a5a, 0x67dd4acc, 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, 0xd80d2bda, 0xaf0a1b4c, 0x36034af6,
0x41047a60, 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7,
0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7,
0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45, 0xa00ae278,
0xd70dd2ee, 0x4e048354, 0x3903b3c2, 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, 0xbdbdf21c, 0xcabac28a, 0x53b39330,
0x24b4a3a6, 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d,
]
# fmt: on
```
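A quick, hedged sanity check for the CRC helpers above; the import path assumes the addon layout shown in the file header, and the printed values are illustrative, not reference vectors from the ODA specification.
```python
from ezdxf.addons.dwg.crc import crc8, crc32  # assumed module path

payload = b"AcDb:Entity"
print(hex(crc8(payload)))                # table-driven checksum per the ODA tables above
print(hex(crc32(payload) & 0xFFFFFFFF))  # mask to an unsigned 32-bit value for display
```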
#### File: ezdxf/addons/odafc.py
```python
import logging
import os
import platform
import shutil
import subprocess
import tempfile
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Optional, List
import ezdxf
from ezdxf.document import Drawing
from ezdxf.lldxf.validator import (
is_dxf_file,
dxf_info,
is_binary_dxf_file,
dwg_version,
)
logger = logging.getLogger("ezdxf")
win_exec_path = ezdxf.options.get("odafc-addon", "win_exec_path").strip('"')
class ODAFCError(IOError):
pass
VERSION_MAP = {
"R12": "ACAD12",
"R13": "ACAD13",
"R14": "ACAD14",
"R2000": "ACAD2000",
"R2004": "ACAD2004",
"R2007": "ACAD2007",
"R2010": "ACAD2010",
"R2013": "ACAD2013",
"R2018": "ACAD2018",
"AC1004": "ACAD9",
"AC1006": "ACAD10",
"AC1009": "ACAD12",
"AC1012": "ACAD13",
"AC1014": "ACAD14",
"AC1015": "ACAD2000",
"AC1018": "ACAD2004",
"AC1021": "ACAD2007",
"AC1024": "ACAD2010",
"AC1027": "ACAD2013",
"AC1032": "ACAD2018",
}
VALID_VERSIONS = {
"ACAD9",
"ACAD10",
"ACAD12",
"ACAD13",
"ACAD14",
"ACAD2000",
"ACAD2004",
"ACAD2007",
"ACAD2010",
"ACAD2013",
"ACAD2018",
}
def map_version(version: str) -> str:
return VERSION_MAP.get(version.upper(), version.upper())
def readfile(
filename: str, version: Optional[str] = None, *, audit: bool = False
) -> Optional[Drawing]:
"""Use an installed `ODA File Converter`_ to convert a DWG/DXB/DXF file
into a temporary DXF file and load this file by `ezdxf`.
Args:
filename: file to load by ODA File Converter
version: load file as specific DXF version, by default the same version
as the source file or if not detectable the latest by `ezdxf`
supported version.
audit: audit source file before loading
"""
infile = Path(filename).absolute()
if not infile.is_file():
raise FileNotFoundError(f"No such file: '{infile}'")
version = _detect_version(filename) if version is None else version
with tempfile.TemporaryDirectory(prefix="odafc_") as tmp_dir:
args = _odafc_arguments(
infile.name,
str(infile.parent),
tmp_dir,
output_format="DXF",
version=version,
audit=audit,
)
_execute_odafc(args)
out_file = Path(tmp_dir) / infile.with_suffix(".dxf").name
if out_file.exists():
doc = ezdxf.readfile(str(out_file))
doc.filename = infile.with_suffix(".dxf") #type: ignore
return doc
raise ODAFCError("Failed to convert file: Unknown Error")
def export_dwg(
doc: Drawing,
filename: str,
version: Optional[str] = None,
*,
audit: bool = False,
replace: bool = False,
) -> None:
"""Use an installed `ODA File Converter`_ to export a DXF document `doc`
as a DWG file.
Saves a temporary DXF file and convert this DXF file into a DWG file by the
ODA File Converter. If `version` is not specified the DXF version of the
source document is used.
Args:
doc: `ezdxf` DXF document as :class:`~ezdxf.drawing.Drawing` object
filename: export filename of DWG file, extension will be changed to ".dwg"
version: export file as specific version, by default the same version as
the source document.
audit: audit source file by ODA File Converter at exporting
replace: replace existing DWG file if ``True``
.. versionchanged:: 0.15
added `replace` option
"""
if version is None:
version = doc.dxfversion
export_version = VERSION_MAP[version]
dwg_file = Path(filename).absolute()
out_folder = Path(dwg_file.parent)
if dwg_file.exists():
if replace:
dwg_file.unlink()
else:
raise FileExistsError(f"File already exists: {dwg_file}")
if out_folder.exists():
with tempfile.TemporaryDirectory(prefix="odafc_") as tmp_dir:
dxf_file = Path(tmp_dir) / dwg_file.with_suffix(".dxf").name
# Save DXF document
old_filename = doc.filename
doc.saveas(dxf_file)
doc.filename = old_filename
arguments = _odafc_arguments(
dxf_file.name,
tmp_dir,
str(out_folder),
output_format="DWG",
version=export_version,
audit=audit,
)
_execute_odafc(arguments)
else:
raise FileNotFoundError(
f"No such file or directory: '{str(out_folder)}'"
)
def _detect_version(path: str) -> str:
version = "ACAD2018"
ext = os.path.splitext(path)[1].lower()
if ext == ".dxf":
if is_binary_dxf_file(path):
pass
elif is_dxf_file(path):
with open(path, "rt") as fp:
info = dxf_info(fp)
version = VERSION_MAP[info.version]
elif ext == ".dwg":
version = dwg_version(path) # type: ignore
if version is None:
raise ValueError("Unknown or unsupported DWG version.")
else:
raise ValueError(f"Unsupported file format: '{ext}'")
return map_version(version)
def _odafc_arguments(
filename: str,
in_folder: str,
out_folder: str,
output_format: str = "DXF",
version: str = "ACAD2013",
audit: bool = False,
) -> List[str]:
"""
ODA File Converter command line format:
---------------------------------------
OdaFC "Input Folder" "Output Folder" version type recurse audit [filter]
version - Output version: "ACAD9" - "ACAD2018"
type - Output file type: "DWG", "DXF", "DXB"
recurse - Recurse Input Folder: "0" or "1"
audit - audit each file: "0" or "1"
optional Input files filter: default "*.DWG,*.DXF"
"""
recurse = "0"
audit_str = "1" if audit else "0"
return [
in_folder,
out_folder,
version,
output_format,
recurse,
audit_str,
filename,
]
def _get_odafc_path(system: str) -> str:
path = shutil.which("ODAFileConverter")
if not path and system == "Windows":
path = win_exec_path
if not Path(path).is_file():
path = None
if not path:
raise FileNotFoundError(
f"Could not find ODAFileConverter in the path. "
f"Install application from https://www.opendesign.com/guestfiles/oda_file_converter"
)
return path
@contextmanager
def _linux_dummy_display():
"""See xvbfwrapper library for a more feature complete xvfb interface."""
if shutil.which("Xvfb"):
display = ":123" # arbitrary choice
proc = subprocess.Popen(
["Xvfb", display, "-screen", "0", "800x600x24"],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
time.sleep(0.1)
yield display
try:
proc.terminate()
proc.wait()
except OSError:
pass
else:
logger.warning(
f"Install xvfb to prevent the ODAFileConverter GUI from opening"
)
yield os.environ["DISPLAY"]
def _run_with_no_gui(
system: str, command: str, arguments: List[str]
) -> subprocess.Popen:
if system == "Linux":
with _linux_dummy_display() as display:
env = os.environ.copy()
env["DISPLAY"] = display
proc = subprocess.Popen(
[command] + arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
proc.wait()
elif system == "Darwin":
# TODO: unknown how to prevent the GUI from appearing on OSX
proc = subprocess.Popen(
[command] + arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
proc.wait()
elif system == "Windows":
# New code from George-Jiang to solve the GUI pop-up problem
startupinfo = subprocess.STARTUPINFO() # type:ignore # only a Linux issue?
startupinfo.dwFlags = (
subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW # type:ignore # only a Linux issue?
)
startupinfo.wShowWindow = subprocess.SW_HIDE # type:ignore # only a Linux issue?
proc = subprocess.Popen(
[command] + arguments,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
)
proc.wait()
else:
# ODAFileConverter only has Linux, OSX and Windows versions
raise ODAFCError(f"Unsupported platform: {system}")
return proc
def _odafc_failed(system: str, proc: subprocess.Popen, stderr: str) -> bool:
if proc.returncode != 0:
# note: currently, ODAFileConverter does not set the return code
return True
stderr = stderr.strip()
if system == "Linux":
# ODAFileConverter *always* crashes on Linux even if the output was successful
return stderr != "" and stderr != "Quit (core dumped)"
else:
return stderr != ""
def _execute_odafc(arguments: List[str]) -> Optional[bytes]:
logger.debug(f"Running ODAFileConverter with arguments: {arguments}")
system = platform.system()
oda_fc = _get_odafc_path(system)
proc = _run_with_no_gui(system, oda_fc, arguments)
stdout = proc.stdout.read().decode("utf-8") # type: ignore
stderr = proc.stderr.read().decode("utf-8") # type: ignore
if _odafc_failed(system, proc, stderr):
msg = (
f"ODA File Converter failed: return code = {proc.returncode}.\n"
f"stdout: {stdout}\nstderr: {stderr}"
)
logger.debug(msg)
raise ODAFCError(msg)
return proc.stdout # type: ignore
```
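A minimal usage sketch for the odafc add-on above; it assumes the ODA File Converter is installed and that a file "input.dwg" exists next to the script.
```python
from ezdxf.addons import odafc

# DWG -> ezdxf document (converted through a temporary DXF file):
doc = odafc.readfile("input.dwg")        # hypothetical input file
print(doc.dxfversion, len(doc.modelspace()))
# ezdxf document -> DWG (version defaults to the source DXF version):
odafc.export_dwg(doc, "output.dwg", replace=True)
```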
#### File: src/ezdxf/comments.py
```python
from typing import TYPE_CHECKING, TextIO, Iterable, Set
from ezdxf.lldxf.validator import is_dxf_file
from ezdxf.filemanagement import dxf_file_info
from ezdxf.lldxf.tagger import ascii_tags_loader
if TYPE_CHECKING:
from ezdxf.eztypes import DXFTag
def from_stream(stream: TextIO, codes: Set[int] = None) -> Iterable['DXFTag']:
"""
Yields comment tags from text `stream` as :class:`~ezdxf.lldxf.types.DXFTag` objects.
Args:
stream: input text stream
codes: set of group codes to yield additional DXF tags e.g. {5, 0} to also yield handle and structure tags
"""
codes = codes or set()
codes.add(999)
return (tag for tag in ascii_tags_loader(stream, skip_comments=False) if tag.code in codes)
def from_file(filename: str, codes: Set[int] = None) -> Iterable['DXFTag']:
"""
Yields comment tags from file `filename` as :class:`~ezdxf.lldxf.types.DXFTag` objects.
Args:
filename: filename as string
codes: yields also additional tags with specified group codes e.g. {5, 0} to also yield handle and
structure tags
"""
if is_dxf_file(filename):
info = dxf_file_info(filename)
with open(filename, mode='rt', encoding=info.encoding) as fp:
yield from from_stream(fp, codes=codes)
else:
raise IOError(f'File "{filename}" is not a DXF file.')
```
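A short sketch for the comment collector above, assuming "sample.dxf" is an ASCII DXF file that contains group code 999 comment tags.
```python
from ezdxf import comments

for tag in comments.from_file("sample.dxf", codes={0, 5}):
    # With codes={0, 5} the structure tags (0) and handles (5) are yielded
    # alongside the 999 comment tags, which helps to locate each comment.
    print(tag.code, tag.value)
```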
#### File: ezdxf/entities/acad_proxy_entity.py
```python
from typing import TYPE_CHECKING, Optional, Iterable
from ezdxf.lldxf import const
from ezdxf.lldxf.tags import Tags
from .dxfentity import SubclassProcessor
from .dxfgfx import DXFGraphic
from . import factory
if TYPE_CHECKING:
from ezdxf.eztypes import DXFNamespace, TagWriter, DXFEntity
# Group Codes of AcDbProxyEntity
# https://help.autodesk.com/view/OARX/2018/ENU/?guid=GUID-89A690F9-E859-4D57-89EA-750F3FB76C6B
# 100 AcDbProxyEntity
# 90 Proxy entity class ID (always 498)
# 91 Application entity's class ID. Class IDs are based on the order of
# the class in the CLASSES section. The first class is given the ID of
# 500, the next is 501, and so on
#
# 92 Size of graphics data in bytes < R2010; R2010+ = 160
# 310 Binary graphics data (multiple entries can appear) (optional)
#
# 96 Size of unknown data in bytes < R2010; R2010+ = 162
# 311 Binary entity data (multiple entries can appear) (optional)
#
# 93 Size of entity data in bits <R2010; R2010+ = 161
# 310 Binary entity data (multiple entries can appear) (optional)
#
# 330 or 340 or 350 or 360 - An object ID (multiple entries can appear) (optional)
# 94 0 (indicates end of object ID section)
# 95 Object drawing format when it becomes a proxy (a 32-bit unsigned integer):
# Low word is AcDbDwgVersion
# High word is MaintenanceReleaseVersion
# 70 Original custom object data format:
# 0 = DWG format
# 1 = DXF format
@factory.register_entity
class ACADProxyEntity(DXFGraphic):
"""READ ONLY ACAD_PROXY_ENTITY CLASS! DO NOT MODIFY!"""
DXFTYPE = "ACAD_PROXY_ENTITY"
MIN_DXF_VERSION_FOR_EXPORT = const.DXF2000
def __init__(self):
super().__init__()
self.acdb_proxy_entity: Optional[Tags] = None
def copy(self):
raise const.DXFTypeError(f"Cloning of {self.dxftype()} not supported.")
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
self.acdb_proxy_entity = processor.subclass_by_index(2)
self.load_proxy_graphic(processor.dxfversion)
return dxf
def load_proxy_graphic(self, dxfversion: Optional[str]) -> None:
if self.acdb_proxy_entity is not None:
if not dxfversion:
dxfversion = _detect_dxf_version(self.acdb_proxy_entity)
length_code = 92 if dxfversion < const.DXF2013 else 160
self.proxy_graphic = load_proxy_data(
self.acdb_proxy_entity, length_code, 310
)
def export_dxf(self, tagwriter: "TagWriter") -> None:
# Proxy graphic is stored in AcDbProxyEntity and not as usual in
# AcDbEntity!
preserve_proxy_graphic = self.proxy_graphic
self.proxy_graphic = None
super().export_dxf(tagwriter)
self.proxy_graphic = preserve_proxy_graphic
def export_entity(self, tagwriter: "TagWriter") -> None:
"""Export entity specific data as DXF tags. (internal API)"""
# Base class and AcDbEntity export is done by parent class
super().export_entity(tagwriter)
if self.acdb_proxy_entity is not None:
tagwriter.write_tags(self.acdb_proxy_entity)
# XDATA export is done by the parent class
def __virtual_entities__(self) -> Iterable[DXFGraphic]:
"""Implements the SupportsVirtualEntities protocol. """
from ezdxf.proxygraphic import ProxyGraphic
if self.proxy_graphic:
for e in ProxyGraphic(self.proxy_graphic, self.doc).virtual_entities():
e.set_source_of_copy(self)
yield e
return []
def virtual_entities(self) -> Iterable[DXFGraphic]:
"""Yields proxy graphic as "virtual" entities. """
return self.__virtual_entities__()
def load_proxy_data(
tags: Tags, length_code: int, data_code: int = 310
) -> Optional[bytes]:
try:
index = tags.tag_index(length_code)
except const.DXFValueError:
return None
binary_data = []
for code, value in tags[index + 1:]:
if code == data_code:
binary_data.append(value)
else:
break # at first tag with group code != data_code
return b"".join(binary_data)
def _detect_dxf_version(tags: Tags) -> str:
for tag in tags:
if 160 <= tag.code < 163:
return const.DXF2013
return const.DXF2000
```
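A hedged sketch showing how the read-only proxy entity above can still be inspected: its embedded proxy graphic is exploded into virtual entities. The file name "proxy.dxf" is an assumption.
```python
import ezdxf

doc = ezdxf.readfile("proxy.dxf")
msp = doc.modelspace()
for proxy in msp.query("ACAD_PROXY_ENTITY"):
    for virtual in proxy.virtual_entities():
        # Virtual entities are not stored in the entity database and have no
        # handle; copy them into a layout if a persistent representation is needed.
        print(virtual.dxftype())
```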
#### File: ezdxf/entities/factory.py
```python
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ezdxf.eztypes import Drawing, DXFEntity, ExtendedTags
__all__ = [
"register_entity",
"ENTITY_CLASSES",
"replace_entity",
"new",
"cls",
"is_bound",
"create_db_entry",
"load",
"bind",
]
# Stores all registered classes:
ENTITY_CLASSES = {}
# use @set_default_class to register the default entity class:
DEFAULT_CLASS = None
def set_default_class(cls):
global DEFAULT_CLASS
DEFAULT_CLASS = cls
return cls
def replace_entity(cls):
name = cls.DXFTYPE
ENTITY_CLASSES[name] = cls
return cls
def register_entity(cls):
name = cls.DXFTYPE
if name in ENTITY_CLASSES:
raise TypeError(f"Double registration for DXF type {name}.")
ENTITY_CLASSES[name] = cls
return cls
def new(
dxftype: str, dxfattribs: dict = None, doc: "Drawing" = None
) -> "DXFEntity":
"""Create a new entity, does not require an instantiated DXF document."""
entity = cls(dxftype).new(
handle=None,
owner=None,
dxfattribs=dxfattribs,
doc=doc,
)
return entity.cast() if hasattr(entity, "cast") else entity # type: ignore
def create_db_entry(dxftype, dxfattribs: dict, doc: "Drawing") -> "DXFEntity":
entity = new(dxftype=dxftype, dxfattribs=dxfattribs)
bind(entity, doc)
return entity
def load(tags: "ExtendedTags", doc: "Drawing" = None) -> "DXFEntity":
entity = cls(tags.dxftype()).load(tags, doc)
return entity.cast() if hasattr(entity, "cast") else entity # type: ignore
def cls(dxftype: str) -> "DXFEntity":
"""Returns registered class for `dxftype`."""
return ENTITY_CLASSES.get(dxftype, DEFAULT_CLASS)
def bind(entity: "DXFEntity", doc: "Drawing") -> None:
"""Bind `entity` to the DXF document `doc`.
The bind process stores the DXF `entity` in the entity database of the DXF
document.
"""
assert entity.is_alive, "Can not bind destroyed entity."
assert doc.entitydb is not None, "Missing entity database."
entity.doc = doc
doc.entitydb.add(entity)
# Do not call the post_bind_hook() while loading from external sources,
# not all entities and resources are loaded at this point of time!
if not doc.is_loading:
# bind extension dictionary
if entity.extension_dict is not None:
xdict = entity.extension_dict
if xdict.has_valid_dictionary:
xdict.update_owner(entity.dxf.handle)
dictionary = xdict.dictionary
if not is_bound(dictionary, doc):
bind(dictionary, doc)
doc.objects.add_object(dictionary)
entity.post_bind_hook()
def unbind(entity: "DXFEntity"):
"""Unbind `entity` from document and layout, but does not destroy the
entity.
Turns `entity` into a virtual entity: no handle, no owner, no document.
"""
if entity.is_alive and not entity.is_virtual:
doc = entity.doc
if entity.dxf.owner is not None:
try:
layout = doc.layouts.get_layout_for_entity(entity) # type: ignore
except KeyError:
pass
else:
layout.unlink_entity(entity) # type: ignore
process_sub_entities = getattr(entity, "process_sub_entities", None)
if process_sub_entities:
process_sub_entities(lambda e: unbind(e))
doc.entitydb.discard(entity) # type: ignore
entity.doc = None
def is_bound(entity: "DXFEntity", doc: "Drawing") -> bool:
"""Returns ``True`` if `entity`is bound to DXF document `doc`."""
if not entity.is_alive:
return False
if entity.is_virtual or entity.doc is not doc:
return False
assert doc.entitydb, "Missing entity database."
return entity.dxf.handle in doc.entitydb
```
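A minimal sketch of the factory functions shown above (largely internal API): create a virtual LINE entity, bind it to a document and check the binding. The attribute values are arbitrary.
```python
import ezdxf
from ezdxf.entities import factory

doc = ezdxf.new()
line = factory.new("LINE", dxfattribs={"start": (0, 0), "end": (1, 1)})
print(factory.is_bound(line, doc))   # False: virtual entity, no handle, no owner
factory.bind(line, doc)              # stores the entity in doc.entitydb
print(factory.is_bound(line, doc))   # True
```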
#### File: ezdxf/entities/tolerance.py
```python
from typing import TYPE_CHECKING
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr,
DXFAttributes,
DefSubclass,
XType,
RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.const import SUBCLASS_MARKER, DXF2000
from ezdxf.math import NULLVEC, Z_AXIS, X_AXIS
from ezdxf.math.transformtools import transform_extrusion
from .dxfentity import base_class, SubclassProcessor
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, DXFNamespace, Matrix44
__all__ = ["Tolerance"]
acdb_tolerance = DefSubclass(
"AcDbFcf",
{
"dimstyle": DXFAttr(
3,
default="Standard",
validator=validator.is_valid_table_name,
),
# Insertion point (in WCS):
"insert": DXFAttr(10, xtype=XType.point3d, default=NULLVEC),
# String representing the visual representation of the tolerance:
"content": DXFAttr(1, default=""),
"extrusion": DXFAttr(
210,
xtype=XType.point3d,
default=Z_AXIS,
optional=True,
validator=validator.is_not_null_vector,
fixer=RETURN_DEFAULT,
),
# X-axis direction vector (in WCS):
"x_axis_vector": DXFAttr(
11,
xtype=XType.point3d,
default=X_AXIS,
validator=validator.is_not_null_vector,
fixer=RETURN_DEFAULT,
),
},
)
acdb_tolerance_group_codes = group_code_mapping(acdb_tolerance)
@register_entity
class Tolerance(DXFGraphic):
"""DXF TOLERANCE entity"""
DXFTYPE = "TOLERANCE"
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_tolerance)
MIN_DXF_VERSION_FOR_EXPORT = DXF2000
def load_dxf_attribs(
self, processor: SubclassProcessor = None
) -> "DXFNamespace":
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_tolerance_group_codes, subclass=2, recover=True
)
return dxf
def export_entity(self, tagwriter: "TagWriter") -> None:
"""Export entity specific data as DXF tags."""
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_tolerance.name)
self.dxf.export_dxf_attribs(
tagwriter,
["dimstyle", "insert", "content", "extrusion", "x_axis_vector"],
)
def transform(self, m: "Matrix44") -> "Tolerance":
"""Transform the TOLERANCE entity by transformation matrix `m` inplace."""
self.dxf.insert = m.transform(self.dxf.insert)
self.dxf.x_axis_vector = m.transform_direction(self.dxf.x_axis_vector)
self.dxf.extrusion, _ = transform_extrusion(self.dxf.extrusion, m)
self.post_transform(m)
return self
```
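A hedged sketch using the TOLERANCE entity above: build a virtual entity via the factory and apply an inplace transformation; the attribute names follow the `acdb_tolerance` definition, the values are arbitrary.
```python
from ezdxf.entities import factory
from ezdxf.math import Matrix44

tol = factory.new(
    "TOLERANCE",
    dxfattribs={"insert": (1, 2, 0), "content": "TOL", "dimstyle": "Standard"},
)
tol.transform(Matrix44.translate(5, 0, 0))  # shifts insert, keeps extrusion/x-axis
print(tol.dxf.insert)                       # translated insertion point
```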
#### File: ezdxf/entities/xdata.py
```python
from typing import (
TYPE_CHECKING,
List,
Iterable,
Tuple,
Any,
Dict,
MutableSequence,
MutableMapping,
Iterator,
Union,
)
from collections import OrderedDict
from contextlib import contextmanager
from ezdxf.math import Vec3, Matrix44
from ezdxf.lldxf.types import dxftag, VALID_XDATA_GROUP_CODES, DXFTag
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.const import XDATA_MARKER, DXFValueError, DXFTypeError
from ezdxf.lldxf.tags import (
xdata_list,
remove_named_list_from_xdata,
get_named_list_from_xdata,
NotFoundException,
)
from ezdxf.tools import take2
from ezdxf import options
from ezdxf.lldxf.repair import filter_invalid_xdata_group_codes
import logging
logger = logging.getLogger("ezdxf")
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, DXFEntity
__all__ = ["XData", "XDataUserList", "XDataUserDict"]
def has_valid_xdata_group_codes(tags: Tags) -> bool:
return all(tag.code in VALID_XDATA_GROUP_CODES for tag in tags)
class XData:
def __init__(self, xdata: Iterable[Tags] = None):
self.data: Dict[str, Tags] = OrderedDict()
if xdata is not None:
for data in xdata:
self._add(data)
@classmethod
def safe_init(cls, xdata: Iterable[Tags]):
return cls(
[Tags(filter_invalid_xdata_group_codes(tags)) for tags in xdata]
)
def __len__(self):
return len(self.data)
def __contains__(self, appid: str) -> bool:
"""Returns ``True`` if DXF tags for `appid` exist."""
return appid in self.data
def _add(self, tags: Tags) -> None:
tags = Tags(tags)
if len(tags):
appid = tags[0].value
if appid in self.data:
logger.info(f"Duplicate XDATA appid {appid} in one entity")
if has_valid_xdata_group_codes(tags):
self.data[appid] = tags
else:
raise DXFValueError(f"found invalid XDATA group code in {tags}")
def add(
self, appid: str, tags: Iterable[Union[Tuple[int, Any], "DXFTag"]]
) -> None:
"""Add a list of DXF tags for `appid`. The `tags` argument is an
iterable of (group code, value) tuples, where the group code has to be
an integer value. The mandatory XDATA marker (1001, appid) is added
        automatically in front of the tags if missing.
Each entity can contain only one list of tags for each `appid`.
Adding a second list of tags for the same `appid` replaces the
existing list of tags.
The valid XDATA group codes are restricted to some specific values in
the range from 1000 to 1071, for more information see also the
internals about :ref:`xdata_internals`.
"""
data = Tags(dxftag(code, value) for code, value in tags)
if len(data) == 0 or data[0] != (XDATA_MARKER, appid):
data.insert(0, dxftag(XDATA_MARKER, appid))
self._add(data)
def get(self, appid: str) -> Tags:
"""Returns the DXF tags as :class:`~ezdxf.lldxf.tags.Tags` list
stored by `appid`.
Raises:
DXFValueError: no data for `appid` exist
"""
if appid in self.data:
return self.data[appid]
else:
raise DXFValueError(appid)
def discard(self, appid):
"""Delete DXF tags for `appid`. None existing appids are silently
ignored.
"""
if appid in self.data:
del self.data[appid]
def export_dxf(self, tagwriter: "TagWriter") -> None:
for appid, tags in self.data.items():
if options.filter_invalid_xdata_group_codes:
tags = Tags(filter_invalid_xdata_group_codes(tags))
tagwriter.write_tags(tags)
def has_xlist(self, appid: str, name: str) -> bool:
"""Returns ``True`` if list `name` from XDATA `appid` exists.
Args:
appid: APPID
name: list name
"""
try:
self.get_xlist(appid, name)
except DXFValueError:
return False
else:
return True
def get_xlist(self, appid: str, name: str) -> List[Tuple]:
"""Get list `name` from XDATA `appid`.
Args:
appid: APPID
name: list name
Returns: list of DXFTags including list name and curly braces '{' '}' tags
Raises:
DXFKeyError: XDATA `appid` does not exist
DXFValueError: list `name` does not exist
"""
xdata = self.get(appid)
try:
return get_named_list_from_xdata(name, xdata)
except NotFoundException:
raise DXFValueError(
                f'Data list "{name}" not found for APPID "{appid}"'
)
def set_xlist(self, appid: str, name: str, tags: Iterable) -> None:
"""Create new list `name` of XDATA `appid` with `xdata_tags` and
replaces list `name` if already exists.
Args:
appid: APPID
name: list name
tags: list content as DXFTags or (code, value) tuples, list name and
curly braces '{' '}' tags will be added
"""
if appid not in self.data:
data = [(XDATA_MARKER, appid)]
data.extend(xdata_list(name, tags))
self.add(appid, data)
else:
self.replace_xlist(appid, name, tags)
def discard_xlist(self, appid: str, name: str) -> None:
"""Deletes list `name` from XDATA `appid`. Ignores silently if XDATA
`appid` or list `name` not exist.
Args:
appid: APPID
name: list name
"""
try:
xdata = self.get(appid)
except DXFValueError:
pass
else:
try:
tags = remove_named_list_from_xdata(name, xdata)
except NotFoundException:
pass
else:
self.add(appid, tags)
def replace_xlist(self, appid: str, name: str, tags: Iterable) -> None:
"""Replaces list `name` of existing XDATA `appid` by `tags`. Appends
new list if list `name` do not exist, but raises `DXFValueError` if
XDATA `appid` do not exist.
Low level interface, if not sure use `set_xdata_list()` instead.
Args:
appid: APPID
name: list name
tags: list content as DXFTags or (code, value) tuples, list name and
curly braces '{' '}' tags will be added
Raises:
            DXFValueError: XDATA `appid` does not exist
"""
xdata = self.get(appid)
try:
data = remove_named_list_from_xdata(name, xdata)
except NotFoundException:
data = xdata
xlist = xdata_list(name, tags)
data.extend(xlist)
self.add(appid, data)
def transform(self, m: Matrix44) -> None:
"""Transform XDATA tags with group codes 1011, 1012, 1013, 1041 and
1042 inplace. For more information see :ref:`xdata_internals` Internals.
"""
transformed_data = OrderedDict()
for key, tags in self.data.items():
transformed_data[key] = Tags(transform_xdata_tags(tags, m))
self.data = transformed_data
def transform_xdata_tags(tags: Tags, m: Matrix44) -> Iterator[DXFTag]:
for tag in tags:
code, value = tag
if code == 1011:
# move, scale, rotate and mirror
yield dxftag(code, m.transform(Vec3(value)))
elif code == 1012:
# scale, rotate and mirror
yield dxftag(code, m.transform_direction(Vec3(value)))
elif code == 1013:
# rotate and mirror
vec = Vec3(value)
length = vec.magnitude
if length > 1e-12:
vec = m.transform_direction(vec).normalize(length)
yield dxftag(code, vec)
else:
yield tag
elif code == 1041 or code == 1042:
# scale distance and factor, works only for uniform scaling
vec = m.transform_direction(Vec3(value, 0, 0))
yield dxftag(code, vec.magnitude)
else:
yield tag
class XDataUserList(MutableSequence):
"""Manage named XDATA lists as a list-like object.
Stores just a few data types with fixed group codes:
1000 str
1010 Vec3
1040 float
1071 32bit int
"""
converter = {
1000: str,
1010: Vec3,
1040: float,
1071: int,
}
group_codes = {
str: 1000,
Vec3: 1010,
float: 1040,
int: 1071,
}
def __init__(self, xdata: XData = None, name="DefaultList", appid="EZDXF"):
"""Setup a XDATA user list `name` for the given `appid`.
The data is stored in the given `xdata` object, or in a new created
:class:`XData` instance if ``None``.
Changes of the content has to be committed at the end to be stored in
the underlying `xdata` object.
Args:
xdata (XData): underlying :class:`XData` instance, if ``None`` a
new one will be created
name (str): name of the user list
appid (str): application specific AppID
"""
if xdata is None:
xdata = XData()
self.xdata = xdata
self._appid = str(appid)
self._name = str(name)
try:
data = xdata.get_xlist(self._appid, self._name)
except DXFValueError:
data = []
self._data: List = self._parse_list(data)
@classmethod
@contextmanager
def entity(
cls, entity: "DXFEntity", name="DefaultList", appid="EZDXF"
) -> Iterator["XDataUserList"]:
"""Context manager to manage a XDATA list `name` for a given DXF
`entity`. Appends the user list to the existing :class:`XData` instance
or creates new :class:`XData` instance.
Args:
entity (DXFEntity): target DXF entity for the XDATA
name (str): name of the user list
appid (str): application specific AppID
"""
xdata = entity.xdata
if xdata is None:
xdata = XData()
entity.xdata = xdata
xlist = cls(xdata, name, appid)
yield xlist
xlist.commit()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.commit()
def __str__(self):
"""Return str(self)."""
return str(self._data)
def insert(self, index: int, value) -> None:
self._data.insert(index, value)
def __getitem__(self, item):
"""Get self[item]."""
return self._data[item]
def __setitem__(self, item, value):
"""Set self[item] to value."""
self._data.__setitem__(item, value)
def __delitem__(self, item):
"""Delete self[item]."""
self._data.__delitem__(item)
def _parse_list(self, tags: Iterable[Tuple]) -> List:
data = list(tags)
content = []
for code, value in data[2:-1]:
factory = self.converter.get(code)
if factory:
content.append(factory(value))
else:
raise DXFValueError(f"unsupported group code: {code}")
return content
def __len__(self) -> int:
"""Returns len(self)."""
return len(self._data)
def commit(self) -> None:
"""Store all changes to the underlying :class:`XData` instance.
This call is not required if using the :meth:`entity` context manager.
Raises:
DXFValueError: invalid chars ``"\\n"`` or ``"\\r"`` in a string
DXFTypeError: invalid data type
"""
data = []
for value in self._data:
if isinstance(value, str):
if len(value) > 255: # XDATA limit for group code 1000
raise DXFValueError("string too long, max. 255 characters")
if "\n" in value or "\r" in value:
raise DXFValueError(
"found invalid line break '\\n' or '\\r'"
)
code = self.group_codes.get(type(value))
if code:
data.append(dxftag(code, value))
else:
raise DXFTypeError(f"invalid type: {type(value)}")
self.xdata.set_xlist(self._appid, self._name, data)
class XDataUserDict(MutableMapping):
"""Manage named XDATA lists as a dict-like object.
Uses XDataUserList to store key, value pairs in XDATA.
This class does not create the required AppID table entry, only the
default AppID "EZDXF" exist by default.
Implements the :class:`MutableMapping` interface.
"""
def __init__(self, xdata: XData = None, name="DefaultDict", appid="EZDXF"):
"""Setup a XDATA user dict `name` for the given `appid`.
The data is stored in the given `xdata` object, or in a new created
:class:`XData` instance if ``None``.
Changes of the content has to be committed at the end to be stored in
the underlying `xdata` object.
Args:
xdata (XData): underlying :class:`XData` instance, if ``None`` a
new one will be created
name (str): name of the user list
appid (str): application specific AppID
"""
self._xlist = XDataUserList(xdata, name, appid)
self._user_dict: Dict[str, Any] = self._parse_xlist()
def _parse_xlist(self) -> Dict:
if self._xlist:
return dict(take2(self._xlist))
else:
return dict()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.commit()
def __str__(self):
"""Return str(self)."""
return str(self._user_dict)
@classmethod
@contextmanager
def entity(
cls, entity: "DXFEntity", name="DefaultDict", appid="EZDXF"
) -> Iterator["XDataUserDict"]:
"""Context manager to manage a XDATA dict `name` for a given DXF
`entity`. Appends the user dict to the existing :class:`XData` instance
or creates new :class:`XData` instance.
Args:
entity (DXFEntity): target DXF entity for the XDATA
name (str): name of the user list
appid (str): application specific AppID
"""
xdata = entity.xdata
if xdata is None:
xdata = XData()
entity.xdata = xdata
xdict = cls(xdata, name, appid)
yield xdict
xdict.commit()
@property
def xdata(self):
return self._xlist.xdata
def __len__(self):
"""Returns len(self)."""
return len(self._user_dict)
def __getitem__(self, key):
"""Get self[key]."""
return self._user_dict[key]
def __setitem__(self, key, item):
"""Set self[key] to value, key has to be a string.
Raises:
DXFTypeError: key is not a string
"""
if not isinstance(key, str):
raise DXFTypeError("key is not a string")
self._user_dict[key] = item
def __delitem__(self, key):
"""Delete self[key]."""
del self._user_dict[key]
def __iter__(self):
"""Implement iter(self)."""
return iter(self._user_dict)
def discard(self, key):
"""Delete self[key], without raising a :class:`KeyError` if `key` does
not exist.
"""
try:
del self._user_dict[key]
except KeyError:
pass
def commit(self) -> None:
"""Store all changes to the underlying :class:`XData` instance.
This call is not required if using the :meth:`entity` context manager.
Raises:
DXFValueError: invalid chars ``"\\n"`` or ``"\\r"`` in a string
DXFTypeError: invalid data type
"""
xlist = self._xlist
xlist.clear()
for key, value in self._user_dict.items():
xlist.append(key)
xlist.append(value)
xlist.commit()
```
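A usage sketch for the XDATA helper classes above: attach a small key/value store to an entity under the default AppID "EZDXF"; the keys and values are arbitrary examples.
```python
import ezdxf
from ezdxf.entities.xdata import XDataUserDict

doc = ezdxf.new()
msp = doc.modelspace()
line = msp.add_line((0, 0), (10, 0))
with XDataUserDict.entity(line, name="DefaultDict", appid="EZDXF") as user_dict:
    user_dict["CreatedBy"] = "sketch"    # str -> group code 1000
    user_dict["Version"] = 1             # int -> group code 1071
# leaving the context manager commits the data into line.xdata
with XDataUserDict.entity(line) as user_dict:
    print(dict(user_dict))               # {'CreatedBy': 'sketch', 'Version': 1}
```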
#### File: src/ezdxf/explode.py
```python
import logging
from typing import (
TYPE_CHECKING,
Iterable,
Callable,
Optional,
cast,
Dict,
List,
Any,
)
from ezdxf.entities import factory
from ezdxf.entities.boundary_paths import (
PolylinePath,
EdgePath,
LineEdge,
ArcEdge,
EllipseEdge,
SplineEdge,
)
from ezdxf.lldxf.const import DXFStructureError, DXFTypeError
from ezdxf.math import OCS, Vec3, ABS_TOL
from ezdxf.math.transformtools import (
NonUniformScalingError,
InsertTransformationError,
)
from ezdxf.query import EntityQuery
logger = logging.getLogger("ezdxf")
if TYPE_CHECKING:
from ezdxf.entities.polygon import DXFPolygon
from ezdxf.eztypes import (
Insert,
BaseLayout,
DXFGraphic,
Attrib,
Text,
LWPolyline,
)
__all__ = [
"virtual_block_reference_entities",
"virtual_boundary_path_entities",
"explode_block_reference",
"explode_entity",
"attrib_to_text",
]
def default_logging_callback(entity, reason):
logger.debug(
f'(Virtual Block Reference Entities) Ignoring {str(entity)}: "{reason}"'
)
def explode_block_reference(
block_ref: "Insert", target_layout: "BaseLayout"
) -> EntityQuery:
"""Explode a block reference into DXF primitives.
Transforms the block entities into the required WCS location by applying the
block reference attributes `insert`, `extrusion`, `rotation` and the scaling
values `xscale`, `yscale` and `zscale`.
Returns an EntityQuery() container with all exploded DXF entities.
    Attached ATTRIB entities are converted to TEXT entities; this is the
    behavior of the BURST command of the AutoCAD Express Tools.
Args:
block_ref: Block reference entity (INSERT)
target_layout: explicit target layout for exploded DXF entities
.. warning::
**Non uniform scaling** may lead to incorrect results for text entities
(TEXT, MTEXT, ATTRIB) and maybe some other entities.
(internal API)
"""
if target_layout is None:
raise DXFStructureError("Target layout is None.")
if block_ref.doc is None:
raise DXFStructureError(
"Block reference has to be assigned to a DXF document."
)
def _explode_single_block_ref(block_ref):
for entity in virtual_block_reference_entities(block_ref):
dxftype = entity.dxftype()
target_layout.add_entity(entity)
if dxftype == "DIMENSION":
# Render a graphical representation for each exploded DIMENSION
# entity as anonymous block.
cast("Dimension", entity).render()
entities.append(entity)
# Convert attached ATTRIB entities to TEXT entities:
# This is the behavior of the BURST command of the AutoCAD Express Tools
for attrib in block_ref.attribs:
# Attached ATTRIB entities are already located in the WCS
text = attrib_to_text(attrib)
target_layout.add_entity(text)
entities.append(text)
entitydb = block_ref.doc.entitydb
assert (
entitydb is not None
), "Exploding a block reference requires an entity database."
entities: List["DXFGraphic"] = []
if block_ref.mcount > 1:
for virtual_insert in block_ref.multi_insert():
_explode_single_block_ref(virtual_insert)
else:
_explode_single_block_ref(block_ref)
source_layout = block_ref.get_layout()
if source_layout is not None:
# Remove and destroy exploded INSERT if assigned to a layout
source_layout.delete_entity(block_ref)
else:
entitydb.delete_entity(block_ref)
return EntityQuery(entities)
IGNORE_FROM_ATTRIB = {
"version",
"prompt",
"tag",
"flags",
"field_length",
"lock_position",
}
def attrib_to_text(attrib: "Attrib") -> "Text":
dxfattribs = attrib.dxfattribs(drop=IGNORE_FROM_ATTRIB)
# ATTRIB has same owner as INSERT but does not reside in any EntitySpace()
# and must not be deleted from any layout.
# New TEXT entity has same handle as the replaced ATTRIB entity and replaces
# the ATTRIB entity in the database.
text = factory.new("TEXT", dxfattribs=dxfattribs)
if attrib.doc:
factory.bind(text, attrib.doc)
return cast("Text", text)
def virtual_block_reference_entities(
block_ref: "Insert",
skipped_entity_callback: Optional[
Callable[["DXFGraphic", str], None]
] = None,
) -> Iterable["DXFGraphic"]:
"""Yields 'virtual' parts of block reference `block_ref`. This method is meant
to examine the block reference entities without the need to explode the
block reference. The `skipped_entity_callback()` will be called for all
entities which are not processed, signature:
:code:`skipped_entity_callback(entity: DXFGraphic, reason: str)`,
`entity` is the original (untransformed) DXF entity of the block definition,
the `reason` string is an explanation why the entity was skipped.
These entities are located at the 'exploded' positions, but are not stored in
the entity database, have no handle and are not assigned to any layout.
Args:
block_ref: Block reference entity (INSERT)
skipped_entity_callback: called whenever the transformation of an entity
is not supported and so was skipped.
.. warning::
**Non uniform scaling** may lead to incorrect results for text entities
(TEXT, MTEXT, ATTRIB) and maybe some other entities.
(internal API)
"""
assert block_ref.dxftype() == "INSERT"
from ezdxf.entities import Ellipse
skipped_entity_callback = (
skipped_entity_callback or default_logging_callback
)
def disassemble(layout) -> Iterable["DXFGraphic"]:
for entity in layout:
# Do not explode ATTDEF entities. Already available in Insert.attribs
if entity.dxftype() == "ATTDEF":
continue
try:
copy = entity.copy()
except DXFTypeError:
if hasattr(entity, "virtual_entities"):
yield from entity.virtual_entities()
else:
skipped_entity_callback(entity, "non copyable") # type: ignore
else:
if hasattr(copy, "remove_association"):
copy.remove_association()
yield copy
def transform(entities):
for entity in entities:
try:
entity.transform(m)
except NotImplementedError:
skipped_entity_callback(entity, "non transformable")
except NonUniformScalingError:
dxftype = entity.dxftype()
if dxftype in {"ARC", "CIRCLE"}:
if abs(entity.dxf.radius) > ABS_TOL:
yield Ellipse.from_arc(entity).transform(m)
else:
skipped_entity_callback(
entity, f"Invalid radius in entity {str(entity)}."
)
elif dxftype in {"LWPOLYLINE", "POLYLINE"}: # has arcs
yield from transform(entity.virtual_entities())
else:
skipped_entity_callback(
entity, "unsupported non-uniform scaling"
)
except InsertTransformationError:
# INSERT entity can not be represented in the target coordinate
# system defined by transformation matrix `m`.
# Yield transformed sub-entities of the INSERT entity:
yield from transform(
virtual_block_reference_entities(
entity, skipped_entity_callback
)
)
else:
yield entity
m = block_ref.matrix44()
block_layout = block_ref.block()
if block_layout is None:
raise DXFStructureError(
f'Required block definition for "{block_ref.dxf.name}" does not exist.'
)
yield from transform(disassemble(block_layout))
EXCLUDE_FROM_EXPLODE = {"POINT"}
def explode_entity(
entity: "DXFGraphic", target_layout: "BaseLayout" = None
) -> "EntityQuery":
"""Explode parts of an entity as primitives into target layout, if target
layout is ``None``, the target layout is the layout of the source entity.
Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF parts.
Args:
entity: DXF entity to explode, has to have a :meth:`virtual_entities()`
method
target_layout: target layout for DXF parts, ``None`` for same layout as
source entity
(internal API)
"""
dxftype = entity.dxftype()
virtual_entities = getattr(entity, "virtual_entities")
if virtual_entities is None or dxftype in EXCLUDE_FROM_EXPLODE:
raise DXFTypeError(f"Can not explode entity {dxftype}.")
if entity.doc is None:
raise DXFStructureError(
f"{dxftype} has to be assigned to a DXF document."
)
entitydb = entity.doc.entitydb
if entitydb is None:
raise DXFStructureError(
f"Exploding {dxftype} requires an entity database."
)
if target_layout is None:
target_layout = entity.get_layout()
if target_layout is None:
raise DXFStructureError(
f"{dxftype} without layout assignment, specify target layout."
)
entities = []
for e in virtual_entities():
target_layout.add_entity(e)
entities.append(e)
source_layout = entity.get_layout()
if source_layout is not None:
source_layout.delete_entity(entity)
else:
entitydb.delete_entity(entity)
return EntityQuery(entities)
def virtual_boundary_path_entities(
polygon: "DXFPolygon",
) -> List[List["DXFGraphic"]]:
from ezdxf.entities import LWPolyline
def polyline():
p = LWPolyline.new(dxfattribs=dict(graphic_attribs))
p.append_formatted_vertices(path.vertices, format="xyb")
p.dxf.extrusion = ocs.uz
p.dxf.elevation = elevation
p.closed = path.is_closed
return p
graphic_attribs = polygon.graphic_properties()
elevation = float(polygon.dxf.elevation.z)
ocs = polygon.ocs()
entities = []
for path in polygon.paths:
if isinstance(path, PolylinePath):
entities.append([polyline()])
elif isinstance(path, EdgePath):
entities.append(
_virtual_edge_path(path, dict(graphic_attribs), ocs, elevation)
)
return entities
def _virtual_edge_path(
path: EdgePath, dxfattribs: Dict, ocs: OCS, elevation: float
) -> List["DXFGraphic"]:
from ezdxf.entities import Line, Arc, Ellipse, Spline
def pnt_to_wcs(v):
return ocs.to_wcs(Vec3(v).replace(z=elevation))
def dir_to_wcs(v):
return ocs.to_wcs(v)
edges: List["DXFGraphic"] = []
for edge in path.edges:
attribs = dict(dxfattribs)
if isinstance(edge, LineEdge):
attribs["start"] = pnt_to_wcs(edge.start)
attribs["end"] = pnt_to_wcs(edge.end)
edges.append(Line.new(dxfattribs=attribs))
elif isinstance(edge, ArcEdge):
attribs["center"] = edge.center
attribs["radius"] = edge.radius
attribs["elevation"] = elevation
# Arcs angles are always stored in counter clockwise orientation
# around the extrusion vector!
attribs["start_angle"] = edge.start_angle
attribs["end_angle"] = edge.end_angle
attribs["extrusion"] = ocs.uz
edges.append(Arc.new(dxfattribs=attribs))
elif isinstance(edge, EllipseEdge):
attribs["center"] = pnt_to_wcs(edge.center)
attribs["major_axis"] = dir_to_wcs(edge.major_axis)
attribs["ratio"] = edge.ratio
# Ellipse angles are always stored in counter clockwise orientation
# around the extrusion vector!
attribs["start_param"] = edge.start_param
attribs["end_param"] = edge.end_param
attribs["extrusion"] = ocs.uz
edges.append(Ellipse.new(dxfattribs=attribs))
elif isinstance(edge, SplineEdge):
spline = Spline.new(dxfattribs=attribs)
spline.dxf.degree = edge.degree
spline.knots = edge.knot_values
spline.control_points = [pnt_to_wcs(v) for v in edge.control_points]
if edge.weights:
spline.weights = edge.weights
if edge.fit_points:
spline.fit_points = [pnt_to_wcs(v) for v in edge.fit_points]
if edge.start_tangent is not None:
spline.dxf.start_tangent = dir_to_wcs(edge.start_tangent)
if edge.end_tangent is not None:
spline.dxf.end_tangent = dir_to_wcs(edge.end_tangent)
edges.append(spline)
return edges
```
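The functions above are internal API; user code normally reaches them through the `Insert.explode()` and `Insert.virtual_entities()` methods. The following is a minimal usage sketch under that assumption; the block name, geometry and scaling values are illustrative, not taken from this module:
```python
import ezdxf

# Build a small document with one block definition and one block reference.
doc = ezdxf.new("R2010")
blk = doc.blocks.new(name="GEAR")  # hypothetical block name, for illustration only
blk.add_circle(center=(0, 0), radius=1)
blk.add_line((-1, 0), (1, 0))

msp = doc.modelspace()
insert = msp.add_blockref(
    "GEAR", insert=(10, 5), dxfattribs={"xscale": 2, "yscale": 2}
)

# Inspect the transformed content without modifying the document:
for e in insert.virtual_entities():
    print(e.dxftype())

# Explode the INSERT into real CIRCLE/LINE entities in the modelspace;
# the INSERT itself is removed, see explode_block_reference() above.
exploded = insert.explode()
print(len(exploded), "entities created")
```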
#### File: src/ezdxf/groupby.py
```python
from typing import Iterable, Hashable, Dict, List, TYPE_CHECKING
from ezdxf.lldxf.const import DXFValueError, DXFAttributeError
if TYPE_CHECKING:
from ezdxf.eztypes import DXFEntity, KeyFunc
def groupby(
entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None
) -> Dict[Hashable, List["DXFEntity"]]:
"""
Groups a sequence of DXF entities by a DXF attribute like ``'layer'``,
returns a dict with `dxfattrib` values as key and a list of entities
matching this `dxfattrib`.
A `key` function can be used to combine some DXF attributes (e.g. layer and
color) and should return a hashable data type like a tuple of strings,
integers or floats, `key` function example::
def group_key(entity: DXFEntity):
return entity.dxf.layer, entity.dxf.color
For DXF entities which are not suitable, return ``None`` to exclude them. In
this case it's not strictly required, because :func:`groupby` catches
:class:`DXFAttributeError` exceptions automatically and excludes entities
which do not provide the queried layer and/or color attributes.
Result dict for `dxfattrib` = ``'layer'`` may look like this::
{
'0': [ ... list of entities ],
'ExampleLayer1': [ ... ],
'ExampleLayer2': [ ... ],
...
}
Result dict for `key` = `group_key`, which returns a ``(layer, color)``
tuple, may look like this::
{
('0', 1): [ ... list of entities ],
('0', 3): [ ... ],
('0', 7): [ ... ],
('ExampleLayer1', 1): [ ... ],
('ExampleLayer1', 2): [ ... ],
('ExampleLayer1', 5): [ ... ],
('ExampleLayer2', 7): [ ... ],
...
}
All entity containers (modelspace, paperspace layouts and blocks) and the
:class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby`
method.
Args:
entities: sequence of DXF entities to group by a DXF attribute or a
`key` function
dxfattrib: grouping DXF attribute like ``'layer'``
key: key function, which accepts a :class:`DXFEntity` as argument and
returns a hashable grouping key or ``None`` to ignore this entity
"""
if all((dxfattrib, key)):
raise DXFValueError(
"Specify a dxfattrib or a key function, but not both."
)
if dxfattrib != "":
key = lambda entity: entity.dxf.get_default(dxfattrib)
if key is None:
raise DXFValueError(
"no valid argument found, specify a dxfattrib or a key function, "
"but not both."
)
result: Dict[Hashable, List["DXFEntity"]] = dict()
for dxf_entity in entities:
if not dxf_entity.is_alive:
continue
try:
group_key = key(dxf_entity)
except DXFAttributeError:
# ignore DXF entities, which do not support all query attributes
continue
if group_key is not None:
group = result.setdefault(group_key, [])
group.append(dxf_entity)
return result
```
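As a usage sketch, `groupby()` can be called directly or through the `groupby()` method of layouts and entity queries; the entities and layer names below are arbitrary examples:
```python
import ezdxf
from ezdxf.groupby import groupby

doc = ezdxf.new()
msp = doc.modelspace()
msp.add_line((0, 0), (1, 0), dxfattribs={"layer": "WALLS"})
msp.add_line((0, 0), (0, 1), dxfattribs={"layer": "WALLS"})
msp.add_circle((0, 0), radius=1, dxfattribs={"layer": "DOORS"})

# Group by a single DXF attribute:
by_layer = groupby(msp, dxfattrib="layer")
for layer, entities in by_layer.items():
    print(layer, len(entities))  # WALLS 2, DOORS 1

# Group by a key function; entities without the queried attributes are
# skipped automatically by the DXFAttributeError handler shown above:
by_layer_and_color = msp.groupby(key=lambda e: (e.dxf.layer, e.dxf.color))
```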
#### File: ezdxf/lldxf/tagwriter.py
```python
from typing import Any, TextIO, TYPE_CHECKING, Union, List, Iterable, BinaryIO
import abc
from .types import TAG_STRING_FORMAT, cast_tag_value, DXFVertex
from .types import BYTES, INT16, INT32, INT64, DOUBLE, BINARY_DATA
from .tags import DXFTag, Tags
from .const import LATEST_DXF_VERSION
from ezdxf.tools import take2
import struct
if TYPE_CHECKING:
from ezdxf.eztypes import ExtendedTags, DXFEntity
__all__ = [
"TagWriter",
"BinaryTagWriter",
"TagCollector",
"basic_tags_from_text",
"AbstractTagWriter",
]
CRLF = b"\r\n"
class AbstractTagWriter:
# Options for functions using an inherited class for DXF export:
dxfversion = LATEST_DXF_VERSION
write_handles = True
# Force writing optional values if equal to default value when True.
# True is only used for testing scenarios!
force_optional = False
# Start of low level interface:
@abc.abstractmethod
def write_tag(self, tag: DXFTag) -> None:
...
@abc.abstractmethod
def write_tag2(self, code: int, value: Any) -> None:
...
@abc.abstractmethod
def write_str(self, s: str) -> None:
...
# End of low level interface
# Tag export based on low level tag export:
def write_tags(self, tags: Union["Tags", "ExtendedTags"]) -> None:
for tag in tags:
self.write_tag(tag)
def write_vertex(self, code: int, vertex: Iterable[float]) -> None:
for index, value in enumerate(vertex):
self.write_tag2(code + index * 10, value)
class TagWriter(AbstractTagWriter):
"""Writes DXF tags into a text stream."""
def __init__(
self,
stream: TextIO,
dxfversion: str = LATEST_DXF_VERSION,
write_handles: bool = True,
):
self._stream: TextIO = stream
self.dxfversion: str = dxfversion
self.write_handles: bool = write_handles
self.force_optional: bool = False
# Start of low level interface:
def write_tag(self, tag: DXFTag) -> None:
self._stream.write(tag.dxfstr())
def write_tag2(self, code: int, value: Any) -> None:
self._stream.write(TAG_STRING_FORMAT % (code, value))
def write_str(self, s: str) -> None:
self._stream.write(s)
# End of low level interface
def write_vertex(self, code: int, vertex: Iterable[float]) -> None:
"""Optimized vertex export."""
write = self._stream.write
for index, value in enumerate(vertex):
write(TAG_STRING_FORMAT % (code + index * 10, value))
class BinaryTagWriter(AbstractTagWriter):
"""Write binary encoded DXF tags into a binary stream.
.. warning::
DXF files containing ``ACSH_SWEEP_CLASS`` entities and saved as Binary
DXF by `ezdxf` can not be opened with AutoCAD, this is maybe also true
for other 3rd party entities. BricsCAD opens these binary DXF files
without complaining, but saves the ``ACSH_SWEEP_CLASS`` entities as
``ACAD_PROXY_OBJECT`` when writing back, so error analysis is not
possible without the full version of AutoCAD.
I have no clue why, because converting these DXF files from binary
format back to ASCII format by `ezdxf` produces a valid DXF for
AutoCAD - so all required information is preserved.
Two examples available:
- AutodeskSamples\visualization_-_condominium_with_skylight.dxf
- AutodeskSamples\visualization_-_conference_room.dxf
"""
def __init__(
self,
stream: BinaryIO,
dxfversion=LATEST_DXF_VERSION,
write_handles: bool = True,
encoding="utf8",
):
self._stream = stream
self.dxfversion = dxfversion
self.write_handles = write_handles
self._encoding = encoding # output encoding
self._r12 = self.dxfversion <= "AC1009"
def write_signature(self) -> None:
self._stream.write(b"AutoCAD Binary DXF\r\n\x1a\x00")
# Start of low level interface:
def write_tag(self, tag: DXFTag) -> None:
if isinstance(tag, DXFVertex):
for code, value in tag.dxftags():
self.write_tag2(code, value)
else:
self.write_tag2(tag.code, tag.value)
def write_str(self, s: str) -> None:
data = s.split("\n")
for code, value in take2(data):
self.write_tag2(int(code), value)
def write_tag2(self, code: int, value: Any) -> None:
# Binary DXF files do not support comments!
assert code != 999
if code in BINARY_DATA:
self._write_binary_chunks(code, value)
return
stream = self._stream
# write group code
if self._r12:
# Special group code handling if DXF R12 and older
if code >= 1000: # extended data
stream.write(b"\xff")
# always 2-byte group code for extended data
stream.write(code.to_bytes(2, "little"))
else:
stream.write(code.to_bytes(1, "little"))
else: # for R2000+ do not need a leading 0xff in front of extended data
stream.write(code.to_bytes(2, "little"))
# write tag content
if code in BYTES:
stream.write(int(value).to_bytes(1, "little"))
elif code in INT16:
stream.write(int(value).to_bytes(2, "little", signed=True))
elif code in INT32:
stream.write(int(value).to_bytes(4, "little", signed=True))
elif code in INT64:
stream.write(int(value).to_bytes(8, "little", signed=True))
elif code in DOUBLE:
stream.write(struct.pack("<d", float(value)))
else: # write zero terminated string
stream.write(str(value).encode(self._encoding, errors="dxfreplace"))
stream.write(b"\x00")
# End of low level interface
def _write_binary_chunks(self, code: int, data: bytes) -> None:
# Split binary data into small chunks, 127 bytes is the
# regular size of binary data in ASCII DXF files.
CHUNK_SIZE = 127
index = 0
size = len(data)
stream = self._stream
while index < size:
# write group code
if self._r12 and code >= 1000: # extended data, just 1004?
stream.write(b"\xff") # extended data marker
# binary data does not exist in regular R12 entities,
# only 2-byte group codes required
stream.write(code.to_bytes(2, "little"))
# write max CHUNK_SIZE bytes of binary data in one tag
chunk = data[index: index + CHUNK_SIZE]
# write actual chunk size
stream.write(len(chunk).to_bytes(1, "little"))
stream.write(chunk)
index += CHUNK_SIZE
class TagCollector(AbstractTagWriter):
"""Collect DXF tags as DXFTag() entities for testing."""
def __init__(
self,
dxfversion: str = LATEST_DXF_VERSION,
write_handles: bool = True,
optional: bool = True,
):
self.tags: List[DXFTag] = []
self.dxfversion: str = dxfversion
self.write_handles: bool = write_handles
self.force_optional: bool = optional
# Start of low level interface:
def write_tag(self, tag: DXFTag) -> None:
if hasattr(tag, "dxftags"):
self.tags.extend(tag.dxftags()) # type: ignore
else:
self.tags.append(tag)
def write_tag2(self, code: int, value: Any) -> None:
self.tags.append(DXFTag(code, cast_tag_value(int(code), value)))
def write_str(self, s: str) -> None:
self.write_tags(Tags.from_text(s))
# End of low level interface
def has_all_tags(self, other: "TagCollector"):
return all(tag in self.tags for tag in other.tags)
def reset(self):
self.tags = []
@classmethod
def dxftags(cls, entity: "DXFEntity", dxfversion=LATEST_DXF_VERSION):
collector = cls(dxfversion=dxfversion)
entity.export_dxf(collector)
return Tags(collector.tags)
def basic_tags_from_text(text: str) -> List[DXFTag]:
"""Returns all tags from `text` as basic DXFTags(). All complex tags are
resolved into basic (code, value) tags (e.g. DXFVertex(10, (1, 2, 3)) ->
DXFTag(10, 1), DXFTag(20, 2), DXFTag(30, 3)).
Args:
text: DXF data as string
Returns: List of basic DXF tags (code, value)
"""
collector = TagCollector()
collector.write_tags(Tags.from_text(text))
return collector.tags
```
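A short sketch of how `TagCollector` and `basic_tags_from_text()` are typically used to inspect the DXF export of an entity; the LINE entity and the raw tag text are just examples:
```python
import ezdxf
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text

doc = ezdxf.new()
msp = doc.modelspace()
line = msp.add_line((0, 0), (1, 1))

# Collect the exported tags of a single entity as DXFTag objects:
tags = TagCollector.dxftags(line)
print(tags[0])  # structure tag, e.g. DXFTag(0, 'LINE')

# Parse raw DXF text into basic (code, value) tags; complex vertex tags
# are resolved into single (code, value) tags:
raw = "0\nLINE\n8\n0\n10\n0.0\n20\n0.0\n11\n1.0\n21\n1.0\n"
for tag in basic_tags_from_text(raw):
    print(tag.code, tag.value)
```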
#### File: ezdxf/math/eulerspiral.py
```python
from typing import Dict, Iterable, List
from ezdxf.math import Vec3
from ezdxf.math.bspline import global_bspline_interpolation, BSpline
__all__ = ["EulerSpiral"]
def powers(base: float, count: int) -> List[float]:
assert count > 2, "requires count > 2"
values = [1.0, base]
next_value = base
for _ in range(count - 2):
next_value *= base
values.append(next_value)
return values
def _params(length: float, segments: int) -> Iterable[float]:
delta_l = float(length) / float(segments)
for index in range(0, segments + 1):
yield delta_l * index
class EulerSpiral:
"""
This class represents an Euler spiral (clothoid) for a given `curvature`
(radius of curvature).
This is a parametric curve, which always starts at the origin = ``(0, 0)``.
Args:
curvature: radius of curvature
"""
def __init__(self, curvature: float = 1.0):
curvature = float(curvature)
self.curvature = curvature # Radius of curvature
self.curvature_powers: List[float] = powers(curvature, 19)
self._cache: Dict[float, Vec3] = {} # coordinates cache
def radius(self, t: float) -> float:
"""Get radius of circle at distance `t`."""
if t > 0.0:
return self.curvature_powers[2] / t
else:
return 0.0 # radius = infinite
def tangent(self, t: float) -> Vec3:
"""Get tangent at distance `t` as :class:`Vec3` object."""
angle = t ** 2 / (2.0 * self.curvature_powers[2])
return Vec3.from_angle(angle)
def distance(self, radius: float) -> float:
"""Get distance L from origin for `radius`."""
return self.curvature_powers[2] / float(radius)
def point(self, t: float) -> Vec3:
"""Get point at distance `t` as :class:`Vec3`."""
def term(length_power, curvature_power, const):
return t ** length_power / (
const * self.curvature_powers[curvature_power]
)
if t not in self._cache:
y = (
term(3, 2, 6.0)
- term(7, 6, 336.0)
+ term(11, 10, 42240.0)
- term(15, 14, 9676800.0)
+ term(19, 18, 3530096640.0)
)
x = (
t
- term(5, 4, 40.0)
+ term(9, 8, 3456.0)
- term(13, 12, 599040.0)
+ term(17, 16, 175472640.0)
)
self._cache[t] = Vec3(x, y)
return self._cache[t]
def approximate(self, length: float, segments: int) -> Iterable[Vec3]:
"""Approximate the curve of the given `length` with line segments.
Generates segments+1 vertices as :class:`Vec3` objects.
"""
for t in _params(length, segments):
yield self.point(t)
def circle_center(self, t: float) -> Vec3:
"""Get circle center at distance `t`."""
p = self.point(t)
r = self.radius(t)
return p + self.tangent(t).normalize(r).orthogonal()
def bspline(
self,
length: float,
segments: int = 10,
degree: int = 3,
method: str = "uniform",
) -> BSpline:
"""Approximate the Euler spiral as a B-spline.
Args:
length: length of euler spiral
segments: count of fit points for B-spline calculation
degree: degree of BSpline
method: calculation method for parameter vector t
Returns:
:class:`BSpline`
"""
length = float(length)
fit_points = list(self.approximate(length, segments=segments))
derivatives = [
# Scaling derivatives by chord length (< real length) is suggested
# by Piegl & Tiller.
self.tangent(t).normalize(length)
for t in _params(length, segments)
]
spline = global_bspline_interpolation(
fit_points, degree, method=method, tangents=derivatives
)
return BSpline(
spline.control_points,
spline.order,
# Scale knot values to length:
[v * length for v in spline.knots()],
)
```
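A short usage sketch of the `EulerSpiral` class; the curvature and length values are arbitrary example numbers:
```python
from ezdxf.math.eulerspiral import EulerSpiral

spiral = EulerSpiral(curvature=2.0)

# Sample segments + 1 = 11 points along the first 5 drawing units of the curve:
points = list(spiral.approximate(length=5.0, segments=10))
print(points[0], points[-1])

# Radius of curvature and center of the osculating circle at curve distance t:
t = 2.5
print(spiral.radius(t), spiral.circle_center(t))

# Approximate the clothoid as a cubic B-spline, e.g. for export as a SPLINE entity:
bspline = spiral.bspline(length=5.0, segments=20)
print(bspline.degree, len(bspline.control_points))
```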
#### File: ezdxf/path/shapes.py
```python
import math
from ezdxf.math import (
cubic_bezier_arc_parameters,
Matrix44,
Vertex,
basic_transformation,
)
from ezdxf.render import forms
from .path import Path
from . import converter
__all__ = [
"unit_circle",
"elliptic_transformation",
"rect",
"ngon",
"wedge",
"star",
"gear",
]
def unit_circle(
start_angle: float = 0,
end_angle: float = math.tau,
segments: int = 1,
transform: Matrix44 = None,
) -> Path:
"""Returns a unit circle as a :class:`Path` object, with the center at
(0, 0, 0) and a radius of 1 drawing unit.
The arc spans from the start- to the end angle in counter clockwise
orientation. The end angle has to be greater than the start angle and the
angle span has to be greater than 0.
Args:
start_angle: start angle in radians
end_angle: end angle in radians (end_angle > start_angle!)
segments: count of Bézier-curve segments, default is one segment for
each arc quarter (π/2)
transform: transformation Matrix applied to the unit circle
"""
path = Path()
start_flag = True
for start, ctrl1, ctrl2, end in cubic_bezier_arc_parameters(
start_angle, end_angle, segments
):
if start_flag:
path.start = start
start_flag = False
path.curve4_to(end, ctrl1, ctrl2)
if transform is None:
return path
else:
return path.transform(transform)
def wedge(
start_angle: float,
end_angle: float,
segments: int = 1,
transform: Matrix44 = None,
) -> Path:
"""Returns a wedge as a :class:`Path` object, with the center at
(0, 0, 0) and a radius of 1 drawing unit.
The arc spans from the start- to the end angle in counter clockwise
orientation. The end angle has to be greater than the start angle and the
angle span has to be greater than 0.
Args:
start_angle: start angle in radians
end_angle: end angle in radians (end_angle > start_angle!)
segments: count of Bézier-curve segments, default is one segment for
each arc quarter (π/2)
transform: transformation Matrix applied to the wedge
"""
path = Path()
start_flag = True
for start, ctrl1, ctrl2, end in cubic_bezier_arc_parameters(
start_angle, end_angle, segments
):
if start_flag:
path.line_to(start)
start_flag = False
path.curve4_to(end, ctrl1, ctrl2)
path.line_to((0, 0, 0))
if transform is None:
return path
else:
return path.transform(transform)
def elliptic_transformation(
center: Vertex = (0, 0, 0),
radius: float = 1,
ratio: float = 1,
rotation: float = 0,
) -> Matrix44:
"""Returns the transformation matrix to transform a unit circle into
an arbitrary circular or elliptic arc.
Example of how to create an ellipse with a major axis length of 3, a minor
axis length of 1.5, rotated by 90°::
m = elliptic_transformation(radius=3, ratio=0.5, rotation=math.pi / 2)
ellipse = shapes.unit_circle(transform=m)
Args:
center: curve center in WCS
radius: radius of the major axis in drawing units
ratio: ratio of minor axis to major axis
rotation: rotation angle about the z-axis in radians
"""
if radius < 1e-6:
raise ValueError(f"invalid radius: {radius}")
if ratio < 1e-6:
raise ValueError(f"invalid ratio: {ratio}")
scale_x = radius
scale_y = radius * ratio
return basic_transformation(center, (scale_x, scale_y, 1), rotation)
def rect(
width: float = 1, height: float = 1, transform: Matrix44 = None
) -> Path:
"""Returns a closed rectangle as a :class:`Path` object, with the center at
(0, 0, 0) and the given `width` and `height` in drawing units.
Args:
width: width of the rectangle in drawing units, width > 0
height: height of the rectangle in drawing units, height > 0
transform: transformation Matrix applied to the rectangle
"""
if width < 1e-9:
raise ValueError(f"invalid width: {width}")
if height < 1e-9:
raise ValueError(f"invalid height: {height}")
w2 = float(width) / 2.0
h2 = float(height) / 2.0
path = converter.from_vertices(
[(w2, h2), (-w2, h2), (-w2, -h2), (w2, -h2)], close=True
)
if transform is None:
return path
else:
return path.transform(transform)
def ngon(
count: int,
length: float = None,
radius: float = 1.0,
transform: Matrix44 = None,
) -> Path:
"""Returns a `regular polygon <https://en.wikipedia.org/wiki/Regular_polygon>`_ as
a :class:`Path` object, with the center at (0, 0, 0).
The polygon size is determined by the edge `length` or the circumradius
`radius` argument. If both are given, `length` has higher priority. The default
size is a `radius` of 1. The ngon starts with the first vertex on the x-axis!
The base geometry is created by function :func:`ezdxf.render.forms.ngon`.
Args:
count: count of polygon corners >= 3
length: length of polygon side
radius: circum radius, default is 1
transform: transformation Matrix applied to the ngon
"""
vertices = forms.ngon(count, length=length, radius=radius)
if transform is not None:
vertices = transform.transform_vertices(vertices)
return converter.from_vertices(vertices, close=True)
def star(count: int, r1: float, r2: float, transform: Matrix44 = None) -> Path:
"""Returns a `star shape <https://en.wikipedia.org/wiki/Star_polygon>`_ as
a :class:`Path` object, with the center at (0, 0, 0).
Argument `count` defines the count of star spikes, `r1` defines the radius
of the "outer" vertices and `r2` defines the radius of the "inner" vertices,
but this does not mean that `r1` has to be greater than `r2`.
The star shape starts with the first vertex on the x-axis!
The base geometry is created by function :func:`ezdxf.render.forms.star`.
Args:
count: spike count >= 3
r1: radius 1
r2: radius 2
transform: transformation Matrix applied to the star
"""
vertices = forms.star(count, r1=r1, r2=r2)
if transform is not None:
vertices = transform.transform_vertices(vertices)
return converter.from_vertices(vertices, close=True)
def gear(
count: int,
top_width: float,
bottom_width: float,
height: float,
outside_radius: float,
transform: Matrix44 = None,
) -> Path:
"""
Returns a `gear <https://en.wikipedia.org/wiki/Gear>`_ (cogwheel) shape as
a :class:`Path` object, with the center at (0, 0, 0).
The base geometry is created by function :func:`ezdxf.render.forms.gear`.
.. warning::
This function does not create correct gears for mechanical engineering!
Args:
count: teeth count >= 3
top_width: teeth width at outside radius
bottom_width: teeth width at base radius
height: teeth height; base radius = outside radius - height
outside_radius: outside radius
transform: transformation Matrix applied to the gear shape
"""
vertices = forms.gear(
count, top_width, bottom_width, height, outside_radius
)
if transform is not None:
vertices = transform.transform_vertices(vertices)
return converter.from_vertices(vertices, close=True)
```
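A usage sketch for the shape factory functions above; the sizes are arbitrary, and rendering the paths as LWPOLYLINE entities via `ezdxf.path.render_lwpolylines()` is an assumption about the surrounding `ezdxf.path` API, not part of this module:
```python
import math
import ezdxf
from ezdxf import path
from ezdxf.path import shapes

doc = ezdxf.new()
msp = doc.modelspace()

# Elliptic arc: a unit circle transformed by an elliptic transformation.
m = shapes.elliptic_transformation(
    center=(4, 2), radius=3, ratio=0.5, rotation=math.pi / 4
)
ellipse = shapes.unit_circle(transform=m)

# A 5-pointed star and a rectangle:
star = shapes.star(5, r1=2.0, r2=1.0)
box = shapes.rect(width=4, height=2)

# Flatten the paths into LWPOLYLINE approximations in the modelspace:
path.render_lwpolylines(msp, [ellipse, star, box], distance=0.01)
doc.saveas("shapes_example.dxf")  # output file name is just a placeholder
```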
#### File: src/ezdxf/recover.py
```python
import typing
from typing import (
TYPE_CHECKING,
BinaryIO,
Iterable,
List,
Callable,
Tuple,
Dict,
Union,
)
import itertools
import re
from collections import defaultdict
from pathlib import Path
from ezdxf.lldxf import const
from ezdxf.lldxf import repair
from ezdxf.lldxf.encoding import (
has_dxf_unicode,
decode_dxf_unicode,
)
from ezdxf.lldxf.types import (
DXFTag,
DXFVertex,
DXFBinaryTag,
POINT_CODES,
BINARY_DATA,
TYPE_TABLE,
MAX_GROUP_CODE,
)
from ezdxf.lldxf.tags import group_tags, Tags
from ezdxf.lldxf.validator import entity_structure_validator
from ezdxf.tools.codepage import toencoding
from ezdxf.audit import Auditor, AuditError
if TYPE_CHECKING:
from ezdxf.eztypes import Drawing, SectionDict
__all__ = ["read", "readfile"]
EXCLUDE_STRUCTURE_CHECK = {
"SECTION",
"ENDSEC",
"EOF",
"TABLE",
"ENDTAB",
"ENDBLK",
"SEQEND",
}
def readfile(
filename: Union[str, Path], errors: str = "surrogateescape"
) -> Tuple["Drawing", "Auditor"]:
"""Read a DXF document from the file system similar to :func:`ezdxf.readfile`,
but this function will repair as many flaws as possible, runs the required
audit process automatically and returns the DXF document and the :class:`Auditor`.
Args:
filename: file-system name of the DXF document to load
errors: specify decoding error handler
- "surrogateescape" to preserve possible binary data (default)
- "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
- "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
Raises:
DXFStructureError: for invalid or corrupted DXF structures
UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
"""
filename = str(filename)
with open(filename, mode="rb") as fp:
doc, auditor = read(fp, errors=errors)
doc.filename = filename
return doc, auditor
def read(
stream: BinaryIO, errors: str = "surrogateescape"
) -> Tuple["Drawing", "Auditor"]:
"""Read a DXF document from a binary-stream similar to :func:`ezdxf.read`,
but this function will detect the text encoding automatically and repair
as many flaws as possible, runs the required audit process afterwards
and returns the DXF document and the :class:`Auditor`.
Args:
stream: data stream to load in binary read mode
errors: specify decoding error handler
- "surrogateescape" to preserve possible binary data (default)
- "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
- "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
Raises:
DXFStructureError: for invalid or corrupted DXF structures
UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
"""
recover_tool = Recover.run(stream, errors=errors)
return _load_and_audit_document(recover_tool)
def explore(
filename: Union[str, Path], errors: str = "ignore"
) -> Tuple["Drawing", "Auditor"]:
"""Read a DXF document from the file system similar to :func:`readfile`,
but this function uses a special tag loader, which synchronizes the tag
stream if invalid tags occur. This function is intended to load corrupted
DXF files and should only be used to explore such files; data loss is very
likely.
Args:
filename: file-system name of the DXF document to load
errors: specify decoding error handler
- "surrogateescape" to preserve possible binary data
- "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data (default)
- "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
Raises:
DXFStructureError: for invalid or corrupted DXF structures
UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
.. versionadded: 0.15
"""
filename = str(filename)
with open(filename, mode="rb") as fp:
recover_tool = Recover.run(
fp, errors=errors, loader=synced_bytes_loader
)
doc, auditor = _load_and_audit_document(recover_tool)
doc.filename = filename
return doc, auditor
def _load_and_audit_document(recover_tool) -> Tuple["Drawing", "Auditor"]:
from ezdxf.document import Drawing
doc = Drawing()
doc._load_section_dict(recover_tool.section_dict)
auditor = Auditor(doc)
for code, msg in recover_tool.errors:
auditor.add_error(code, msg)
for code, msg in recover_tool.fixes:
auditor.fixed_error(code, msg)
auditor.run()
return doc, auditor
# noinspection PyMethodMayBeStatic
class Recover:
"""Loose coupled recovering tools."""
def __init__(self, loader: Callable = None):
# different tag loading strategies can be used:
# - bytes_loader(): expects a valid low level structure
# - synced_bytes_loader(): loads everything which looks like a tag
# and skips other content (dangerous!)
self.tag_loader = loader or bytes_loader
# The main goal of all efforts, a Drawing compatible dict of sections:
self.section_dict: "SectionDict" = dict()
# Store error messages from low level processes
self.errors: List[Tuple[int, str]] = []
self.fixes: List[Tuple[int, str]] = []
# Detected DXF version
self.dxfversion = const.DXF12
@classmethod
def run(
cls,
stream: BinaryIO,
loader: Callable = None,
errors: str = "surrogateescape",
) -> "Recover":
"""Execute the recover process."""
recover_tool = Recover(loader)
tags = recover_tool.load_tags(stream, errors)
sections = recover_tool.rebuild_sections(tags)
recover_tool.load_section_dict(sections)
tables = recover_tool.section_dict.get("TABLES")
if tables:
tables = recover_tool.rebuild_tables(tables) # type: ignore
recover_tool.section_dict["TABLES"] = tables
section_dict = recover_tool.section_dict
for name, entities in section_dict.items():
if name in {"TABLES", "BLOCKS", "OBJECTS", "ENTITIES"}:
section_dict[name] = list(
recover_tool.check_entities(entities) # type: ignore
)
return recover_tool
def load_tags(self, stream: BinaryIO, errors: str) -> Iterable[DXFTag]:
return safe_tag_loader(
stream, self.tag_loader, messages=self.errors, errors=errors
)
def rebuild_sections(self, tags: Iterable[DXFTag]) -> List[List[DXFTag]]:
"""Collect tags between SECTION and ENDSEC or next SECTION tag
as list of DXFTag objects, collects tags outside of sections
as an extra section.
Returns:
List of sections as list of DXFTag() objects, the last section
contains orphaned tags found outside of sections
"""
# Invalidly placed DXF entities are removed in the audit process!
def close_section():
# ENDSEC tag is not collected
nonlocal collector, inside_section
if inside_section:
sections.append(collector)
else: # missing SECTION
# ignore this tag, it is even not an orphan
self.fixes.append(
(
AuditError.MISSING_SECTION_TAG,
"DXF structure error: missing SECTION tag.",
)
)
collector = []
inside_section = False
def open_section():
nonlocal inside_section
if inside_section: # missing ENDSEC
self.fixes.append(
(
AuditError.MISSING_ENDSEC_TAG,
"DXF structure error: missing ENDSEC tag.",
)
)
close_section()
collector.append(tag)
inside_section = True
def process_structure_tag():
if value == "SECTION":
open_section()
elif value == "ENDSEC":
close_section()
elif value == "EOF":
if inside_section:
self.fixes.append(
(
AuditError.MISSING_ENDSEC_TAG,
"DXF structure error: missing ENDSEC tag.",
)
)
close_section()
else:
collect()
def collect():
if inside_section:
collector.append(tag)
else:
self.fixes.append(
(
AuditError.FOUND_TAG_OUTSIDE_SECTION,
f"DXF structure error: found tag outside section: "
f"({code}, {value})",
)
)
orphans.append(tag)
orphans: List[DXFTag] = []
sections: List[List[DXFTag]] = []
collector: List[DXFTag] = []
inside_section = False
for tag in tags:
code, value = tag
if code == 0:
process_structure_tag()
else:
collect()
sections.append(orphans)
return sections
def load_section_dict(self, sections: List[List[DXFTag]]) -> None:
"""Merge sections of same type."""
def add_section(name: str, tags) -> None:
if name in section_dict:
section_dict[name].extend(tags[2:])
else:
section_dict[name] = tags
def _build_section_dict(d: Dict) -> None:
for name, section in d.items():
if name in const.MANAGED_SECTIONS:
self.section_dict[name] = list(group_tags(section, 0))
def _remove_unsupported_sections(d: Dict):
for name in ("CLASSES", "OBJECTS", "ACDSDATA"):
if name in d:
del d[name]
self.fixes.append(
(
AuditError.REMOVED_UNSUPPORTED_SECTION,
f"Removed unsupported {name} section for DXF R12.",
)
)
# Last section could be orphaned tags:
orphans = sections.pop()
if orphans and orphans[0] == (0, "SECTION"):
# The last section does not contain the orphaned tags:
sections.append(orphans)
orphans = []
section_dict: "SectionDict" = dict()
for section in sections:
code, name = section[1]
if code == 2:
add_section(name, section)
else: # invalid section name tag e.g. (2, "HEADER")
self.fixes.append(
(
AuditError.MISSING_SECTION_NAME_TAG,
"DXF structure error: missing section name tag, ignore section.",
)
)
header = section_dict.setdefault(
"HEADER",
[
DXFTag(0, "SECTION"), # type: ignore
DXFTag(2, "HEADER"), # type: ignore
],
)
self.rescue_orphaned_header_vars(header, orphans) # type: ignore
self.dxfversion = _detect_dxf_version(header)
if self.dxfversion <= const.DXF12:
_remove_unsupported_sections(section_dict)
_build_section_dict(section_dict)
def rebuild_tables(self, tables: List[Tags]) -> List[Tags]:
"""Rebuild TABLES section."""
# Note: the recover module does not report invalidly placed table entries,
# it just recovers them. The "normal" loading process ignores these
# misplaced table entries and logs a warning.
def append_table(name: str):
if name not in content:
return
head = heads.get(name)
if head:
tables.append(head)
else:
# The new table head gets a valid handle from Auditor.
tables.append(Tags([DXFTag(0, "TABLE"), DXFTag(2, name)]))
tables.extend(content[name])
tables.append(Tags([DXFTag(0, "ENDTAB")]))
heads = dict()
content = defaultdict(list)
valid_tables = set(const.TABLE_NAMES_ACAD_ORDER)
for entry in tables:
name = entry[0].value.upper()
if name == "TABLE":
try:
table_name = entry[1].value.upper()
except (IndexError, AttributeError):
pass
else:
heads[table_name] = entry
elif name in valid_tables:
content[name].append(entry)
tables = [Tags([DXFTag(0, "SECTION"), DXFTag(2, "TABLES")])]
names = list(const.TABLE_NAMES_ACAD_ORDER)
if self.dxfversion <= const.DXF12:
# Ignore BLOCK_RECORD table
names.remove("BLOCK_RECORD")
if "BLOCK_RECORD" in content:
self.fixes.append(
(
AuditError.REMOVED_UNSUPPORTED_TABLE,
f"Removed unsupported BLOCK_RECORD table for DXF R12.",
)
)
for name in names:
append_table(name)
return tables
def rescue_orphaned_header_vars(
self, header: List[DXFTag], orphans: Iterable[DXFTag]
) -> None:
var_name = None
for tag in orphans:
code, value = tag
if code == 9:
var_name = tag
elif var_name is not None:
header.append(var_name)
header.append(tag)
var_name = None
def check_entities(self, entities: List[Tags]) -> Iterable[Tags]:
for entity in entities:
_, dxftype = entity[0]
if dxftype in EXCLUDE_STRUCTURE_CHECK:
yield entity
else:
# raises DXFStructureError() for invalid entities
yield Tags(entity_structure_validator(entity))
def _detect_dxf_version(header: List) -> str:
next_is_dxf_version = False
for tag in header:
if next_is_dxf_version:
dxfversion = str(tag[1]).strip()
if re.fullmatch(r"AC[0-9]{4}", dxfversion):
return dxfversion
else:
break
if tag == (9, "$ACADVER"):
next_is_dxf_version = True
return const.DXF12
def safe_tag_loader(
stream: BinaryIO,
loader: Callable = None,
messages: List = None,
errors: str = "surrogateescape",
) -> Iterable[DXFTag]:
"""Yields :class:``DXFTag`` objects from a bytes `stream`
(untrusted external source), skips all comment tags (group code == 999).
- Fixes unordered and invalid vertex tags.
- Pass :func:`synced_bytes_loader` as argument `loader` to brute force
load invalid tag structure.
Args:
stream: input data stream as bytes
loader: low level tag loader, default loader is :func:`bytes_loader`
messages: list to store error messages
errors: specify decoding error handler
- "surrogateescape" to preserve possible binary data (default)
- "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
- "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
"""
if loader is None:
loader = bytes_loader
tags, detector_stream = itertools.tee(loader(stream), 2)
encoding = detect_encoding(detector_stream)
# Apply repair filter:
tags = repair.tag_reorder_layer(tags) # type: ignore
tags = repair.filter_invalid_point_codes(tags) # type: ignore
return byte_tag_compiler(tags, encoding, messages=messages, errors=errors)
INT_PATTERN_S = re.compile(r"[+-]?\d+")
INT_PATTERN_B = re.compile(rb"[+-]?\d+")
def _search_int(s: Union[str, bytes]) -> int:
"""Emulate the behavior of the C function stoll(), which just stops
converting strings to integers at the first invalid char without raising
an exception, e.g. "42xyz" is parsed as the valid integer 42.
"""
res = re.search( # type: ignore
INT_PATTERN_S if isinstance(s, str) else INT_PATTERN_B, s
)
if res:
s = res.group()
return int(s)
FLOAT_PATTERN_S = re.compile(r"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?")
FLOAT_PATTERN_B = re.compile(rb"[+-]?\d+(:?\.\d*)?(:?[eE][+-]?\d+)?")
def _search_float(s: Union[str, bytes]) -> float:
"""Emulate the behavior of the C function stod(), which just stops
converting strings to doubles at the first invalid char without raising
an exception, e.g. "47.11xyz" is parsed as the valid double 47.11.
"""
res = re.search( # type: ignore
FLOAT_PATTERN_S if isinstance(s, str) else FLOAT_PATTERN_B, s
)
if res:
s = res.group()
return float(s)
@typing.no_type_check
def bytes_loader(stream: BinaryIO) -> Iterable[DXFTag]:
"""Yields :class:``DXFTag`` objects from a bytes `stream`
(untrusted external source), skips all comment tags (group code == 999).
``DXFTag.code`` is always an ``int`` and ``DXFTag.value`` is always a
raw bytes value without line endings. Works with file system streams and
:class:`BytesIO` streams.
Raises:
DXFStructureError: Found invalid group code.
"""
line = 1
readline = stream.readline
while True:
code = readline()
# BytesIO(): an empty string indicates EOF - does not raise an exception
if code:
try:
code = int(code)
except ValueError:
try: # harder to find an int
code = _search_int(code)
except ValueError:
code = code.decode(errors="ignore")
raise const.DXFStructureError(
f'Invalid group code "{code}" at line {line}.'
)
else:
return
value = readline()
# BytesIO(): an empty string indicates EOF
if value:
if code != 999:
yield DXFTag(code, value.rstrip(b"\r\n"))
line += 2
else:
return
def synced_bytes_loader(stream: BinaryIO) -> Iterable[DXFTag]:
"""Yields :class:``DXFTag`` objects from a bytes `stream`
(untrusted external source), skips all comment tags (group code == 999).
``DXFTag.code`` is always an ``int`` and ``DXFTag.value`` is always a
raw bytes value without line endings. Works with file system streams and
:class:`BytesIO` streams.
Does not raise DXFStructureError on invalid group codes, instead skips
lines until a valid group code or EOF is found.
This can remove invalid lines before group codes, but can not
detect invalid lines between group code and tag value.
"""
code = 999
upper_boundary = MAX_GROUP_CODE + 1
readline = stream.readline
while True:
seeking_valid_group_code = True
while seeking_valid_group_code:
code = readline() # type: ignore
if code:
try: # hard to find an int
code = _search_int(code) # type: ignore
except ValueError:
pass
else:
if 0 <= code < upper_boundary:
seeking_valid_group_code = False
else:
return # empty string is EOF
value = readline()
if value:
if code != 999:
yield DXFTag(code, value.rstrip(b"\r\n"))
else:
return # empty string is EOF
DWGCODEPAGE = b"$DWGCODEPAGE"
ACADVER = b"$ACADVER"
def detect_encoding(tags: Iterable[DXFTag]) -> str:
"""Detect text encoding from header variables $DWGCODEPAGE and $ACADVER
out of a stream of DXFTag objects.
Assuming a malformed DXF file:
The header variables could reside outside of the HEADER section,
an ENDSEC tag is not a reliable indicator that no $DWGCODEPAGE or
$ACADVER header variable will show up in the remaining tag stream.
Worst case: DXF file without a $ACADVER var, and a $DWGCODEPAGE
unequal to "ANSI_1252" at the end of the file.
"""
encoding = None
dxfversion = None
next_tag = None
for code, value in tags:
if code == 9:
if value == DWGCODEPAGE:
next_tag = DWGCODEPAGE # e.g. (3, "ANSI_1252")
elif value == ACADVER:
next_tag = ACADVER # e.g. (1, "AC1012")
elif code == 3 and next_tag == DWGCODEPAGE:
encoding = toencoding(value.decode(const.DEFAULT_ENCODING))
next_tag = None
elif code == 1 and next_tag == ACADVER:
dxfversion = value.decode(const.DEFAULT_ENCODING)
next_tag = None
if encoding and dxfversion:
return "utf8" if dxfversion >= const.DXF2007 else encoding
return const.DEFAULT_ENCODING
@typing.no_type_check
def byte_tag_compiler(
tags: Iterable[DXFTag],
encoding=const.DEFAULT_ENCODING,
messages: List = None,
errors: str = "surrogateescape",
) -> Iterable[DXFTag]:
"""Compiles DXF tag values imported by bytes_loader() into Python types.
Raises DXFStructureError() for invalid float values and invalid coordinate
values.
Expects DXF coordinates written in x, y[, z] order, see function
:func:`safe_tag_loader` for usage with applied repair filters.
Args:
tags: DXF tag generator, yielding tag values as bytes like bytes_loader()
encoding: text encoding
messages: list to store error messages
errors: specify decoding error handler
- "surrogateescape" to preserve possible binary data (default)
- "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
- "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
Raises:
DXFStructureError: Found invalid DXF tag or unexpected coordinate order.
"""
def error_msg(tag):
code = tag.code
value = tag.value.decode(encoding)
return f'Invalid tag ({code}, "{value}") near line: {line}.'
if messages is None:
messages = []
tags = iter(tags)
undo_tag = None
line = 0
while True:
try:
if undo_tag is not None:
x = undo_tag
undo_tag = None
else:
x = next(tags)
line += 2
code = x.code
if code in POINT_CODES:
y = next(tags) # y coordinate is mandatory
line += 2
# e.g. y-code for x-code=10 is 20
if y.code != code + 10:
raise const.DXFStructureError(
f"Missing required y-coordinate near line: {line}."
)
# optional z coordinate
z = next(tags)
line += 2
try:
# is it a z-coordinate like (30, 0.0) for base x-code=10
if z.code == code + 20:
try:
point = (
float(x.value),
float(y.value),
float(z.value),
)
except ValueError: # search for any float values
point = (
_search_float(x.value),
_search_float(y.value),
_search_float(z.value),
)
else:
try:
point = (float(x.value), float(y.value))
except ValueError: # search for any float values
point = (
_search_float(x.value),
_search_float(y.value),
)
undo_tag = z
except ValueError:
raise const.DXFStructureError(
f"Invalid floating point values near line: {line}."
)
yield DXFVertex(code, point)
elif code in BINARY_DATA:
# maybe pre compiled in low level tagger (binary DXF)
if isinstance(x, DXFBinaryTag):
tag = x
else:
try:
tag = DXFBinaryTag.from_string(code, x.value)
except ValueError:
raise const.DXFStructureError(
f"Invalid binary data near line: {line}."
)
yield tag
else: # just a single tag
type_ = TYPE_TABLE.get(code, str)
value: bytes = x.value
if type_ is str:
if code == 0:
# remove white space from structure tags
value = x.value.strip().upper()
try: # 2 stages to document decoding errors
str_ = value.decode(encoding, errors="strict")
except UnicodeDecodeError:
str_ = value.decode(encoding, errors=errors)
messages.append(
(
AuditError.DECODING_ERROR,
f"Fixed unicode decoding error near line {line}",
)
)
# Convert DXF unicode notation "\U+xxxx" to unicode,
# but exclude structure tags (code>0):
if code and has_dxf_unicode(str_):
str_ = decode_dxf_unicode(str_)
yield DXFTag(code, str_)
else:
try:
# fast path for int and float
yield DXFTag(code, type_(value))
except ValueError:
# slow path - e.g. ProE stores int values as floats :((
if type_ is int:
try:
yield DXFTag(code, _search_int(x.value))
except ValueError:
raise const.DXFStructureError(error_msg(x))
elif type_ is float:
try:
yield DXFTag(code, _search_float(x.value))
except ValueError:
raise const.DXFStructureError(error_msg(x))
else:
raise const.DXFStructureError(error_msg(x))
except StopIteration:
return
```
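A usage sketch for the recover module: load a possibly damaged file, inspect the audit results and save the recovered document; the file names are placeholders and the error handling follows the documented `readfile()` pattern:
```python
import sys
import ezdxf
from ezdxf import recover

try:
    doc, auditor = recover.readfile("damaged.dxf")  # placeholder file name
except IOError:
    print("not a DXF file or a generic I/O error")
    sys.exit(1)
except ezdxf.DXFStructureError:
    print("invalid or corrupted DXF file")
    sys.exit(2)

if auditor.has_errors:
    # Unrecoverable errors remain, but the document is still usable by ezdxf.
    print(f"found {len(auditor.errors)} unrecoverable errors")
if auditor.has_fixes:
    print(f"fixed {len(auditor.fixes)} errors")

doc.saveas("recovered.dxf")
```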
#### File: ezdxf/render/abstract_mtext_renderer.py
```python
from typing import List, Sequence, Dict, Tuple, Optional
import abc
from ezdxf.lldxf import const
from ezdxf.entities.mtext import MText, MTextColumns
from ezdxf.enums import (
MTextParagraphAlignment,
)
from ezdxf.tools import text_layout as tl, fonts
from ezdxf.tools.text import (
MTextParser,
MTextContext,
TokenType,
ParagraphProperties,
AbstractFont,
estimate_mtext_extents,
)
__all__ = ["AbstractMTextRenderer"]
ALIGN = {
MTextParagraphAlignment.LEFT: tl.ParagraphAlignment.LEFT,
MTextParagraphAlignment.RIGHT: tl.ParagraphAlignment.RIGHT,
MTextParagraphAlignment.CENTER: tl.ParagraphAlignment.CENTER,
MTextParagraphAlignment.JUSTIFIED: tl.ParagraphAlignment.JUSTIFIED,
MTextParagraphAlignment.DISTRIBUTED: tl.ParagraphAlignment.JUSTIFIED,
MTextParagraphAlignment.DEFAULT: tl.ParagraphAlignment.LEFT,
}
ATTACHMENT_POINT_TO_ALIGN = {
const.MTEXT_TOP_LEFT: tl.ParagraphAlignment.LEFT,
const.MTEXT_MIDDLE_LEFT: tl.ParagraphAlignment.LEFT,
const.MTEXT_BOTTOM_LEFT: tl.ParagraphAlignment.LEFT,
const.MTEXT_TOP_CENTER: tl.ParagraphAlignment.CENTER,
const.MTEXT_MIDDLE_CENTER: tl.ParagraphAlignment.CENTER,
const.MTEXT_BOTTOM_CENTER: tl.ParagraphAlignment.CENTER,
const.MTEXT_TOP_RIGHT: tl.ParagraphAlignment.RIGHT,
const.MTEXT_MIDDLE_RIGHT: tl.ParagraphAlignment.RIGHT,
const.MTEXT_BOTTOM_RIGHT: tl.ParagraphAlignment.RIGHT,
}
STACKING = {
"^": tl.Stacking.OVER,
"/": tl.Stacking.LINE,
"#": tl.Stacking.SLANTED,
}
def make_default_tab_stops(cap_height: float, width: float) -> List[tl.TabStop]:
tab_stops = []
step = 4.0 * cap_height
pos = step
while pos < width:
tab_stops.append(tl.TabStop(pos, tl.TabStopType.LEFT))
pos += step
return tab_stops
def append_default_tab_stops(
tab_stops: List[tl.TabStop], default_stops: Sequence[tl.TabStop]
) -> None:
last_pos = 0.0
if tab_stops:
last_pos = tab_stops[-1].pos
tab_stops.extend(stop for stop in default_stops if stop.pos > last_pos)
def make_tab_stops(
cap_height: float,
width: float,
tab_stops: Sequence,
default_stops: Sequence[tl.TabStop],
) -> List[tl.TabStop]:
_tab_stops = []
for stop in tab_stops:
if isinstance(stop, str):
value = float(stop[1:])
if stop[0] == "c":
kind = tl.TabStopType.CENTER
else:
kind = tl.TabStopType.RIGHT
else:
kind = tl.TabStopType.LEFT
value = float(stop)
pos = value * cap_height
if pos < width:
_tab_stops.append(tl.TabStop(pos, kind))
append_default_tab_stops(_tab_stops, default_stops)
return _tab_stops
def get_stroke(ctx: MTextContext) -> int:
stroke = 0
if ctx.underline:
stroke += tl.Stroke.UNDERLINE
if ctx.strike_through:
stroke += tl.Stroke.STRIKE_THROUGH
if ctx.overline:
stroke += tl.Stroke.OVERLINE
if ctx.continue_stroke:
stroke += tl.Stroke.CONTINUE
return stroke
def new_paragraph(
cells: List,
ctx: MTextContext,
cap_height: float,
line_spacing: float = 1,
width: float = 0,
default_stops: Sequence[tl.TabStop] = None,
):
if cells:
p = ctx.paragraph
align = ALIGN.get(p.align, tl.ParagraphAlignment.LEFT)
left = p.left * cap_height
right = p.right * cap_height
first = left + p.indent * cap_height # relative to left
_default_stops: Sequence[tl.TabStop] = default_stops or []
tab_stops = _default_stops
if p.tab_stops:
tab_stops = make_tab_stops(
cap_height, width, p.tab_stops, _default_stops
)
paragraph = tl.Paragraph(
align=align,
indent=(first, left, right),
line_spacing=line_spacing,
tab_stops=tab_stops,
)
paragraph.append_content(cells)
else:
paragraph = tl.EmptyParagraph( # type: ignore
cap_height=ctx.cap_height, line_spacing=line_spacing
)
return paragraph
def super_glue():
return tl.NonBreakingSpace(width=0, min_width=0, max_width=0)
def defined_width(mtext: MText) -> float:
width = mtext.dxf.get("width", 0.0)
if width < 1e-6:
width, height = estimate_mtext_extents(mtext)
return width
def column_heights(columns: MTextColumns) -> List[Optional[float]]:
heights: List[Optional[float]]
if columns.heights: # dynamic manual
heights = list(columns.heights)
# last height has to be auto height = None
heights[-1] = None
else: # static, dynamic auto
heights = [columns.defined_height] * columns.count
return heights
class AbstractMTextRenderer(abc.ABC):
def __init__(self):
self._font_cache: Dict[Tuple[str, float, float], AbstractFont] = {}
@abc.abstractmethod
def word(self, test: str, ctx: MTextContext) -> tl.ContentCell:
...
@abc.abstractmethod
def fraction(
self, data: Tuple[str, str, str], ctx: MTextContext
) -> tl.ContentCell:
...
@abc.abstractmethod
def get_font_face(self, mtext: MText) -> fonts.FontFace:
...
@abc.abstractmethod
def make_bg_renderer(self, mtext: MText) -> tl.ContentRenderer:
...
def make_mtext_context(self, mtext: MText) -> MTextContext:
ctx = MTextContext()
ctx.paragraph = ParagraphProperties(
align=ATTACHMENT_POINT_TO_ALIGN.get( # type: ignore
mtext.dxf.attachment_point, tl.ParagraphAlignment.LEFT
)
)
ctx.font_face = self.get_font_face(mtext)
ctx.cap_height = mtext.dxf.char_height
ctx.aci = mtext.dxf.color
rgb = mtext.rgb
if rgb is not None:
ctx.rgb = rgb
return ctx
def get_font(self, ctx: MTextContext) -> fonts.AbstractFont:
ttf = fonts.find_ttf_path(ctx.font_face) # 1st call is very slow
key = (ttf, ctx.cap_height, ctx.width_factor)
font = self._font_cache.get(key)
if font is None:
font = fonts.make_font(ttf, ctx.cap_height, ctx.width_factor)
self._font_cache[key] = font
return font
def get_stroke(self, ctx: MTextContext) -> int:
return get_stroke(ctx)
def get_stacking(self, type_: str) -> tl.Stacking:
return STACKING.get(type_, tl.Stacking.LINE)
def space_width(self, ctx: MTextContext) -> float:
return self.get_font(ctx).space_width()
def space(self, ctx: MTextContext):
return tl.Space(width=self.space_width(ctx))
def tabulator(self, ctx: MTextContext):
return tl.Tabulator(width=self.space_width(ctx))
def non_breaking_space(self, ctx: MTextContext):
return tl.NonBreakingSpace(width=self.space_width(ctx))
def layout_engine(self, mtext: MText) -> tl.Layout:
initial_cap_height = mtext.dxf.char_height
line_spacing = mtext.dxf.line_spacing_factor
def append_paragraph():
paragraph = new_paragraph(
cells,
ctx,
initial_cap_height,
line_spacing,
width,
default_stops,
)
layout.append_paragraphs([paragraph])
cells.clear()
bg_renderer = self.make_bg_renderer(mtext)
width = defined_width(mtext)
default_stops = make_default_tab_stops(initial_cap_height, width)
layout = tl.Layout(width=width)
if mtext.has_columns:
columns = mtext.columns
assert columns is not None
for height in column_heights(columns):
layout.append_column(
width=columns.width,
height=height,
gutter=columns.gutter_width,
renderer=bg_renderer,
)
else:
# column with auto height and default width
layout.append_column(renderer=bg_renderer)
content = mtext.all_columns_raw_content()
ctx = self.make_mtext_context(mtext)
cells: List[tl.Cell] = []
for token in MTextParser(content, ctx):
ctx = token.ctx
if token.type == TokenType.NEW_PARAGRAPH:
append_paragraph()
elif token.type == TokenType.NEW_COLUMN:
append_paragraph()
layout.next_column()
elif token.type == TokenType.SPACE:
cells.append(self.space(ctx))
elif token.type == TokenType.NBSP:
cells.append(self.non_breaking_space(ctx))
elif token.type == TokenType.TABULATOR:
cells.append(self.tabulator(ctx))
elif token.type == TokenType.WORD:
if cells and isinstance(cells[-1], (tl.Text, tl.Fraction)):
# Create an unbreakable connection between those two parts.
cells.append(super_glue())
cells.append(self.word(token.data, ctx))
elif token.type == TokenType.STACK:
if cells and isinstance(cells[-1], (tl.Text, tl.Fraction)):
# Create an unbreakable connection between those two parts.
cells.append(super_glue())
cells.append(self.fraction(token.data, ctx))
if cells:
append_paragraph()
return layout
```
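The renderer class itself is abstract, but the module-level helpers above can be exercised directly; a small sketch using only functions defined in this file (the numeric values are arbitrary):
```python
from ezdxf.tools.text import MTextContext
from ezdxf.render.abstract_mtext_renderer import (
    make_default_tab_stops,
    get_stroke,
    new_paragraph,
)

# Default tab stops every 4 * cap_height up to the column width:
stops = make_default_tab_stops(cap_height=0.25, width=5.0)
print([stop.pos for stop in stops])  # [1.0, 2.0, 3.0, 4.0]

# Combine stroke flags from an MTEXT formatting context:
ctx = MTextContext()
ctx.underline = True
ctx.strike_through = True
print(get_stroke(ctx))  # Stroke.UNDERLINE + Stroke.STRIKE_THROUGH

# An empty cell list yields an EmptyParagraph placeholder:
paragraph = new_paragraph([], ctx, cap_height=0.25)
print(type(paragraph).__name__)
```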
#### File: ezdxf/render/dimension.py
```python
from typing import TYPE_CHECKING
from ezdxf.math import UCS
from ezdxf.lldxf.const import DXFValueError
from ezdxf.entities.dimstyleoverride import DimStyleOverride
from .dim_linear import LinearDimension
from .dim_radius import RadiusDimension
from .dim_diameter import DiameterDimension
from .dim_curved import AngularDimension, Angular3PDimension, ArcLengthDimension
from .dim_ordinate import OrdinateDimension
if TYPE_CHECKING:
from ezdxf.eztypes import Dimension, BaseDimensionRenderer
class DimensionRenderer:
def dispatch(
self, override: "DimStyleOverride", ucs: "UCS" = None
) -> "BaseDimensionRenderer":
dimension = override.dimension
dim_type = dimension.dimtype
dxf_type = dimension.dxftype()
if dxf_type == "ARC_DIMENSION":
return self.arc_length(dimension, ucs, override)
elif dxf_type == "LARGE_RADIAL_DIMENSION":
return self.large_radial(dimension, ucs, override)
elif dim_type in (0, 1):
return self.linear(dimension, ucs, override)
elif dim_type == 2:
return self.angular(dimension, ucs, override)
elif dim_type == 3:
return self.diameter(dimension, ucs, override)
elif dim_type == 4:
return self.radius(dimension, ucs, override)
elif dim_type == 5:
return self.angular3p(dimension, ucs, override)
elif dim_type == 6:
return self.ordinate(dimension, ucs, override)
else:
raise DXFValueError(f"Unknown DIMENSION type: {dim_type}")
def linear(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for linear dimension lines: horizontal, vertical and rotated"""
return LinearDimension(dimension, ucs, override)
def angular(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for angular dimension defined by two lines."""
return AngularDimension(dimension, ucs, override)
def diameter(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for diameter dimension"""
return DiameterDimension(dimension, ucs, override)
def radius(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for radius dimension"""
return RadiusDimension(dimension, ucs, override)
def large_radial(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for large radial dimension"""
raise NotImplementedError()
def angular3p(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for angular dimension defined by three points."""
return Angular3PDimension(dimension, ucs, override)
def ordinate(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for ordinate dimension."""
return OrdinateDimension(dimension, ucs, override)
def arc_length(
self,
dimension: "Dimension",
ucs: "UCS" = None,
override: "DimStyleOverride" = None,
):
"""Call renderer for arc length dimension."""
return ArcLengthDimension(dimension, ucs, override)
```
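The dispatcher above is usually not called directly; the documented entry point is one of the `add_*_dim()` factory methods of a layout, whose returned `DimStyleOverride` triggers the matching renderer on `render()`. A minimal sketch based on the documented ezdxf workflow (file name and coordinates are illustrative):
```python
import ezdxf

doc = ezdxf.new("R2010", setup=True)  # setup=True creates the default dimension styles
msp = doc.modelspace()

# A horizontal linear dimension is dispatched to LinearDimension (dimtype 0/1):
dim = msp.add_linear_dim(base=(3, 2), p1=(0, 0), p2=(6, 0), dimstyle="EZDXF")
dim.render()  # builds the dimension geometry via DimensionRenderer

doc.saveas("linear_dim.dxf")
```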
#### File: ezdxf/render/linetypes.py
```python
from typing import Tuple, Iterable
import math
from ezdxf.math import Vec3, Vertex
LineSegment = Tuple[Vec3, Vec3]
class LineTypeRenderer:
def __init__(self, dashes: Iterable[float]):
# Simplified dash pattern: line-gap-line-gap
# Dash pattern should end with a gap (even count).
# Dash length in drawing units.
self._dashes: Tuple[float, ...] = tuple(dashes)
self._dash_count: int = len(self._dashes)
self.is_solid: bool = True
self._current_dash: int = 0
self._current_dash_length: float = 0.0
if self._dash_count > 1:
self.is_solid = False
self._current_dash_length = self._dashes[0]
self._is_dash = True
def line_segment(self, start: Vertex, end: Vertex) -> Iterable[LineSegment]:
_start = Vec3(start)
_end = Vec3(end)
if self.is_solid or _start.isclose(_end):
yield _start, _end
return
segment_vec = _end - _start
segment_length = segment_vec.magnitude
segment_dir = segment_vec / segment_length # normalize
for is_dash, dash_length in self._render_dashes(segment_length):
_end = _start + segment_dir * dash_length
if is_dash:
yield _start, _end
_start = _end
def line_segments(
self, vertices: Iterable[Vertex]
) -> Iterable[LineSegment]:
last = None
for vertex in vertices:
if last is not None:
yield from self.line_segment(last, vertex)
last = vertex
def _render_dashes(self, length: float) -> Iterable[Tuple[bool, float]]:
if length <= self._current_dash_length:
self._current_dash_length -= length
yield self._is_dash, length
if math.isclose(self._current_dash_length, 0.0):
self._cycle_dashes()
else:
# Avoid deep recursions!
while length > self._current_dash_length:
length -= self._current_dash_length
yield from self._render_dashes(self._current_dash_length)
if length > 0.0:
yield from self._render_dashes(length)
def _cycle_dashes(self):
self._current_dash = (self._current_dash + 1) % self._dash_count
self._current_dash_length = self._dashes[self._current_dash]
self._is_dash = not self._is_dash
```
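A short sketch of how `LineTypeRenderer` chops a line into dash segments; the dash pattern and coordinates are made up for illustration, and the import path is taken from the file header above:
```python
from ezdxf.render.linetypes import LineTypeRenderer

# Dash pattern: 0.5 drawing units line, 0.25 units gap (alternating line/gap):
ltr = LineTypeRenderer(dashes=(0.5, 0.25))
assert not ltr.is_solid

# Split a 3 units long line into its visible dash segments; gaps are skipped:
for start, end in ltr.line_segment((0, 0, 0), (3, 0, 0)):
    print(start, end)

# A single dash value means "solid": the input line is passed through unchanged.
solid = LineTypeRenderer(dashes=(1.0,))
print(list(solid.line_segment((0, 0), (1, 0))))
```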
#### File: ezdxf/render/point.py
```python
from typing import TYPE_CHECKING, List, cast
import math
from ezdxf.entities import factory
from ezdxf.math import Vec3, UCS, NULLVEC
if TYPE_CHECKING:
from ezdxf.entities import Point, DXFGraphic
def virtual_entities(
point: "Point", pdsize: float = 1, pdmode: int = 0
) -> List["DXFGraphic"]:
"""Yields point graphic as DXF primitives LINE and CIRCLE entities.
The dimensionless point is rendered as zero-length line!
Check for this condition::
e.dxftype() == 'LINE' and e.dxf.start.isclose(e.dxf.end)
if the rendering engine can't handle zero-length lines.
Args:
point: DXF POINT entity
pdsize: point size in drawing units
pdmode: point styling mode, see :class:`~ezdxf.entities.Point` class
.. versionadded:: 0.15
"""
def add_line_symmetrical(offset: Vec3):
dxfattribs["start"] = ucs.to_wcs(-offset)
dxfattribs["end"] = ucs.to_wcs(offset)
entities.append(cast("DXFGraphic", factory.new("LINE", dxfattribs)))
def add_line(s: Vec3, e: Vec3):
dxfattribs["start"] = ucs.to_wcs(s)
dxfattribs["end"] = ucs.to_wcs(e)
entities.append(cast("DXFGraphic", factory.new("LINE", dxfattribs)))
center = point.dxf.location
# This is not a real OCS! Defines just the point orientation,
# location is in WCS!
ocs = point.ocs()
ucs = UCS(origin=center, ux=ocs.ux, uz=ocs.uz)
# The point angle is clockwise oriented:
ucs = ucs.rotate_local_z(math.radians(-point.dxf.angle))
entities: List["DXFGraphic"] = []
gfx = point.graphic_properties()
radius = pdsize * 0.5
has_circle = bool(pdmode & 32)
has_square = bool(pdmode & 64)
style = pdmode & 7
dxfattribs = dict(gfx)
if style == 0: # . dimensionless point as zero-length line
add_line_symmetrical(NULLVEC)
# style == 1: no point symbol
elif style == 2: # + cross
add_line_symmetrical(Vec3(pdsize, 0))
add_line_symmetrical(Vec3(0, pdsize))
elif style == 3: # x cross
add_line_symmetrical(Vec3(pdsize, pdsize))
add_line_symmetrical(Vec3(pdsize, -pdsize))
elif style == 4: # ' tick
add_line(NULLVEC, Vec3(0, radius))
if has_square:
x1 = -radius
x2 = radius
y1 = -radius
y2 = radius
add_line(Vec3(x1, y1), Vec3(x2, y1))
add_line(Vec3(x2, y1), Vec3(x2, y2))
add_line(Vec3(x2, y2), Vec3(x1, y2))
add_line(Vec3(x1, y2), Vec3(x1, y1))
if has_circle:
dxfattribs = dict(gfx)
if point.dxf.hasattr("extrusion"):
dxfattribs["extrusion"] = ocs.uz
dxfattribs["center"] = ocs.from_wcs(center)
else:
dxfattribs["center"] = center
dxfattribs["radius"] = radius
entities.append(cast("DXFGraphic", factory.new("CIRCLE", dxfattribs)))
return entities
```
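A usage sketch for `virtual_entities()` (module path taken from the file header above): create a POINT and expand it according to the point display mode, here an "x" cross (style 3) combined with a surrounding circle (bit 32).
```python
import ezdxf
from ezdxf.render.point import virtual_entities

doc = ezdxf.new()
msp = doc.modelspace()
point = msp.add_point((10, 10))

# Style 3 ("x" cross) plus bit 32 (circle) -> two LINE entities and one CIRCLE:
for e in virtual_entities(point, pdsize=2.0, pdmode=3 | 32):
    print(e.dxftype())
```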
#### File: src/ezdxf/reorder.py
```python
from typing import TYPE_CHECKING, Iterable, Tuple, Dict, Union, List
import heapq
if TYPE_CHECKING:
from ezdxf.eztypes import DXFGraphic
__all__ = ["ascending", "descending"]
# ODA DWG Specs: 2.13. Handle References
# COUNTER is 4 bits, which allows handles up to 16 bytes = 128 bits
# Example for 128-bit handles: "CADKitSamples\AEC Plan Elev Sample.dxf"
MAX_HANDLE = "FFFFFFFFFFFFFFFF"
NULL_HANDLE = "0"
def ascending(
entities: Iterable["DXFGraphic"],
mapping: Union[Dict, Iterable[Tuple[str, str]]] = None,
) -> Iterable["DXFGraphic"]:
"""Yields entities in ascending handle order.
The sort handle doesn't have to be the entity handle: every entity handle
found in `mapping` is replaced by the given sort handle. `mapping` is an
iterable of (entity_handle, sort_handle) tuples or a dict that maps
entity handles to sort handles. Entities with equal sort handles show up
in source entities order.
Args:
entities: iterable of :class:`DXFGraphic` objects
mapping: iterable of 2-tuples (entity_handle, sort_handle) or a
handle mapping as dict.
"""
mapping = dict(mapping) if mapping else {}
heap = _build(entities, mapping, +1)
return _sorted(heap)
def descending(
entities: Iterable["DXFGraphic"],
mapping: Union[Dict, Iterable[Tuple[str, str]]] = None,
) -> Iterable["DXFGraphic"]:
"""Yields entities in descending handle order.
The sort handle doesn't have to be the entity handle: every entity handle
found in `mapping` is replaced by the given sort handle. `mapping` is an
iterable of (entity_handle, sort_handle) tuples or a dict that maps
entity handles to sort handles. Entities with equal sort handles show up
in reversed source entities order.
Args:
entities: iterable of :class:`DXFGraphic` objects
mapping: iterable of 2-tuples (entity_handle, sort_handle) or a
handle mapping as dict.
"""
mapping = dict(mapping) if mapping else {}
heap = _build(entities, mapping, -1)
return _sorted(heap)
def _sorted(heap) -> Iterable["DXFGraphic"]:
"""Yields heap content in order."""
while heap:
yield heapq.heappop(heap)[-1]
def _build(
entities: Iterable["DXFGraphic"], mapping: Dict, order: int
) -> List[Tuple[int, int, "DXFGraphic"]]:
"""Returns a heap structure.
Args:
entities: DXF entities to order
mapping: handle remapping
order: +1 for ascending, -1 for descending
"""
def sort_handle(entity: "DXFGraphic") -> int:
handle = entity.dxf.handle
sort_handle_ = mapping.get(handle, handle)
if sort_handle_ == NULL_HANDLE:
# This behavior is defined by AutoCAD but not documented in the
# DXF reference.
sort_handle_ = MAX_HANDLE
return int(sort_handle_, 16)
heap: List[Tuple[int, int, "DXFGraphic"]] = []
for index, entity in enumerate(entities):
# DXFGraphic is not sortable, using the index as second value avoids
# a key function and explicitly preserves the source order of
# equal sort handles.
heapq.heappush(
heap,
(
sort_handle(entity) * order,
index * order,
entity,
),
)
return heap
```
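A small sketch of remapping the draw order: the handle of the first line is mapped to a high sort handle, so `ascending()` yields it last (the sort handle value is illustrative).
```python
import ezdxf
from ezdxf import reorder

doc = ezdxf.new()
msp = doc.modelspace()
first = msp.add_line((0, 0), (1, 0))
second = msp.add_line((0, 1), (1, 1))

# Remap the handle of `first` to a high sort handle; unmapped entities use
# their own handle as sort handle.
mapping = {first.dxf.handle: "FFFF"}
for entity in reorder.ascending(msp, mapping):
    print(entity.dxf.handle)  # `first` is yielded last
```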
#### File: ezdxf/sections/classes.py
```python
from typing import TYPE_CHECKING, Iterator, Iterable, Union, cast, Dict, Tuple
from collections import Counter, OrderedDict
import logging
from ezdxf.lldxf.const import DXFStructureError, DXF2004, DXF2000, DXFKeyError
from ezdxf.entities.dxfclass import DXFClass
from ezdxf.entities.dxfentity import DXFEntity, DXFTagStorage
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Drawing
logger = logging.getLogger("ezdxf")
# name: cpp_class_name (2), app_name (3), flags(90), was_a_proxy (280),
# is_an_entity (281)
# Multiple entries for 'name' are possible and supported, ClassSection stores
# entries with key: (name, cpp_class_name).
# 0 <ctrl> CLASS
# 1 <str> MPOLYGON
# 2 <str> AcDbMPolygon
# 3 <str> "AcMPolygonObj15|Version(1.0.0.0) Product Desc: Object enabler for the AcDbMPolyg ... odesk.com"
# 90 <int> 3071, b101111111111
# 280 <int> 0
# 281 <int> 1
CLASS_DEFINITIONS = {
"ACDBDICTIONARYWDFLT": [
"AcDbDictionaryWithDefault",
"ObjectDBX Classes",
0,
0,
0,
],
"SUN": ["AcDbSun", "SCENEOE", 1153, 0, 0],
"DICTIONARYVAR": ["AcDbDictionaryVar", "ObjectDBX Classes", 0, 0, 0],
"TABLESTYLE": ["AcDbTableStyle", "ObjectDBX Classes", 4095, 0, 0],
"MATERIAL": ["AcDbMaterial", "ObjectDBX Classes", 1153, 0, 0],
"VISUALSTYLE": ["AcDbVisualStyle", "ObjectDBX Classes", 4095, 0, 0],
"SCALE": ["AcDbScale", "ObjectDBX Classes", 1153, 0, 0],
"MLEADERSTYLE": ["AcDbMLeaderStyle", "ACDB_MLEADERSTYLE_CLASS", 4095, 0, 0],
"MLEADER": ["AcDbMLeader", "ACDB_MLEADER_CLASS", 3071, 0, 1],
"MPOLYGON": ["AcDbMPolygon", "AcMPolygonObj15", 1025, 0, 1],
"CELLSTYLEMAP": ["AcDbCellStyleMap", "ObjectDBX Classes", 1152, 0, 0],
"EXACXREFPANELOBJECT": ["ExAcXREFPanelObject", "EXAC_ESW", 1025, 0, 0],
"NPOCOLLECTION": [
"AcDbImpNonPersistentObjectsCollection",
"ObjectDBX Classes",
1153,
0,
0,
],
"LAYER_INDEX": ["AcDbLayerIndex", "ObjectDBX Classes", 0, 0, 0],
"SPATIAL_INDEX": ["AcDbSpatialIndex", "ObjectDBX Classes", 0, 0, 0],
"IDBUFFER": ["AcDbIdBuffer", "ObjectDBX Classes", 0, 0, 0],
"DIMASSOC": ["AcDbDimAssoc", "AcDbDimAssoc", 0, 0, 0],
"ACDBSECTIONVIEWSTYLE": [
"AcDbSectionViewStyle",
"ObjectDBX Classes",
1025,
0,
0,
],
"ACDBDETAILVIEWSTYLE": [
"AcDbDetailViewStyle",
"ObjectDBX Classes",
1025,
0,
0,
],
"IMAGEDEF": ["AcDbRasterImageDef", "ISM", 0, 0, 0],
"RASTERVARIABLES": ["AcDbRasterVariables", "ISM", 0, 0, 0],
"IMAGEDEF_REACTOR": ["AcDbRasterImageDefReactor", "ISM", 1, 0, 0],
"IMAGE": ["AcDbRasterImage", "ISM", 2175, 0, 1],
"PDFDEFINITION": ["AcDbPdfDefinition", "ObjectDBX Classes", 1153, 0, 0],
"PDFUNDERLAY": ["AcDbPdfReference", "ObjectDBX Classes", 4095, 0, 1],
"DWFDEFINITION": ["AcDbDwfDefinition", "ObjectDBX Classes", 1153, 0, 0],
"DWFUNDERLAY": ["AcDbDwfReference", "ObjectDBX Classes", 1153, 0, 1],
"DGNDEFINITION": ["AcDbDgnDefinition", "ObjectDBX Classes", 1153, 0, 0],
"DGNUNDERLAY": ["AcDbDgnReference", "ObjectDBX Classes", 1153, 0, 1],
"MENTALRAYRENDERSETTINGS": [
"AcDbMentalRayRenderSettings",
"SCENEOE",
1024,
0,
0,
],
"ACDBPLACEHOLDER": ["AcDbPlaceHolder", "ObjectDBX Classes", 0, 0, 0],
"LAYOUT": ["AcDbLayout", "ObjectDBX Classes", 0, 0, 0],
"SURFACE": ["AcDbSurface", "ObjectDBX Classes", 4095, 0, 1],
"EXTRUDEDSURFACE": ["AcDbExtrudedSurface", "ObjectDBX Classes", 4095, 0, 1],
"LOFTEDSURFACE": ["AcDbLoftedSurface", "ObjectDBX Classes", 0, 0, 1],
"REVOLVEDSURFACE": ["AcDbRevolvedSurface", "ObjectDBX Classes", 0, 0, 1],
"SWEPTSURFACE": ["AcDbSweptSurface", "ObjectDBX Classes", 0, 0, 1],
"PLANESURFACE": ["AcDbPlaneSurface", "ObjectDBX Classes", 4095, 0, 1],
"NURBSSURFACE": ["AcDbNurbSurface", "ObjectDBX Classes", 4095, 0, 1],
"ACDBASSOCEXTRUDEDSURFACEACTIONBODY": [
"AcDbAssocExtrudedSurfaceActionBody",
"ObjectDBX Classes",
1025,
0,
0,
],
"ACDBASSOCLOFTEDSURFACEACTIONBODY": [
"AcDbAssocLoftedSurfaceActionBody",
"ObjectDBX Classes",
1025,
0,
0,
],
"ACDBASSOCREVOLVEDSURFACEACTIONBODY": [
"AcDbAssocRevolvedSurfaceActionBody",
"ObjectDBX Classes",
1025,
0,
0,
],
"ACDBASSOCSWEPTSURFACEACTIONBODY": [
"AcDbAssocSweptSurfaceActionBody",
"ObjectDBX Classes",
1025,
0,
0,
],
"HELIX": ["AcDbHelix", "ObjectDBX Classes", 4095, 0, 1],
"WIPEOUT": ["AcDbWipeout", "WipeOut", 127, 0, 1],
"WIPEOUTVARIABLES": ["AcDbWipeoutVariables", "WipeOut", 0, 0, 0],
"FIELDLIST": ["AcDbFieldList", "ObjectDBX Classes", 1152, 0, 0],
"GEODATA": ["AcDbGeoData", "ObjectDBX Classes", 4095, 0, 0],
"SORTENTSTABLE": ["AcDbSortentsTable", "ObjectDBX Classes", 0, 0, 0],
"ACAD_TABLE": ["AcDbTable", "ObjectDBX Classes", 1025, 0, 1],
"ARC_DIMENSION": ["AcDbArcDimension", "ObjectDBX Classes", 1025, 0, 1],
"LARGE_RADIAL_DIMENSION": [
"AcDbRadialDimensionLarge",
"ObjectDBX Classes",
1025,
0,
1,
],
}
REQ_R2000 = [
"ACDBDICTIONARYWDFLT",
"SUN",
"VISUALSTYLE",
"MATERIAL",
"SCALE",
"TABLESTYLE",
"MLEADERSTYLE",
"DICTIONARYVAR",
"CELLSTYLEMAP",
"MENTALRAYRENDERSETTINGS",
"ACDBDETAILVIEWSTYLE",
"ACDBSECTIONVIEWSTYLE",
"RASTERVARIABLES",
"ACDBPLACEHOLDER",
"LAYOUT",
]
REQ_R2004 = [
"ACDBDICTIONARYWDFLT",
"SUN",
"VISUALSTYLE",
"MATERIAL",
"SCALE",
"TABLESTYLE",
"MLEADERSTYLE",
"DICTIONARYVAR",
"CELLSTYLEMAP",
"MENTALRAYRENDERSETTINGS",
"ACDBDETAILVIEWSTYLE",
"ACDBSECTIONVIEWSTYLE",
"RASTERVARIABLES",
]
REQUIRED_CLASSES = {
DXF2000: REQ_R2000,
DXF2004: REQ_R2004,
}
class ClassesSection:
def __init__(
self, doc: "Drawing" = None, entities: Iterable[DXFEntity] = None
):
# Multiple entries for 'name' possible -> key is (name, cpp_class_name)
# DXFClasses are not stored in the entities database, because CLASS has
# no handle.
self.classes: Dict[Tuple[str, str], DXFClass] = OrderedDict()
self.doc = doc
if entities is not None:
self.load(iter(entities))
def __iter__(self) -> Iterable[DXFClass]:
return (cls for cls in self.classes.values())
def load(self, entities: Iterator[DXFEntity]) -> None:
section_head = cast(DXFTagStorage, next(entities))
if section_head.dxftype() != "SECTION" or section_head.base_class[
1
] != (2, "CLASSES"):
raise DXFStructureError(
"Critical structure error in CLASSES section."
)
for cls_entity in entities:
if isinstance(cls_entity, DXFClass):
self.register(cls_entity)
else:
logger.warning(
f"Ignored invalid DXF entity type '{cls_entity.dxftype()}'"
f" in section CLASSES."
)
def register(
self, classes: Union[DXFClass, Iterable[DXFClass]] = None
) -> None:
if classes is None:
return
if isinstance(classes, DXFClass):
classes = (classes,)
for dxfclass in classes:
key = dxfclass.key
if key not in self.classes:
self.classes[key] = dxfclass
def add_class(self, name: str):
"""Register a known class by `name`."""
if name not in CLASS_DEFINITIONS:
return
cls_data = CLASS_DEFINITIONS[name]
cls = DXFClass.new(doc=self.doc)
cpp, app, flags, proxy, entity = cls_data
cls.update_dxf_attribs(
{
"name": name,
"cpp_class_name": cpp,
"app_name": app,
"flags": flags,
"was_a_proxy": proxy,
"is_an_entity": entity,
}
)
self.register(cls)
def get(self, name: str) -> DXFClass:
"""Returns the first class matching `name`.
Storage key is the ``(name, cpp_class_name)`` tuple, because there are
some classes with the same :attr:`name` but different
:attr:`cpp_class_names`.
"""
for cls in self.classes.values():
if cls.dxf.name == name:
return cls
raise DXFKeyError(name)
def add_required_classes(self, dxfversion: str) -> None:
"""Add all required CLASS definitions for `dxfversion`."""
names = REQUIRED_CLASSES.get(dxfversion, REQ_R2004)
for name in names:
self.add_class(name)
if self.doc is None: # testing environment SUT
return
dxf_types_in_use = self.doc.entitydb.dxf_types_in_use()
if "IMAGE" in dxf_types_in_use:
self.add_class("IMAGE")
self.add_class("IMAGEDEF")
self.add_class("IMAGEDEF_REACTOR")
if "PDFUNDERLAY" in dxf_types_in_use:
self.add_class("PDFDEFINITION")
self.add_class("PDFUNDERLAY")
if "DWFUNDERLAY" in dxf_types_in_use:
self.add_class("DWFDEFINITION")
self.add_class("DWFUNDERLAY")
if "DGNUNDERLAY" in dxf_types_in_use:
self.add_class("DGNDEFINITION")
self.add_class("DGNUNDERLAY")
if "EXTRUDEDSURFACE" in dxf_types_in_use:
self.add_class("EXTRUDEDSURFACE")
self.add_class("ACDBASSOCEXTRUDEDSURFACEACTIONBODY")
if "LOFTEDSURFACE" in dxf_types_in_use:
self.add_class("LOFTEDSURFACE")
self.add_class("ACDBASSOCLOFTEDSURFACEACTIONBODY")
if "REVOLVEDSURFACE" in dxf_types_in_use:
self.add_class("REVOLVEDSURFACE")
self.add_class("ACDBASSOCREVOLVEDSURFACEACTIONBODY")
if "SWEPTSURFACE" in dxf_types_in_use:
self.add_class("SWEPTSURFACE")
self.add_class("ACDBASSOCSWEPTSURFACEACTIONBODY")
for dxftype in dxf_types_in_use:
self.add_class(dxftype)
def export_dxf(self, tagwriter: "TagWriter") -> None:
"""Export DXF tags. (internal API)"""
tagwriter.write_str(" 0\nSECTION\n 2\nCLASSES\n")
for dxfclass in self.classes.values():
dxfclass.export_dxf(tagwriter)
tagwriter.write_str(" 0\nENDSEC\n")
def update_instance_counters(self) -> None:
"""Update CLASS instance counter for all registered classes, requires
DXF R2004+.
"""
assert self.doc is not None
if self.doc.dxfversion < DXF2004:
return # instance counter not supported
counter: Dict[str, int] = Counter()
# count all entities in the entity database
for entity in self.doc.entitydb.values():
counter[entity.dxftype()] += 1
for dxfclass in self.classes.values():
dxfclass.dxf.instance_count = counter[dxfclass.dxf.name]
```
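The section is normally reached through the `Drawing.classes` attribute. A short sketch, assuming that attribute name as documented by ezdxf:
```python
import ezdxf
from ezdxf.lldxf.const import DXF2004

doc = ezdxf.new("R2004")
classes = doc.classes

# Register the required CLASS entries for DXF R2004 plus all entity types
# already used in the document (register() silently skips duplicates):
classes.add_required_classes(DXF2004)

# Look up a registered class by name, first match wins:
scale_cls = classes.get("SCALE")
print(scale_cls.dxf.cpp_class_name)  # "AcDbScale"
```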
#### File: ezdxf/tools/crypt.py
```python
from typing import Iterable
_decode_table = {
0x20: ' ',
0x40: '_',
0x5F: '@',
}
for c in range(0x41, 0x5F):
_decode_table[c] = chr(0x41 + (0x5E - c)) # 0x5E -> 'A', 0x5D->'B', ...
def decode(text_lines: Iterable[str]) -> Iterable[str]:
""" Decode the Standard :term:`ACIS` Text (SAT) format "encrypted" by AutoCAD. """
def _decode(text):
dectab = _decode_table # fast local var
s = []
text = bytes(text, 'ascii')
skip = False
for c in text:
if skip:
skip = False
continue
if c in dectab:
s += dectab[c]
skip = (c == 0x5E) # skip space after 'A'
else:
s += chr(c ^ 0x5F)
return ''.join(s)
return (_decode(line) for line in text_lines)
_encode_table = {
' ': ' ', # 0x20
'_': '@', # 0x40
'@': '_', # 0x5F
}
for c in range(0x41, 0x5F):
_encode_table[chr(c)] = chr(0x5E - (c - 0x41)) # 'A'->0x5E, 'B'->0x5D, ...
def encode(text_lines: Iterable[str]) -> Iterable[str]:
""" Encode the Standard :term:`ACIS` Text (SAT) format by AutoCAD "encryption" algorithm. """
def _encode(text):
s = []
enctab = _encode_table # fast local var
for c in text:
if c in enctab:
s += enctab[c]
if c == 'A':
s += ' ' # an 'A' encodes as '^' plus a trailing space, see skip logic in decode()
else:
s += chr(ord(c) ^ 0x5F)
return ''.join(s)
return (_encode(line) for line in text_lines)
```
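A round-trip sketch for the two generators above; the SAT content line is made up, but for plain ASCII text `decode(encode(lines))` returns the input unchanged:
```python
from ezdxf.tools import crypt

lines = ["ACIS BinaryFile ABC 123"]
encoded = list(crypt.encode(lines))
decoded = list(crypt.decode(encoded))
assert decoded == lines  # encode/decode are inverse operations for ASCII text
```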
#### File: ezdxf/tools/indexing.py
```python
from typing import Iterable
class Index:
def __init__(self, item):
try:
self.length = len(item)
except TypeError:
self.length = int(item)
def index(self, item: int, error=None) -> int:
if item < 0:
result = self.length + int(item)
else:
result = int(item)
if error and not (0 <= result < self.length):
raise error('index out of range')
return result
def slicing(self, *args) -> Iterable[int]:
if isinstance(args[0], slice):
s = args[0]
else:
s = slice(*args)
return range(*s.indices(self.length))
```
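`Index` resolves negative indices and slices against a fixed length, e.g. the vertex count of an entity. A quick sketch:
```python
from ezdxf.tools.indexing import Index

idx = Index(10)  # length taken from len(item) or int(item)
assert idx.index(-1) == 9  # negative indices count from the end
assert list(idx.slicing(2, 8, 2)) == [2, 4, 6]

try:
    idx.index(99, error=IndexError)  # raises only if an error class is given
except IndexError:
    print("out of range")
```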
#### File: ezdxf/tools/__init__.py
```python
from typing import Tuple, Any, Iterable
from uuid import uuid4
import functools
import html
from .juliandate import juliandate, calendardate
from .binarydata import hex_strings_to_bytes, bytes_to_hexstr
escape = functools.partial(html.escape, quote=True)
def float2transparency(value: float) -> int:
"""
Returns DXF transparency value as integer in the range from ``0`` to ``255``, where ``0`` is 100% transparent
and ``255`` is opaque.
Args:
value: transparency value as float in the range from ``0`` to ``1``, where ``0`` is opaque
and ``1`` is 100% transparency.
"""
return int((1.0 - float(value)) * 255) | 0x02000000
def transparency2float(value: int) -> float:
"""
Returns transparency value as float from ``0`` to ``1``, ``0`` for no transparency (opaque) and ``1``
for 100% transparency.
Args:
value: DXF integer transparency value, ``0`` for 100% transparency and ``255`` for opaque
"""
# 255 -> 0.
# 0 -> 1.
return 1.0 - float(int(value) & 0xFF) / 255.0
def set_flag_state(flags: int, flag: int, state: bool = True) -> int:
"""Set/clear binary `flag` in data `flags`.
Args:
flags: data value
flag: flag to set/clear
state: ``True`` for setting, ``False`` for clearing
"""
if state:
flags = flags | flag
else:
flags = flags & ~flag
return flags
def guid() -> str:
"""Returns a general unique ID, based on :func:`uuid.uuid4`.
This function creates a GUID for the header variables $VERSIONGUID and
$FINGERPRINTGUID, which matches the AutoCAD pattern
``{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}``.
"""
return "{" + str(uuid4()).upper() + "}"
def take2(iterable: Iterable) -> Iterable[Tuple[Any, Any]]:
"""Iterate `iterable` as 2-tuples.
:code:`[1, 2, 3, 4, ...] -> (1, 2), (3, 4), ...`
"""
store = None
for item in iterable:
if store is None:
store = item
else:
yield store, item
store = None
def suppress_zeros(s: str, leading: bool = False, trailing: bool = True):
"""Suppress trailing and/or leading ``0`` of string `s`.
Args:
s: data string
leading: suppress leading ``0``
trailing: suppress trailing ``0``
"""
# is anything to do?
if (not leading) and (not trailing):
return s
# if `s` represents zero
if float(s) == 0.0:
return "0"
# preserve sign
if s[0] in "-+":
sign = s[0]
s = s[1:]
else:
sign = ""
# strip zeros
if leading:
s = s.lstrip("0")
if trailing and "." in s:
s = s.rstrip("0")
# remove trailing '.' or ',' if no decimals follow
if s[-1] in ".,":
s = s[:-1]
return sign + s
def normalize_text_angle(angle: float, fix_upside_down=True) -> float:
"""
Normalizes text `angle` to the range from 0 to 360 degrees and fixes upside down text angles.
Args:
angle: text angle in degrees
fix_upside_down: rotate upside down text angles by 180 degrees
"""
angle = angle % 360.0 # normalize angle (0 .. 360)
if fix_upside_down and (90 < angle <= 270): # flip text orientation
angle -= 180
angle = angle % 360.0 # normalize again
return angle
```
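A few sanity checks for the helpers above; the expected values follow directly from the formulas:
```python
from ezdxf.tools import (
    float2transparency,
    transparency2float,
    set_flag_state,
    take2,
    suppress_zeros,
)

# 50% transparency -> 127 of 255, plus the "transparency enabled" marker bit:
assert float2transparency(0.5) == 0x0200007F
assert abs(transparency2float(0x0200007F) - 0.502) < 0.001

assert set_flag_state(0b0101, 0b0010, state=True) == 0b0111
assert list(take2([1, 2, 3, 4])) == [(1, 2), (3, 4)]
assert suppress_zeros("1.5000") == "1.5"
```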
#### File: ezdxf/tools/pattern.py
```python
from typing import Dict, List, Sequence, Tuple
from ezdxf.math import Vec2
from ._iso_pattern import ISO_PATTERN
# Predefined hatch patterns prior to ezdxf v0.11 were scaled for imperial units
# and were therefore too small for ISO units by a factor of 1/25.4; to replicate
# this pattern scaling use load(measurement=0).
__all__ = [
"load",
"scale_pattern",
"scale_all",
"parse",
"ISO_PATTERN",
"IMPERIAL_PATTERN",
"HatchPatternLineType",
"HatchPatternType",
"PatternAnalyser",
]
IMPERIAL_SCALE_FACTOR = 1.0 / 25.4
HatchPatternLineType = Tuple[
float, Sequence[float], Sequence[float], Sequence[float]
]
HatchPatternType = Sequence[HatchPatternLineType]
def load(measurement: int = 1, factor: float = None):
"""Load hatch pattern definition, default scaling is like the iso.pat of
BricsCAD, set `measurement` to 0 to use the imperial (US) scaled pattern,
which has a scaling factor of 1/25.4 = ~0.03937.
Args:
measurement: like the $MEASUREMENT header variable, 0 to use the imperial
scaled pattern, 1 to use the ISO scaled pattern.
factor: hatch pattern scaling factor, overrides `measurement`
Returns: dict of scaled hatch patterns
"""
if factor is None:
factor = 1.0 if measurement == 1 else IMPERIAL_SCALE_FACTOR
pattern = ISO_PATTERN
if factor != 1.0:
pattern = scale_all(pattern, factor=factor)
return pattern
def scale_pattern(
pattern: HatchPatternType, factor: float = 1, angle: float = 0
) -> HatchPatternType:
ndigits = 10
def _scale(iterable) -> Sequence[float]:
return [round(i * factor, ndigits) for i in iterable]
def _scale_line(line) -> HatchPatternLineType:
angle0, base_point, offset, dash_length_items = line
if angle:
base_point = Vec2(base_point).rotate_deg(angle)
offset = Vec2(offset).rotate_deg(angle)
angle0 = (angle0 + angle) % 360.0
# noinspection PyTypeChecker
return [ # type: ignore
round(angle0, ndigits),
tuple(_scale(base_point)),
tuple(_scale(offset)),
_scale(dash_length_items),
]
return [_scale_line(line) for line in pattern]
def scale_all(pattern: dict, factor: float = 1, angle: float = 0):
return {
name: scale_pattern(p, factor, angle) for name, p in pattern.items()
}
def parse(pattern: str) -> Dict:
try:
comp = PatternFileCompiler(pattern)
return comp.compile_pattern()
except Exception:
raise ValueError("Incompatible pattern definition.")
def _tokenize_pattern_line(line: str) -> List:
return line.split(",", maxsplit=1 if line.startswith("*") else -1)
class PatternFileCompiler:
def __init__(self, content: str):
self._lines = [
_tokenize_pattern_line(line)
for line in (line.strip() for line in content.split("\n"))
if line and line[0] != ";"
]
def _parse_pattern(self):
pattern = []
for line in self._lines:
if line[0].startswith("*"):
if pattern:
yield pattern
pattern = [[line[0][1:], line[1]]] # name, description
else:
pattern.append([float(e) for e in line]) # List[floats]
if pattern:
yield pattern
def compile_pattern(self, ndigits: int = 10) -> Dict:
pattern = dict()
for p in self._parse_pattern():
pat = []
for line in p[1:]:
# offset before rounding:
offset = Vec2(line[3], line[4])
# round all values:
line = [round(e, ndigits) for e in line]
pat_line = []
angle = line[0]
pat_line.append(angle)
# base point:
pat_line.append((line[1], line[2]))
# rotate offset:
offset = offset.rotate_deg(angle)
pat_line.append(
(round(offset.x, ndigits), round(offset.y, ndigits))
)
# line dash pattern
pat_line.append(line[5:])
pat.append(pat_line)
pattern[p[0][0]] = pat
return pattern
IMPERIAL_PATTERN = load(measurement=0)
def is_solid(pattern: Sequence[float]) -> bool:
return not bool(len(pattern))
def round_angle_15_deg(angle: float) -> int:
return round((angle % 180) / 15) * 15
class PatternAnalyser:
def __init__(self, pattern: HatchPatternType):
# List of 2-tuples: (angle, is solid line pattern)
# angle is rounded to a multiple of 15° in the range [0, 180)
self._lines: List[Tuple[int, bool]] = [
(round_angle_15_deg(angle), is_solid(line_pattern))
for angle, _, _, line_pattern in pattern
]
def has_angle(self, angle: int) -> bool:
return any(angle_ == angle for angle_, _ in self._lines)
def all_angles(self, angle: int) -> bool:
return all(angle_ == angle for angle_, _ in self._lines)
def has_line(self, angle: int, solid: bool) -> bool:
return any(
angle_ == angle and solid_ == solid
for angle_, solid_ in self._lines
)
def all_lines(self, angle: int, solid: bool) -> bool:
return all(
angle_ == angle and solid_ == solid
for angle_, solid_ in self._lines
)
def has_solid_line(self) -> bool:
return any(solid for _, solid in self._lines)
def has_dashed_line(self) -> bool:
return any(not solid for _, solid in self._lines)
def all_solid_lines(self) -> bool:
return all(solid for _, solid in self._lines)
def all_dashed_lines(self) -> bool:
return all(not solid for _, solid in self._lines)
```
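A sketch of loading the built-in pattern set and probing a single pattern with `PatternAnalyser`; the pattern name "ANSI31" (45° hatch lines) is assumed to be part of the built-in set:
```python
from ezdxf.tools import pattern

iso = pattern.load()                    # ISO scaled patterns (factor 1.0)
imperial = pattern.load(measurement=0)  # imperial scaling, factor 1/25.4

ansi31 = iso["ANSI31"]                  # assumed built-in 45 deg line pattern
analyser = pattern.PatternAnalyser(ansi31)
print(analyser.has_angle(45))           # angles are rounded to 15 deg steps
print(analyser.all_solid_lines())       # True if no dash definitions present
```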
#### File: ezdxf/tools/strip.py
```python
from typing import BinaryIO, Optional
from ezdxf.lldxf.validator import is_dxf_file, DXFStructureError
from pathlib import Path
class TagWriter:
def __init__(self, fp: BinaryIO):
self.fp = fp
def write(self, raw_code_str: bytes, raw_value_str: bytes):
self.fp.write(raw_code_str)
self.fp.write(raw_value_str)
class ThumbnailRemover(TagWriter):
def __init__(self, fp: BinaryIO):
super().__init__(fp)
self._start_section = False
self._skip_tags = False
self._section_code: Optional[bytes] = None
self._section_value: Optional[bytes] = None
self.removed_thumbnail_image = False
def write(self, raw_code_str: bytes, raw_value_str: bytes):
code = raw_code_str.strip()
value = raw_value_str.strip()
if self._start_section:
self._start_section = False
if code == b"2" and value == b"THUMBNAILIMAGE":
self._skip_tags = True
self.removed_thumbnail_image = True
else:
# write buffered section tag:
super().write(self._section_code, self._section_value) # type: ignore
if code == b"0":
if value == b"SECTION":
self._start_section = True
self._skip_tags = False
# buffer section tag:
self._section_code = raw_code_str
self._section_value = raw_value_str
return
elif value == b"ENDSEC":
skip = self._skip_tags
self._skip_tags = False
if skip: # don't write ENDSEC
return
if not self._skip_tags:
super().write(raw_code_str, raw_value_str)
def strip_comments(
infile: BinaryIO, tagwriter: TagWriter, verbose=False
) -> int:
line_number: int = 1
removed_tags: int = 0
while True:
try:
raw_code_str = infile.readline()
except EOFError:
raw_code_str = b""
if raw_code_str == b"": # regular end of file
return removed_tags
try:
code = int(raw_code_str)
except ValueError:
code_str = raw_code_str.strip().decode(
encoding="utf8", errors="ignore"
)
raise DXFStructureError(
f'CANCELED: "{infile.name}" - found invalid '
f'group code "{code_str}" at line {line_number}'
)
try:
raw_value_str = infile.readline()
except EOFError:
raw_value_str = b""
if raw_value_str == b"":
raise DXFStructureError(
f'CANCELED: "{infile.name}" - premature end of file'
)
line_number += 2
if code != 999:
tagwriter.write(raw_code_str, raw_value_str)
else:
if verbose:
value = raw_value_str.strip()
_value = value.decode(encoding="utf8", errors="ignore")
print(f'removing comment: "{_value}"')
removed_tags += 1
def safe_rename(source: Path, target: Path, backup=True, verbose=False) -> bool:
backup_file = target.with_suffix(".bak")
backup_file.unlink(missing_ok=True) # type: ignore
_target = Path(target)
if _target.exists():
if verbose:
print(f'renaming "{_target.name}" to "{backup_file.name}"')
try:
_target.rename(backup_file)
except IOError as e:
print(f"IOError: {str(e)}")
return False
if verbose:
print(f'renaming "{source.name}" to "{target.name}"')
try:
source.rename(target)
except IOError as e:
print(f"IOError: {str(e)}")
return False
if not backup:
if verbose:
print(f'deleting backup file "{backup_file.name}"')
backup_file.unlink(missing_ok=True) # type: ignore
return True
def strip(filename: str, backup=False, thumbnail=False, verbose=False):
def remove_tmp_file():
if tmp_file.exists():
if verbose:
print(f'deleting temp file: "{tmp_file.name}"')
tmp_file.unlink(missing_ok=True)
if verbose:
print(f'\nProcessing file: "{filename}"')
try:
if not is_dxf_file(filename):
print(
f'CANCELED: "{filename}" is not a DXF file, binary DXF files '
f"are not supported"
)
return
except IOError as e:
print(f"IOError: {str(e)}")
return
source_file = Path(filename)
tmp_file = source_file.with_suffix(".ezdxf.tmp")
error = False
tagwriter: TagWriter
if verbose:
print(f'make a temporary copy: "{tmp_file.name}"')
with open(tmp_file, "wb") as fp, open(source_file, "rb") as infile:
if thumbnail:
tagwriter = ThumbnailRemover(fp)
else:
tagwriter = TagWriter(fp)
try:
removed_tags = strip_comments(infile, tagwriter, verbose)
except IOError as e:
print(f"IOError: {str(e)}")
error = True
except DXFStructureError as e:
print(str(e))
error = True
if not error:
rename = False
if thumbnail and tagwriter.removed_thumbnail_image: # type: ignore
print(f'"{source_file.name}" - removed THUMBNAILIMAGE section')
rename = True
if removed_tags > 0:
tags = "tag" if removed_tags == 1 else "tags"
print(
f'"{source_file.name}" - {removed_tags} comment {tags} removed'
)
rename = True
if rename:
safe_rename(tmp_file, source_file, backup, verbose)
remove_tmp_file()
```
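Calling the stripper directly looks like this (the file name is illustrative); the same functionality backs the `strip` command of the ezdxf command line launcher:
```python
from ezdxf.tools.strip import strip

# Remove all comment tags (group code 999) and the THUMBNAILIMAGE section;
# backup=True keeps the replaced original as a .bak file:
strip("drawing.dxf", backup=True, thumbnail=True, verbose=True)
```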
#### File: ezdxf/tools/text_layout.py
```python
from typing import Sequence, Iterable, Optional, Tuple, List, NamedTuple
import abc
import itertools
import enum
from ezdxf.math import Matrix44, BoundingBox2d
from ezdxf.tools.text import leading
"""
Text Layout Engine
==================
The main goal of this text layout engine is to layout words as boxes in
columns, paragraphs, and (bullet) lists.
The starting point is a layout engine for MTEXT, which can be used for
different purposes like the drawing add-on or exploding MTEXT into DXF
primitives. But the engine is not bound to the MTEXT entity, the MTEXT
entity just defines the basic requirements.
This engine works on given (text) boxes as input and does not render the glyphs
itself, nor does it have any knowledge about the glyphs; therefore individual
kerning between letters is not supported in any way. As a consequence, the
"distributed" paragraph alignment of MTEXT cannot be supported.
Each input box can have an individual rendering object attached, derived from
the :class:`ContentRenderer` class, which requires two methods:
1. method :meth:`render` to render the box content like the text or the
container background
2. method :meth:`line` to render simple straight lines like underline,
overline and strike through strokes or fraction dividers.
Neither soft hyphens nor automatic word wrapping is supported.
Text direction is determined by the client by the given arrangement of the
input cells, but the vertical flow is always organized in lines from top to
bottom.
The main work done by the layout engine is the placing of the given content
cells. The layout engine does not change the size of content cells and only
adjusts the width of glue cells, e.g. for "justified" paragraphs.
Switching fonts or changing text size and color has to be done by the client
in the process of dividing the input text into text and glue cells and
assigning them appropriate rendering functions.
The only text styling provided by the layout engine is strokes above, through
or below one or more words, which may also span across glue cells.
Content organization
--------------------
The content is divided into containers (layout, column, paragraphs, ...) and
simple boxes for the actual content as cells like words and glue cells like
spaces or tabs.
The basic content object is a text cell, which represents a single word.
Fractions of the MTEXT entity are supported by fraction cells. Content cells
have to be separated by mandatory glue cells.
Non breaking spaces have to be fed into the layout engine as a special glue
element, because a non breaking space is also a simple space whose width should
remain adjustable for the "justified" paragraph alignment.
Containers
----------
All containers support margins.
1. Layout
Contains only columns. The size of the layout is determined by the
columns inside of the layout. Each column can have a different width.
2. Column
Contains only paragraphs. A Column has a fixed width, the height can be
fixed (MTEXT) or flexible.
3. Paragraph
A paragraph has a fixed width and the height is always flexible.
A paragraph can contain anything except the high level containers
Layout and Column.
3.1 FlowText, supports left, right, center and justified alignments;
indentation for the left side, the right side and the first line;
line spacing; no nested paragraphs or bullet lists;
The final content is distributed as lines (HCellGroup).
3.2 BulletList, the "bullet" can be any text cell, the flow text of each
list item is a paragraph with left aligned text ...
Simple Boxes
------------
Do not support margins.
1. Glue cells
The height of glue cells is always 0.
1.1 Space, flexible width but has a minimum width, possible line break
1.2 Non breaking space, like a space but prevents line break between
adjacent text cells
1.3 Tabulator, the current implementation treats tabulators like spaces.
2. Content cells
2.1 Text cell - the height of a text cell is the cap height (height of
letter "X"), ascenders and descenders are ignored.
This is not a clipping box, the associated render object can still draw
outside of the box borders, this box is only used to determine the final
layout location.
2.2 Fraction cell ... (MTEXT!)
3. AbstractLine
A line contains only simple boxes and has a fixed width.
The height is determined by the tallest box of the group.
The content cells (words) are connected/separated by mandatory glue cells.
"""
LOREM_IPSUM = """Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed
diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed
diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet
clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.
"""
class Stacking(enum.IntEnum):
OVER = 0
LINE = 1
SLANTED = 2
class LayoutAlignment(enum.IntEnum):
TOP_LEFT = 1
TOP_CENTER = 2
TOP_RIGHT = 3
MIDDLE_LEFT = 4
MIDDLE_CENTER = 5
MIDDLE_RIGHT = 6
BOTTOM_LEFT = 7
BOTTOM_CENTER = 8
BOTTOM_RIGHT = 9
class CellAlignment(enum.IntEnum):
BOTTOM = 0
CENTER = 1
TOP = 2
class ParagraphAlignment(enum.IntEnum):
LEFT = 1
RIGHT = 2
CENTER = 3
JUSTIFIED = 4
class TabStopType(enum.IntEnum):
LEFT = 0
RIGHT = 1
CENTER = 2
class TabStop(NamedTuple):
pos: float = 0.0
kind: TabStopType = TabStopType.LEFT
def lorem_ipsum(count=100):
return itertools.islice(itertools.cycle(LOREM_IPSUM.split()), count)
class ContentRenderer(abc.ABC):
@abc.abstractmethod
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
"""Render content into the given borders (lower left and upper right
corners).
Args:
left: x coordinate of the left border
bottom: y coordinate of the bottom border
right: x coordinate of the right border
top: y coordinate of the top border
m: transformation Matrix44
"""
pass
@abc.abstractmethod
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
"""Draw a line from (x1, y1) to (x2, y2)."""
pass
class DoNothingRenderer(ContentRenderer):
def render(
self,
left: float,
bottom: float,
right: float,
top: float,
m: Matrix44 = None,
) -> None:
pass
def line(
self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None
) -> None:
pass
Tuple4f = Tuple[float, float, float, float]
Tuple2f = Tuple[float, float]
def resolve_margins(margins: Optional[Sequence[float]]) -> Tuple4f:
"""Returns the box margins in CSS like order: top, right, bottom, left."""
if margins is None:
return 0, 0, 0, 0
count = len(margins)
if count == 4: # CSS: top, right, bottom, left
return margins[0], margins[1], margins[2], margins[3]
elif count == 3: # CSS: top, right, bottom, left=right
return margins[0], margins[1], margins[2], margins[1]
elif count == 2: # CSS: top, right, bottom=top, left=right
return margins[0], margins[1], margins[0], margins[1]
elif count == 1: # CSS: top, right=top, bottom=top, left=top
return margins[0], margins[0], margins[0], margins[0]
return 0, 0, 0, 0
def insert_location(
align: LayoutAlignment, width: float, height: float
) -> Tuple2f:
"""Returns the left top corner adjusted to the given alignment."""
left: float = 0.0
top: float = 0.0
center = width / 2.0
middle = height / 2.0
if align == LayoutAlignment.TOP_LEFT:
pass
elif align == LayoutAlignment.TOP_CENTER:
left, top = (-center, 0)
elif align == LayoutAlignment.TOP_RIGHT:
left, top = (-width, 0)
elif align == LayoutAlignment.MIDDLE_LEFT:
left, top = (0, middle)
elif align == LayoutAlignment.MIDDLE_CENTER:
left, top = (-center, middle)
elif align == LayoutAlignment.MIDDLE_RIGHT:
left, top = (-width, middle)
elif align == LayoutAlignment.BOTTOM_LEFT:
left, top = (0, height)
elif align == LayoutAlignment.BOTTOM_CENTER:
left, top = (-center, height)
elif align == LayoutAlignment.BOTTOM_RIGHT:
left, top = (-width, height)
return left, top
class Box(abc.ABC):
@property
@abc.abstractmethod
def total_width(self) -> float:
pass
@property
@abc.abstractmethod
def total_height(self) -> float:
pass
@abc.abstractmethod
def place(self, x: float, y: float):
"""(x, y) is the top/left corner"""
pass
@abc.abstractmethod
def final_location(self) -> Tuple[float, float]:
"""Returns the final location as the top/left corner"""
pass
@abc.abstractmethod
def render(self, m: Matrix44 = None) -> None:
"""Render content at the final location."""
pass
def bbox(self) -> BoundingBox2d:
"""Returns the 2D bounding box of the container. If the cell is
not placed the top/left corner = (0, 0).
"""
try:
x, y = self.final_location()
except (ValueError, TypeError):
x, y = 0, 0
return BoundingBox2d(
[(x, y), (x + self.total_width, y - self.total_height)]
)
class Cell(Box): # ABC
is_visible = False
def place(self, x: float, y: float):
# Base cells do not render anything, therefore placing the content is
# not necessary
pass
def final_location(self) -> Tuple[float, float]:
# Base cells do not render anything, therefore final location is not
# important
return 0, 0
def render(self, m: Matrix44 = None) -> None:
pass
class Glue(Cell): # ABC
EMPTY: Tuple = tuple()
def __init__(
self, width: float, min_width: float = None, max_width: float = None
):
self._width: float = float(width)
self._min_width = float(min_width) if min_width else self._width
self._max_width: Optional[float] = max_width
def resize(self, width: float):
max_width = self._max_width
if max_width is not None:
width = min(max_width, width)
self._width = max(width, self._min_width)
@property
def can_shrink(self):
return self._min_width < self._width
@property
def can_grow(self):
return self._max_width is None or self._width < self._max_width
@property
def total_width(self) -> float:
return self._width
@property
def total_height(self) -> float:
return 0
def to_space(self) -> "Space":
return Space(self._width, self._min_width, self._max_width)
class Space(Glue):
pass
class NonBreakingSpace(Glue):
pass
class Tabulator(Glue):
pass
class ContentCell(Cell): # ABC
"""Represents visible content like text or fractions.
Supported vertical alignments (IntEnum):
=== =================
int CellAlignment
=== =================
0 BOTTOM
1 CENTER
2 TOP
=== =================
"""
is_visible = True
def __init__(
self,
width: float,
height: float,
valign: CellAlignment = CellAlignment.BOTTOM,
renderer: ContentRenderer = None,
):
self._final_x: Optional[float] = None
self._final_y: Optional[float] = None
self._width = float(width)
self._height = float(height)
self.valign = CellAlignment(valign) # public attribute read/write
self.renderer = renderer
def set_final_location(self, x: float, y: float):
self._final_x = x
self._final_y = y
def final_location(self):
return self._final_x, self._final_y
@property
def total_width(self) -> float:
return self._width
@property
def total_height(self) -> float:
return self._height
def place(self, x: float, y: float):
"""(x, y) is the top/left corner"""
self._final_x = x
self._final_y = y
class Stroke:
# no enum because bit values can be combined: UNDERLINE + OVERLINE
NO_STROKE = 0
UNDERLINE = 1
STRIKE_THROUGH = 2
OVERLINE = 4
CONTINUE = 8 # continue stroke to following text cell
class Text(ContentCell):
"""Represents visible text content.
Supported strokes as bit values (flags), can be combined:
=== =================
int Stroke
=== =================
0 NO_STROKE
1 UNDERLINE
2 STRIKE THROUGH
4 OVERLINE
8 CONTINUE
=== =================
The CONTINUE flag extends the stroke of the current text cell across the
glue cells to the following text cell.
"""
def __init__(
self,
width: float,
height: float,
valign: CellAlignment = CellAlignment.BOTTOM,
stroke: int = Stroke.NO_STROKE,
renderer: ContentRenderer = None,
):
super().__init__(width, height, valign, renderer)
self.stroke = int(stroke) # public attribute read/write
def render(self, m: Matrix44 = None) -> None:
left, top = self.final_location()
height = self.total_height
bottom = top - height
right = left + self.total_width
self.renderer.render( # type: ignore
left=left, bottom=bottom, right=right, top=top, m=m
)
def render_stroke(
self,
extend_left: float = 0,
extend_right: float = 0,
m: Matrix44 = None,
) -> None:
left, top = self.final_location()
left -= extend_left
height = self.total_height
bottom = top - height
right = left + self.total_width + extend_right
renderer = self.renderer
assert renderer is not None
# render underline, strike through, overline
spacing = height / 5 # arbitrary stroke offset: 1/5 of the cap height
if self.stroke & Stroke.UNDERLINE:
y = bottom - spacing
renderer.line(left, y, right, y, m)
if self.stroke & Stroke.STRIKE_THROUGH:
y = (top + bottom) / 2
renderer.line(left, y, right, y, m)
if self.stroke & Stroke.OVERLINE:
y = top + spacing
renderer.line(left, y, right, y, m)
def render_cells(cells: Iterable[Cell], m: Matrix44 = None) -> None:
for cell in cells:
if cell.is_visible:
cell.render(m)
def render_text_strokes(cells: List[Cell], m: Matrix44 = None) -> None:
"""Render text cell strokes across glue cells."""
# Should be called for container with horizontal arranged text cells
# like HCellGroup to create underline, overline and strike trough
# features.
# Can not render strokes across line breaks!
def stroke_extension():
extend = 0
i = index + 1
count = len(cells)
while i < count:
cell = cells[i]
# extend stroke only across adjacent glue cells:
if isinstance(cell, Glue):
extend += cell.total_width
else:
break
i += 1
return extend
for index, cell in enumerate(cells):
if isinstance(cell, Text) and cell.stroke:
extend = stroke_extension() if cell.stroke & Stroke.CONTINUE else 0
cell.render_stroke(extend_right=extend, m=m)
class Fraction(ContentCell):
"""Represents visible fractions.
Supported stacking A/B (IntEnum):
=== =========== =========
int Stacking Description
=== =========== =========
0 OVER A over B, without horizontal line
1 LINE A over B, horizontal line between
2 SLANTED A slanted line B
=== =========== =========
"""
HEIGHT_SCALE = 1.2
def __init__(
self,
top: ContentCell,
bottom: ContentCell,
stacking: Stacking = Stacking.OVER,
valign: CellAlignment = CellAlignment.BOTTOM,
renderer: ContentRenderer = None,
):
super().__init__(0, 0, valign, renderer)
self._stacking = stacking
self._top_content = top
self._bottom_content = bottom
self._update_size()
def _update_size(self):
top = self._top_content
bottom = self._bottom_content
if self._stacking == Stacking.SLANTED:
self._height = top.total_height + bottom.total_height
self._width = top.total_width + bottom.total_width
else:
self._height = self.HEIGHT_SCALE * (
top.total_height + bottom.total_height
)
self._width = max(top.total_width, bottom.total_width)
def place(self, x: float, y: float):
"""(x, y) is the top/left corner"""
self._final_x = x
self._final_y = y
width = self.total_width
height = self.total_height
top_content = self._top_content
bottom_content = self._bottom_content
if top_content is None or bottom_content is None:
raise ValueError("no content set")
if self._stacking == Stacking.SLANTED:
top_content.place(x, y) # left/top
x += width - bottom_content.total_width
y -= height - bottom_content.total_height
bottom_content.place(x, y) # right/bottom
else:
center = x + width / 2
x = center - top_content.total_width / 2
top_content.place(x, y) # center/top
x = center - bottom_content.total_width / 2
y -= height - bottom_content.total_height
bottom_content.place(x, y) # center/bottom
def render(self, m: Matrix44 = None) -> None:
self._top_content.render(m)
self._bottom_content.render(m)
if self._stacking != Stacking.OVER:
self._render_line(m)
def _render_line(self, m: Matrix44) -> None:
x, y = self.final_location()
tw = self.total_width
th = self.total_height
if self._stacking == Stacking.LINE:
x1 = x
x2 = x + tw
y1 = y2 = y - th / 2
else: # SLANTED
delta = min(tw, th) / 2
cx = x + self._top_content.total_width
cy = y - self._top_content.total_height
x1 = cx - delta
y1 = cy - delta
x2 = cx + delta
y2 = cy + delta
self.renderer.line(x1, y1, x2, y2, m) # type: ignore
_content = (Text, Fraction)
_glue = (Space, NonBreakingSpace, Tabulator)
_no_break = (Text, NonBreakingSpace)
def normalize_cells(cells: Iterable[Cell]) -> List[Cell]:
def replace_pending_nbsp_by_spaces():
index = len(content) - 1
while index >= 0:
cell = content[index]
if isinstance(cell, NonBreakingSpace):
content[index] = cell.to_space()
index -= 1
else:
return
def is_useless_nbsp():
try:
peek = cells[index + 1]
except IndexError:
return True
if not isinstance(prev, _no_break) or not isinstance(peek, _no_break):
return True
return False
content = []
cells = list(cells)
prev = None
for index, cell in enumerate(cells):
if isinstance(cell, _content):
if isinstance(prev, _content):
raise ValueError("no glue between content cells")
elif isinstance(cell, NonBreakingSpace) and is_useless_nbsp():
cell = cell.to_space()
replace_pending_nbsp_by_spaces()
prev = cell
content.append(cell)
# remove pending glue:
while content and isinstance(content[-1], _glue):
content.pop()
return content
class Container(Box):
def __init__(
self,
width: Optional[float],
height: float = None,
margins: Sequence[float] = None,
renderer: ContentRenderer = None,
):
self._final_x: Optional[float] = None
self._final_y: Optional[float] = None
# _content_width is None for: defined by content
self._content_width: Optional[float] = width
# _content_height is None for: defined by content
self._content_height: Optional[float] = height
# margins are always defined
self._margins: Tuple4f = resolve_margins(margins)
# content renderer is optional:
self.renderer: Optional[ContentRenderer] = renderer
def place(self, x: float, y: float):
self._final_x = x
self._final_y = y
self.place_content()
def final_location(self):
if not self.is_placed():
raise ValueError("Container is not placed.")
return self._final_x, self._final_y
def is_placed(self) -> bool:
return self._final_x is not None and self._final_y is not None
@abc.abstractmethod
def __iter__(self) -> Box:
pass
@property
def top_margin(self) -> float:
return self._margins[0]
@property
def right_margin(self) -> float:
return self._margins[1]
@property
def bottom_margin(self) -> float:
return self._margins[2]
@property
def left_margin(self) -> float:
return self._margins[3]
@property
def content_width(self) -> float:
if self._content_width is None:
return 0
else:
return self._content_width
@property
def total_width(self) -> float:
return self.content_width + self.right_margin + self.left_margin
@property
def content_height(self) -> float:
if self._content_height is None:
return 0
else:
return self._content_height
@property
def has_flex_height(self):
return self._content_height is None
@property
def total_height(self) -> float:
return self.content_height + self.top_margin + self.bottom_margin
def render(self, m: Matrix44 = None) -> None:
"""Render container content.
(x, y) is the top/left corner
"""
if not self.is_placed():
raise ValueError("Layout has to be placed before rendering")
if self.renderer:
self.render_background(m)
self.render_content(m)
@abc.abstractmethod
def place_content(self):
"""Place container content at the final location."""
pass
def render_content(self, m: Matrix44 = None) -> None:
"""Render content at the final location."""
for entity in self: # type: ignore
entity.render(m)
def render_background(self, m: Matrix44) -> None:
"""Render background at the final location."""
# Render content background inclusive margins!
# (x, y) is the top/left corner
x, y = self.final_location()
if self.renderer:
self.renderer.render(
left=x,
bottom=y - self.total_height,
top=y,
right=x + self.total_width,
m=m,
)
class EmptyParagraph(Cell):
"""Spacer between two paragraphs, represents empty lines like in
"line1\n\nline2".
"""
def __init__(self, cap_height: float, line_spacing: float = 1):
self._height: float = cap_height
self._width: float = 0
self._last_line_spacing = leading(cap_height, line_spacing) - cap_height
@property
def total_width(self) -> float:
return self._width
@property
def total_height(self) -> float:
return self._height
def set_total_width(self, width: float):
self._width = width
def distribute_content(self, height: float = None):
pass
@property
def distance_to_next_paragraph(self) -> float:
return self._last_line_spacing
class Paragraph(Container):
def __init__(
self,
width: float = None, # defined by parent container
align: ParagraphAlignment = ParagraphAlignment.LEFT,
indent: Tuple[float, float, float] = (0, 0, 0),
line_spacing: float = 1,
margins: Sequence[float] = None,
tab_stops: Sequence[TabStop] = None,
renderer: ContentRenderer = None,
):
super().__init__(width, None, margins, renderer)
self._align = align
first, left, right = indent
self._indent_first = first
self._indent_left = left
self._indent_right = right
self._line_spacing = line_spacing
self._tab_stops = tab_stops or []
# contains the raw and not distributed content:
self._cells: List[Cell] = []
# contains the final distributed content:
self._lines: List[AbstractLine] = []
# space to next paragraph
self._last_line_spacing = 0.0
def __iter__(self):
return iter(self._lines)
@property
def distance_to_next_paragraph(self):
return self._last_line_spacing
def set_total_width(self, width: float):
self._content_width = width - self.left_margin - self.right_margin
if self._content_width < 1e-6:
raise ValueError("invalid width, no usable space left")
def append_content(self, content: Iterable[Cell]):
self._cells.extend(content)
def line_width(self, first: bool) -> float:
indent = self._indent_right
indent += self._indent_first if first else self._indent_left
return self.content_width - indent
def place_content(self):
x, y = self.final_location()
x += self.left_margin
y -= self.top_margin
first = True
lines = self._lines
for line in lines:
x_final = self._left_border(x, first)
line.place(x_final, y)
y -= leading(line.total_height, self._line_spacing)
first = False
def _left_border(self, x: float, first: bool) -> float:
"""Apply indentation and paragraph alignment"""
left_indent = self._indent_first if first else self._indent_left
return x + left_indent
def _calculate_content_height(self) -> float:
"""Returns the actual content height determined by the distributed
lines.
"""
lines = self._lines
line_spacing = self._line_spacing
height = 0.0
if len(lines):
last_line = lines[-1]
height = sum(
leading(line.total_height, line_spacing) for line in lines[:-1]
)
# do not add line spacing after last line!
last_line_height = last_line.total_height
self._last_line_spacing = (
leading(last_line_height, line_spacing) - last_line_height
)
height += last_line_height
return height
def distribute_content(self, height: float = None) -> Optional["Paragraph"]:
"""Distribute the raw content into lines. Returns the cells which do
not fit as a new paragraph.
Args:
height: available total height (margins + content), ``None`` for
unrestricted paragraph height
"""
def new_line(width: float) -> AbstractLine:
if align in (ParagraphAlignment.LEFT, ParagraphAlignment.JUSTIFIED):
indent = self._indent_first if first else self._indent_left
tab_stops = shift_tab_stops(self._tab_stops, -indent, width)
return (
LeftLine(width, tab_stops)
if align == ParagraphAlignment.LEFT
else JustifiedLine(width, tab_stops)
)
elif align == ParagraphAlignment.RIGHT:
return RightLine(width)
elif align == ParagraphAlignment.CENTER:
return CenterLine(width)
else:
raise ValueError(align)
cells: List[Cell] = normalize_cells(self._cells)
cells = group_non_breakable_cells(cells)
# Delete raw content:
self._cells.clear()
align: ParagraphAlignment = self._align
index: int = 0 # index of current cell
count: int = len(cells)
first: bool = True # is current line the first line?
# current paragraph height:
paragraph_height: float = self.top_margin + self.bottom_margin
# localize enums for core loop optimization:
# CPython 3.9 access is around 3x faster, no difference for PyPy 3.7!
FAIL, SUCCESS, FORCED = iter(AppendType)
while index < count:
# store index of first unprocessed cell to restore index,
# if not enough space in line
undo = index
line = new_line(self.line_width(first))
has_tab_support = line.has_tab_support
while index < count:
# core loop of paragraph processing and the whole layout engine:
cell = cells[index]
if isinstance(cell, Tabulator) and has_tab_support:
append_state = line.append_with_tab(
# a tabulator cell has always a following cell,
# see normalize_cells()!
cells[index + 1],
cell,
)
if append_state == SUCCESS:
index += 1 # consume tabulator
else:
append_state = line.append(cell)
# state check order by probability:
if append_state == SUCCESS:
index += 1 # consume current cell
elif append_state == FAIL:
break
elif append_state == FORCED:
index += 1 # consume current cell
break
if line.has_content:
line.remove_line_breaking_space()
# remove line breaking space:
if index < count and isinstance(cells[index], Space):
index += 1
line_height = line.total_height
if (
height is not None # restricted paragraph height
and paragraph_height + line_height > height
):
# Not enough space for the new line:
index = undo
break
else:
first = False
self._lines.append(line)
paragraph_height += leading(line_height, self._line_spacing)
not_all_cells_processed = index < count
if align == ParagraphAlignment.JUSTIFIED:
# distribute justified text across the line width,
# except for the VERY last line:
end = len(self._lines) if not_all_cells_processed else -1
for line in self._lines[:end]:
assert isinstance(line, JustifiedLine)
line.distribute()
# Update content height:
self._content_height = self._calculate_content_height()
# If not all cells could be processed, put them into a new paragraph
# and return it to the caller.
if not_all_cells_processed:
return self._new_paragraph(cells[index:], first)
else:
return None
def _new_paragraph(self, cells: List[Cell], first: bool) -> "Paragraph":
# First line of the paragraph included?
indent_first = self._indent_first if first else self._indent_left
indent = (indent_first, self._indent_left, self._indent_right)
paragraph = Paragraph(
self._content_width,
self._align,
indent,
self._line_spacing,
self._margins,
self._tab_stops,
self.renderer,
)
paragraph.append_content(cells)
return paragraph
class Column(Container):
def __init__(
self,
width: float,
height: float = None,
gutter: float = 0,
margins: Sequence[float] = None,
renderer: ContentRenderer = None,
):
super().__init__(width, height, margins, renderer)
# spacing between columns
self._gutter = gutter
self._paragraphs: List[Paragraph] = []
def clone_empty(self) -> "Column":
return self.__class__(
width=self.content_width,
height=self.content_height,
gutter=self.gutter,
margins=(
self.top_margin,
self.right_margin,
self.bottom_margin,
self.left_margin,
),
renderer=self.renderer,
)
def __iter__(self):
return iter(self._paragraphs)
def __len__(self):
return len(self._paragraphs)
@property
def content_height(self) -> float:
"""Returns the current content height for flexible columns and the
max. content height otherwise.
"""
max_height = self.max_content_height
if max_height is None:
return self.used_content_height()
else:
return max_height
def used_content_height(self) -> float:
paragraphs = self._paragraphs
height = 0.0
if paragraphs:
height = sum(
p.total_height + p.distance_to_next_paragraph
for p in paragraphs[:-1]
)
height += paragraphs[-1].total_height
return height
@property
def gutter(self):
return self._gutter
@property
def max_content_height(self) -> Optional[float]:
return self._content_height
@property
def has_free_space(self) -> bool:
if self.max_content_height is None: # flexible height column
return True
return self.used_content_height() < self.max_content_height
def place_content(self):
x, y = self.final_location()
x += self.left_margin
y -= self.top_margin
for p in self._paragraphs:
p.place(x, y)
y -= p.total_height + p.distance_to_next_paragraph
def append_paragraphs(
self, paragraphs: Iterable[Paragraph]
) -> List[Paragraph]:
remainer: List[Paragraph] = []
for paragraph in paragraphs:
if remainer:
remainer.append(paragraph)
continue
paragraph.set_total_width(self.content_width)
if self.has_flex_height:
height = None
else:
height = self.max_content_height - self.used_content_height() # type: ignore
rest = paragraph.distribute_content(height)
self._paragraphs.append(paragraph)
if rest is not None:
remainer.append(rest)
return remainer
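# Note on overflow handling: append_paragraphs() above returns the paragraphs
# (or the split-off rest of a paragraph) that did not fit into this column;
# the Layout class below forwards that remainder to the next column and
# clones the last column as often as needed (with a hard stop at ~100 columns
# as a safety net).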
class Layout(Container):
def __init__(
self,
width: float,
height: float = None,
margins: Sequence[float] = None,
renderer: ContentRenderer = None,
):
super().__init__(width, height, margins, renderer)
self._reference_column_width = width
self._current_column = 0
self._columns: List[Column] = []
def __iter__(self):
return iter(self._columns)
def __len__(self):
return len(self._columns)
@property
def current_column_index(self):
return self._current_column
@property
def content_width(self):
width = self._content_width
if self._columns:
width = self._calculate_content_width()
return width
def _calculate_content_width(self) -> float:
width = sum(c.total_width + c.gutter for c in self._columns[:-1])
if self._columns:
width += self._columns[-1].total_width
return width
@property
def content_height(self):
height = self._content_height
if self._columns:
height = self._calculate_content_height()
elif height is None:
height = 0
return height
def _calculate_content_height(self) -> float:
return max(c.total_height for c in self._columns)
def place(
self,
x: float = 0,
y: float = 0,
align: LayoutAlignment = LayoutAlignment.TOP_LEFT,
):
"""Place layout and all sub-entities at the final location, relative
to the insertion point (x, y) by the alignment defined by the argument
`align` (IntEnum).
=== ================
int LayoutAlignment
=== ================
1 TOP_LEFT
2 TOP_CENTER
3 TOP_RIGHT
4 MIDDLE_LEFT
5 MIDDLE_CENTER
6 MIDDLE_RIGHT
7 BOTTOM_LEFT
8 BOTTOM_CENTER
9 BOTTOM_RIGHT
=== ================
It is possible to add content after calling :meth:`place`, but
:meth:`place` has to be called again before calling :meth:`render`.
It is recommended to place the layout at origin (0, 0) and use a
transformation matrix to move the layout to the final location in
the target DXF layout.
"""
width = self.total_width
height = self.total_height
left, top = insert_location(align, width, height)
super().place(x + left, y + top)
def place_content(self):
"""Place content at the final location."""
x, y = self.final_location()
x = x + self.left_margin
y = y - self.top_margin
for column in self:
column.place(x, y)
x += column.total_width + column.gutter
def append_column(
self,
width: float = None,
height: float = None,
gutter: float = 0,
margins: Sequence[float] = None,
renderer: ContentRenderer = None,
) -> Column:
"""Append a new column to the layout."""
if not width:
width = self._reference_column_width
column = Column(
width, height, gutter=gutter, margins=margins, renderer=renderer
)
self._columns.append(column)
return column
def append_paragraphs(self, paragraphs: Iterable[Paragraph]):
remainer = list(paragraphs)
# 1. fill existing columns:
columns = self._columns
while self._current_column < len(columns):
column = columns[self._current_column]
remainer = column.append_paragraphs(remainer)
if len(remainer) == 0:
return
self._current_column += 1
# 2. create additional columns
while remainer:
column = self._new_column()
self._current_column = len(self._columns) - 1
remainer = column.append_paragraphs(remainer)
if self._current_column > 100:
raise ValueError("Internal error - not enough space!?")
def _new_column(self) -> Column:
if len(self._columns) == 0:
raise ValueError("no column exist")
empty = self._columns[-1].clone_empty()
self._columns.append(empty)
return empty
def next_column(self) -> None:
self._current_column += 1
if self._current_column >= len(self._columns):
self._new_column()
def linear_placing(cells: Sequence[Cell], x: float, y: float):
for cell in cells:
cell.place(x, y)
x += cell.total_width
class RigidConnection(ContentCell):
def __init__(
self, cells: Iterable[Cell] = None, valign=CellAlignment.BOTTOM
):
super().__init__(0, 0, valign=valign)
self._cells: List[Cell] = list(cells) if cells else []
def __iter__(self):
return iter(self._cells)
@property
def total_width(self) -> float:
return sum(cell.total_width for cell in self._cells)
@property
def total_height(self) -> float:
return max(cell.total_height for cell in self._cells)
def render(self, m: Matrix44 = None) -> None:
render_cells(self._cells, m)
render_text_strokes(self._cells, m)
def place(self, x: float, y: float):
super().place(x, y)
linear_placing(self._cells, x, y)
def growable_glue(self) -> Iterable[Glue]:
return (
cell
for cell in self._cells
if isinstance(cell, Glue) and cell.can_grow
)
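# Illustrative sketch for group_non_breakable_cells() below (the cell names
# are placeholders; the concrete non-breakable cell types are whatever is
# registered in the _no_break group elsewhere in this module): a run of
# adjacent non-breakable cells is wrapped into one RigidConnection, while
# breakable cells pass through unchanged, e.g.
#   [A, NBSP, B, Space, C] -> [RigidConnection([A, NBSP, B]), Space, C]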
def group_non_breakable_cells(cells: List[Cell]) -> List[Cell]:
def append_rigid_content(s: int, e: int):
_rigid_content = cells[s:e]
if len(_rigid_content) > 1:
new_cells.append(RigidConnection(_rigid_content))
else:
new_cells.append(_rigid_content[0])
index = 0
count = len(cells)
new_cells = []
while index < count:
cell = cells[index]
if isinstance(cell, _no_break):
start = index
index += 1
while index < count:
if not isinstance(cells[index], _no_break):
append_rigid_content(start, index)
break
index += 1
if index == count:
append_rigid_content(start, index)
else:
continue
else:
new_cells.append(cell)
index += 1
return new_cells
def vertical_cell_shift(cell: Cell, group_height: float) -> float:
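# Vertical shift of a cell inside a taller line: BOTTOM-aligned content cells
# are moved down by the full height difference, CENTER-aligned cells by half
# of it; TOP-aligned cells and non-content cells are not shifted.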
dy = 0.0
if isinstance(cell, ContentCell) and cell.valign != CellAlignment.TOP:
dy = cell.total_height - group_height
if cell.valign == CellAlignment.CENTER:
dy /= 2.0
return dy
class LineCell(NamedTuple):
cell: Cell
offset: float
locked: bool
class AppendType(enum.IntEnum):
FAIL = 0
SUCCESS = 1
FORCED = 2
class AbstractLine(ContentCell): # ABC
has_tab_support = False
def __init__(self, width: float):
super().__init__(width=width, height=0, valign=CellAlignment.BOTTOM)
self._cells: List[LineCell] = []
self._current_offset: float = 0.0
def __iter__(self):
return self.flatten()
@abc.abstractmethod
def append(self, cell: Cell) -> AppendType:
"""Append cell to the line content and report SUCCESS or FAIL."""
pass
@abc.abstractmethod
def append_with_tab(self, cell: Cell, tab: Tabulator) -> AppendType:
"""Append cell with preceding tabulator cell to the line content
and report SUCCESS or FAIL.
"""
pass
@property
def has_content(self):
return bool(self._cells)
def place(self, x: float, y: float):
super().place(x, y)
group_height = self.total_height
for line_cell in self._cells:
cell = line_cell.cell
cx = x + line_cell.offset
cy = y + vertical_cell_shift(cell, group_height)
cell.place(cx, cy)
@property
def line_width(self) -> float:
return self._width
@property
def total_width(self) -> float:
width: float = 0.0
if len(self._cells):
last_cell = self._cells[-1]
width = last_cell.offset + last_cell.cell.total_width
return width
@property
def total_height(self) -> float:
if len(self._cells):
return max(c.cell.total_height for c in self._cells)
return 0.0
def cells(self) -> Iterable[Cell]:
"""Yield line content including RigidConnections."""
return [c.cell for c in self._cells]
def flatten(self) -> Iterable[Cell]:
"""Yield line content with resolved RigidConnections."""
for cell in self.cells():
if isinstance(cell, RigidConnection):
yield from cell
else:
yield cell
def render(self, m: Matrix44 = None) -> None:
cells = list(self.cells())
render_cells(cells, m)
render_text_strokes(cells, m)
def remove_line_breaking_space(self):
"""Remove the last space in the line."""
_cells = self._cells
if _cells and isinstance(_cells[-1].cell, Space):
_cells.pop()
class LeftLine(AbstractLine):
has_tab_support = True
def __init__(self, width: float, tab_stops: Sequence[TabStop] = None):
super().__init__(width=width)
self._tab_stops = tab_stops or [] # tab stops relative to line start
def _append_line_cell(
self, cell: Cell, offset: float, locked: bool = False
) -> None:
self._cells.append(LineCell(cell, offset, locked))
def append(self, cell: Cell) -> AppendType:
width = cell.total_width
if self._current_offset + width <= self.line_width:
self._append_line_cell(cell, self._current_offset)
self._current_offset += width
return AppendType.SUCCESS
if len(self._cells) == 0:
# single cell is too wide for a line,
# forced rendering with oversize
self._append_line_cell(cell, 0)
return AppendType.FORCED
return AppendType.FAIL
def append_with_tab(self, cell: Cell, tab: Tabulator) -> AppendType:
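# Sketch of the tab handling below: look up the next tab stop to the right of
# the current position; a LEFT stop places the cell's left edge at the stop,
# a CENTER stop centers the cell on it, and a RIGHT stop aligns the cell's
# right edge with it. If no suitable stop is found, the tabulator degrades to
# a plain space and the cell is appended normally.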
width = cell.total_width
pos = self._current_offset
# does content fit into line:
if pos + width > self.line_width:
return AppendType.FAIL
# next possible tab stop location:
left_pos = pos
center_pos = pos + width / 2
right_pos = pos + width
tab_stop = self._next_tab_stop(left_pos, center_pos, right_pos)
if tab_stop is None: # no tab stop found
self.append(tab.to_space()) # replace tabulator by space
return self.append(cell)
else:
if tab_stop.kind == TabStopType.LEFT:
return self._append_left(cell, tab_stop.pos)
elif tab_stop.kind == TabStopType.CENTER:
return self._append_center(cell, tab_stop.pos)
else:
return self._append_right(cell, tab_stop.pos)
def _append_left(self, cell, pos) -> AppendType:
width = cell.total_width
if pos + width <= self.line_width:
self._append_line_cell(cell, pos, True)
self._current_offset = pos + width
return AppendType.SUCCESS
return AppendType.FAIL
def _append_center(self, cell, pos) -> AppendType:
width2 = cell.total_width / 2
if self._current_offset + width2 > pos:
return self.append(cell)
elif pos + width2 <= self.line_width:
self._append_line_cell(cell, pos - width2, True)
self._current_offset = pos + width2
return AppendType.SUCCESS
return AppendType.FAIL
def _append_right(self, cell, pos) -> AppendType:
width = cell.total_width
end_of_cell_pos = self._current_offset + width
if end_of_cell_pos > self.line_width:
return AppendType.FAIL
if end_of_cell_pos > pos:
return self.append(cell)
self._append_line_cell(cell, pos - width, True)
self._current_offset = pos
return AppendType.SUCCESS
def _next_tab_stop(self, left, center, right):
for tab in self._tab_stops:
if tab.kind == TabStopType.LEFT and tab.pos > left:
return tab
elif tab.kind == TabStopType.CENTER and tab.pos > center:
return tab
elif tab.kind == TabStopType.RIGHT and tab.pos > right:
return tab
return None
def content_width(cells: Iterable[Cell]) -> float:
return sum(cell.total_width for cell in cells)
def growable_cells(cells: Iterable[Cell]) -> List[Glue]:
growable = []
for cell in cells:
if isinstance(cell, Glue) and cell.can_grow:
growable.append(cell)
elif isinstance(cell, RigidConnection):
growable.extend(cell.growable_glue())
return growable
def update_offsets(cells: List[LineCell], index: int) -> None:
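# Recompute the offsets of all cells from `index` on, so that each cell
# starts directly after its predecessor; used after JustifiedLine has resized
# its growable Glue cells.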
count = len(cells)
if count == 0 or index > count:
return
last_cell = cells[index - 1]
offset = last_cell.offset + last_cell.cell.total_width
while index < count:
cell = cells[index].cell
cells[index] = LineCell(cell, offset, False)
offset += cell.total_width
index += 1
class JustifiedLine(LeftLine):
def distribute(self):
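# Distribute the unused horizontal space of this line evenly across all
# growable Glue cells (typically spaces) behind the last tab-locked cell,
# then recompute the offsets of the moved cells.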
cells = self._cells
last_locked_cell = self._last_locked_cell()
if last_locked_cell == len(cells):
return
available_space = self._available_space(last_locked_cell)
cells = [c.cell for c in cells[last_locked_cell + 1 :]]
modified = False
while True:
growable = growable_cells(cells)
if len(growable) == 0:
break
space_to_distribute = available_space - content_width(cells)
if space_to_distribute <= 1e-9:
break
delta = space_to_distribute / len(growable)
for cell in growable:
cell.resize(cell.total_width + delta)
modified = True
if modified:
update_offsets(self._cells, last_locked_cell + 1)
def _end_offset(self, index):
cell = self._cells[index]
return cell.offset + cell.cell.total_width
def _available_space(self, index):
return self.line_width - self._end_offset(index)
def _last_locked_cell(self):
cells = self._cells
index = len(cells) - 1
while index > 0:
if cells[index].locked:
return index
index -= 1
return 0
class NoTabLine(AbstractLine):
"""Base class for lines without tab stop support!"""
has_tab_support = False
def append(self, cell: Cell) -> AppendType:
if isinstance(cell, Tabulator):
cell = cell.to_space()
width = cell.total_width
if self._current_offset + width < self.line_width:
self._cells.append(LineCell(cell, self._current_offset, False))
self._current_offset += width
return AppendType.SUCCESS
if len(self._cells) == 0:
# single cell is too wide for a line,
# forced rendering with oversize
self._cells.append(LineCell(cell, 0, False))
return AppendType.FORCED
return AppendType.FAIL
def append_with_tab(self, cell: Cell, tab: Tabulator) -> AppendType:
"""No tabulator support!"""
raise NotImplementedError()
def place(self, x: float, y: float):
# shift the line cell:
super().place(x + self.start_offset(), y)
@abc.abstractmethod
def start_offset(self) -> float:
pass
class CenterLine(NoTabLine):
"""Right aligned lines do not support tab stops!"""
def start_offset(self) -> float:
real_width = sum(c.cell.total_width for c in self._cells)
return (self.line_width - real_width) / 2
class RightLine(NoTabLine):
"""Right aligned lines do not support tab stops!"""
def start_offset(self) -> float:
real_width = sum(c.cell.total_width for c in self._cells)
return self.line_width - real_width
def shift_tab_stops(
tab_stops: Iterable[TabStop], offset: float, right_border: float
) -> List[TabStop]:
return [
tab_stop
for tab_stop in (TabStop(pos + offset, kind) for pos, kind in tab_stops)
if 0 < tab_stop.pos <= right_border
]
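# Minimal usage sketch for the classes above -- an assumption about how they
# are meant to compose, not authoritative API documentation. It presumes a
# concrete ContentRenderer implementation (`my_renderer`) and prepared
# Paragraph instances (`par1`, `par2`) created elsewhere, and that the
# Container base class provides a render() method:
#
#   layout = Layout(width=200, height=None, renderer=my_renderer)
#   layout.append_column(width=90, height=150, gutter=10)
#   layout.append_paragraphs([par1, par2])  # overflow spills into new columns
#   layout.place(0, 0, align=LayoutAlignment.TOP_LEFT)
#   layout.render()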
```
#### File: tests/test_00_dxf_low_level_structs/test_020_validators_and_fixer.py
```python
import pytest
from ezdxf.lldxf.validator import (
is_in_integer_range,
is_valid_aci_color,
is_valid_layer_name,
is_valid_lineweight,
is_not_null_vector,
is_positive,
fix_lineweight,
is_integer_bool,
is_valid_one_line_text,
fix_one_line_text,
is_not_zero,
is_not_negative,
is_one_of,
is_in_float_range,
fit_into_float_range,
fix_integer_bool,
fit_into_integer_range,
is_valid_bitmask,
fix_bitmask,
is_greater_or_equal_zero,
is_handle,
is_transparency,
is_valid_rgb,
)
from ezdxf.entities.layer import is_valid_layer_color_index, fix_layer_color
def test_invalid_layer_name():
assert is_valid_layer_name("Layer Layer") is True
assert is_valid_layer_name("Layer/") is False
assert is_valid_layer_name("Layer*") is False
assert is_valid_layer_name("*Layer") is False
assert is_valid_layer_name("Layer=") is False
assert is_valid_layer_name("Layer;") is False
assert is_valid_layer_name("Layer:") is False
assert is_valid_layer_name("Layer<") is False
assert is_valid_layer_name("Layer>") is False
assert is_valid_layer_name("Layer`") is False
assert is_valid_layer_name("\\Layer`") is False
assert is_valid_layer_name('"Layer"') is False
def test_strange_but_valid_layer_name():
assert is_valid_layer_name("Layer|Layer") is True
def test_is_adsk_special_layer():
assert is_valid_layer_name("*adsk_xyz") is True
assert is_valid_layer_name("*ADSK_xyz") is True
assert is_valid_layer_name("ADSK_xyz*") is False
def test_is_valid_lineweight():
assert is_valid_lineweight(0) is True
assert is_valid_lineweight(50) is True
assert is_valid_lineweight(211) is True
assert is_valid_lineweight(-4) is False, "is too small"
assert is_valid_lineweight(212) is False, "is too big"
assert is_valid_lineweight(10) is False
def test_lineweight_fixer():
assert fix_lineweight(-4) == -1, "too small, fix as BYLAYER"
assert fix_lineweight(212) == 211, "too big, fix as biggest lineweight"
assert fix_lineweight(10) == 13, "invalid, fix as next valid lineweight"
def test_is_valid_aci_color():
assert is_valid_aci_color(-1) is False
assert is_valid_aci_color(0) is True
assert is_valid_aci_color(257) is True
assert is_valid_aci_color(258) is False
def test_is_in_integer_range():
validator = is_in_integer_range(1, 10)
assert validator(0) is False
assert validator(1) is True
assert validator(9) is True
assert validator(10) is False, "exclude end value"
def test_fit_into_integer_range():
fixer = fit_into_integer_range(0, 6)
assert fixer(-1) == 0
assert fixer(0) == 0
assert fixer(5) == 5
assert fixer(6) == 5, "exclude end value"
def test_is_in_float_range():
validator = is_in_float_range(1, 10)
assert validator(0) is False
assert validator(1) is True
assert validator(9) is True
assert validator(10) is True, "include end value"
def test_fit_into_float_range():
fixer = fit_into_float_range(0.25, 4.00)
assert fixer(0.24) == 0.25
assert fixer(0.25) == 0.25
assert fixer(0.50) == 0.50
assert fixer(4.00) == 4.00, "include end value"
assert fixer(4.01) == 4.00
def test_is_not_null_vector():
assert is_not_null_vector((0, 0, 1)) is True
assert is_not_null_vector((0, 1, 0)) is True
assert is_not_null_vector((1, 0, 0)) is True
assert is_not_null_vector((0, 0, 0)) is False
def test_is_positive_value():
assert is_positive(1) is True
assert is_positive(0.5) is True
assert is_positive(0) is False
assert is_positive(0.0) is False
assert is_positive(-1) is False
def test_is_integer_bool():
assert is_integer_bool(0) is True
assert is_integer_bool(1) is True
assert is_integer_bool(2) is False
assert is_integer_bool(-1) is False
def test_fix_integer_bool():
assert fix_integer_bool(0) == 0
assert fix_integer_bool(1) == 1
assert fix_integer_bool(None) == 0
assert fix_integer_bool("") == 0
assert fix_integer_bool("A") == 1
assert fix_integer_bool(2) == 1
assert fix_integer_bool(-1) == 1
@pytest.mark.parametrize(
"invalid_text",
[
"test\ntext\r",
"test\r\ntext",
"testtext^",
"test\ntext^",
"test\ntext^\r",
],
)
def test_is_valid_one_line_text(invalid_text):
assert is_valid_one_line_text(invalid_text) is False
@pytest.mark.parametrize(
"invalid_text",
[
"test\ntext\r",
"test\r\ntext",
"testtext^",
"test\ntext^",
"test\ntext^\r",
],
)
def test_fix_invalid_one_line_text(invalid_text):
assert fix_one_line_text(invalid_text) == "testtext"
def test_is_not_negative():
assert is_not_negative(-1) is False
assert is_not_negative(-1e-9) is False
assert is_not_negative(0) is True
assert is_not_negative(1e-9) is True
assert is_not_negative(1) is True
def test_is_not_zero():
assert is_not_zero(-1) is True
assert is_not_zero(-1e-9) is True
assert is_not_zero(1e-9) is True
assert is_not_zero(1) is True
assert is_not_zero(0) is False
assert is_not_zero(0.0) is False
assert is_not_zero(1e-12) is False
assert is_not_zero(-1e-12) is False
def test_is_one_of():
_validator = is_one_of({1, 3, 5})
assert _validator(0) is False
assert _validator(2) is False
assert _validator(4) is False
assert _validator(1) is True
assert _validator(3) is True
assert _validator(5) is True
def test_is_greater_or_equal_zero():
assert is_greater_or_equal_zero(-1) is False
assert is_greater_or_equal_zero(0) is True
assert is_greater_or_equal_zero(1) is True
def test_is_valid_bitmask():
validator = is_valid_bitmask(3)
assert validator(0) is True
assert validator(1) is True
assert validator(2) is True
assert validator(3) is True
assert validator(4) is False
def test_fix_bitmask():
fixer = fix_bitmask(3)
assert fixer(0) == 0
assert fixer(1) == 1
assert fixer(2) == 2
assert fixer(3) == 3
assert fixer(4) == 0
assert fixer(5) == 1
@pytest.mark.parametrize("aci", [255, -7, -1, 1, 7, 255])
def test_is_valid_layer_color(aci):
assert is_valid_layer_color_index(aci) is True
assert fix_layer_color(aci) == aci
@pytest.mark.parametrize("aci", [256, 0, 256])
def test_is_not_valid_layer_color(aci):
assert is_valid_layer_color_index(aci) is False
assert fix_layer_color(aci) == 7
@pytest.mark.parametrize("handle", ["0", "100", "FEFE"])
def test_is_a_handle(handle):
assert is_handle(handle) is True
@pytest.mark.parametrize("handle", [None, 0, 0x200000, "xyz"])
def test_is_not_a_handle(handle):
assert is_handle(handle) is False
@pytest.mark.parametrize(
"t",
[
0x02000000, # 100% transparent
0x0200007F, # 50% transparent
0x020000FF, # opaque
0x01000000, # ByBlock
],
)
def test_is_transparency(t):
assert is_transparency(t) is True
@pytest.mark.parametrize("t", [None, 0, 127, 255, 0x01000001])
def test_is_not_transparency(t):
assert is_transparency(t) is False
@pytest.mark.parametrize("rgb", [
(0, 0, 0),
[0, 0, 0],
(255, 255, 255),
[255, 255, 255],
])
def test_is_valid_rgb(rgb):
assert is_valid_rgb(rgb) is True
@pytest.mark.parametrize("rgb", [
None,
1,
1.0,
"1",
(0,),
(0, 0),
(0, 0, 0, 0),
(-1, 0, 0),
(256, 0, 0),
("0", 0, 0),
(0.0, 0, 0), # no floats!
])
def test_is_not_valid_rgb(rgb):
assert is_valid_rgb(rgb) is False
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_00_dxf_low_level_structs/test_021_fix_line_coordinates.py
```python
import pytest
from ezdxf.lldxf.tagger import ascii_tags_loader
from ezdxf.lldxf.repair import fix_coordinate_order, tag_reorder_layer
from io import StringIO
def string_reorder_tagger(s):
return tag_reorder_layer(ascii_tags_loader(StringIO(s)))
def test_low_level_tagger():
tags = list(ascii_tags_loader(StringIO(TEST_LINE1)))
assert len(tags) == 14
def test_fix_line_coordinate_order():
tags = list(ascii_tags_loader(StringIO(TEST_LINE1)))
ordered_tags = list(fix_coordinate_order(tags, codes=(10, 11)))
assert ordered_tags[0] == (0, "LINE")
assert ordered_tags[-6] == (10, "1000.")
assert ordered_tags[-5] == (20, "2000.")
assert ordered_tags[-4] == (11, "1100.")
assert ordered_tags[-3] == (21, "2100.")
assert ordered_tags[-2] == (1000, "ExtData")
assert ordered_tags[-1] == (0, "EOF")
def test_fix_2d_coordinates():
ordered_tags = list(string_reorder_tagger(TEST_LINE1))
assert ordered_tags[0] == (0, "LINE")
assert ordered_tags[-6] == (10, "1000.")
assert ordered_tags[-5] == (20, "2000.")
assert ordered_tags[-4] == (11, "1100.")
assert ordered_tags[-3] == (21, "2100.")
assert ordered_tags[-2] == (1000, "ExtData")
assert ordered_tags[-1] == (0, "EOF")
def test_dont_fix_invalid_coordinates():
# do not change invalid (missing) coordinates
ordered_tags = list(string_reorder_tagger(TEST_INVALID_LINE))
assert ordered_tags[0] == (0, "LINE")
assert ordered_tags[-5] == (10, "1000.")
assert ordered_tags[-4] == (11, "1100.")
assert ordered_tags[-3] == (21, "2100.")
assert ordered_tags[-2] == (1000, "ExtData")
assert ordered_tags[-1] == (0, "EOF")
def test_fix_3d_coordinates():
ordered_tags = list(string_reorder_tagger(TEST_3D_LINE))
assert ordered_tags[0] == (0, "LINE")
assert ordered_tags[-8] == (10, "1000.")
assert ordered_tags[-7] == (20, "2000.")
assert ordered_tags[-6] == (30, "3000.")
assert ordered_tags[-5] == (11, "1100.")
assert ordered_tags[-4] == (21, "2100.")
assert ordered_tags[-3] == (31, "3100.")
assert ordered_tags[-2] == (1000, "ExtData")
assert ordered_tags[-1] == (0, "EOF")
def test_fix_two_lines_coordinate_order():
ordered_tags = list(string_reorder_tagger(TEST_TWO_LINES))
assert len(ordered_tags) == 27
assert ordered_tags[0] == (0, "LINE")
assert ordered_tags[-6] == (10, "1000.")
assert ordered_tags[-5] == (20, "2000.")
assert ordered_tags[-4] == (11, "1100.")
assert ordered_tags[-3] == (21, "2100.")
assert ordered_tags[-2] == (1000, "ExtData")
assert ordered_tags[-1] == (0, "EOF")
TEST_LINE1 = """ 0
LINE
5
45
100
AcDbEntity
8
4
6
BYLAYER
62
256
370
-1
100
AcDbLine
10
1000.
11
1100.
20
2000.
21
2100.
1000
ExtData
0
EOF
"""
TEST_INVALID_LINE = """ 0
LINE
5
45
100
AcDbEntity
8
4
6
BYLAYER
62
256
370
-1
100
AcDbLine
10
1000.
11
1100.
21
2100.
1000
ExtData
0
EOF
"""
TEST_3D_LINE = """ 0
LINE
5
45
100
AcDbEntity
8
4
6
BYLAYER
62
256
370
-1
100
AcDbLine
30
3000.
31
3100.
10
1000.
11
1100.
20
2000.
21
2100.
1000
ExtData
0
EOF
"""
TEST_TWO_LINES = """ 0
LINE
5
45
100
AcDbEntity
8
4
6
BYLAYER
62
256
370
-1
100
AcDbLine
10
1000.
11
1100.
20
2000.
21
2100.
1000
ExtData
0
LINE
5
45
100
AcDbEntity
8
4
6
BYLAYER
62
256
370
-1
100
AcDbLine
10
1000.
11
1100.
20
2000.
21
2100.
1000
ExtData
0
EOF
"""
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_00_dxf_low_level_structs/test_022_set_flag_state.py
```python
from ezdxf.tools import set_flag_state
def test_set_flag_state():
assert set_flag_state(0, 1, True) == 1
assert set_flag_state(0b10, 1, True) == 0b11
assert set_flag_state(0b111, 0b010, False) == 0b101
assert set_flag_state(0b010, 0b111, True) == 0b111
assert set_flag_state(0b1111, 0b1001, False) == 0b0110
```
#### File: tests/test_00_dxf_low_level_structs/test_056_decode_dxf_unicode.py
```python
import pytest
import ezdxf
def test_has_dxf_unicode_encoding():
assert ezdxf.has_dxf_unicode(r"\U+039B") is True
assert ezdxf.has_dxf_unicode(r"\\U+039B") is True
assert ezdxf.has_dxf_unicode(r"\U+039") is False
assert ezdxf.has_dxf_unicode(r"\U+") is False
assert ezdxf.has_dxf_unicode("ABC") is False
assert ezdxf.has_dxf_unicode("") is False
def test_successive_chars():
result = ezdxf.decode_dxf_unicode(
r"abc\U+039B\U+0391\U+0393\U+0395\U+03A1xyz"
)
assert result == r"abcΛΑΓΕΡxyz"
def test_extra_backslash():
result = ezdxf.decode_dxf_unicode(
r"abc\U+039B\\U+0391\\U+0393\\U+0395\\U+03A1xyz"
)
assert result == r"abcΛ\Α\Γ\Ε\Ρxyz"
def test_extra_digits():
result = ezdxf.decode_dxf_unicode(
r"abc\U+039B0\U+03911\U+03932\U+03953\U+03A1xyz"
)
assert result == r"abcΛ0Α1Γ2Ε3Ρxyz"
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_01_dxf_entities/test_111_unknown_dxf_entity.py
```python
import pytest
from ezdxf.entities import is_graphic_entity, is_dxf_object
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
from ezdxf.entities.dxfentity import DXFTagStorage
from ezdxf.protocols import SupportsVirtualEntities, query_virtual_entities
@pytest.fixture
def entity():
return DXFTagStorage.from_text(THE_KNOWN_UNKNOWN)
def test_default_attribs(entity):
assert entity.dxftype() == "MTEXT"
assert entity.dxf.handle == "278"
assert entity.dxf.owner == "1F"
assert entity.base_class[0] == (0, "MTEXT")
assert entity.base_class[1] == (5, "278")
def test_wrapped_mtext_is_a_graphic_entity(entity):
assert entity.is_graphic_entity is True
assert is_graphic_entity(entity) is True
def test_wrapped_mtext_is_not_a_dxf_object(entity):
assert is_dxf_object(entity) is False
def test_dxf_tag_storage_is_a_non_graphical_entity_by_default():
assert DXFTagStorage().is_graphic_entity is False
assert is_graphic_entity(DXFTagStorage()) is False
def test_dxf_export(entity):
control_tags = basic_tags_from_text(THE_KNOWN_UNKNOWN)
collector = TagCollector()
entity.export_dxf(collector)
result = collector.tags
assert result == control_tags
def test_virtual_entities(entity):
assert len(list(entity.virtual_entities())) == 0
def test_supports_virtual_entities_protocol(entity):
assert isinstance(entity, SupportsVirtualEntities) is True
assert len(query_virtual_entities(entity)) == 0
THE_KNOWN_UNKNOWN = r"""0
MTEXT
5
278
330
1F
100
AcDbEntity
8
0
100
AcDbMText
10
2762.147
20
2327.073
30
0.0
40
2.5
41
18.851
46
0.0
71
1
72
5
1
{\fArial|b0|i0|c162|p34;CHANGE;\P\P\PTEXT}
73
1
44
1.0
101
Embedded Object
70
1
10
1.0
20
0.0
30
0.0
11
2762.147
21
2327.073
31
0.0
40
18.851
41
0.0
42
15.428
43
15.042
71
2
72
1
44
18.851
45
12.5
73
0
74
0
46
0.0
"""
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_01_dxf_entities/test_139_user_record.py
```python
import pytest
from ezdxf.lldxf import const
from ezdxf.math import Vec3
from ezdxf.lldxf.tags import Tags
# noinspection PyProtectedMember
from ezdxf.urecord import (
parse_items,
compile_user_record,
parse_binary_data,
UserRecord,
BinaryRecord,
)
class TestFlatRecord:
@pytest.fixture
def tags(self):
return Tags.from_text(FLAT_RECORD)
def test_parse_all(self, tags):
data = parse_items(tags)
assert len(data) == 4
def test_parse_str(self, tags):
data = parse_items(tags)[0]
assert data == "MyString"
assert type(data) == str
def test_parse_float(self, tags):
data = parse_items(tags)[1]
assert data == 3.1415
assert type(data) is float
def test_parse_int(self, tags):
data = parse_items(tags)[2]
assert data == 255
assert type(data) is int
def test_parse_vec3(self, tags):
data = parse_items(tags)[3]
assert data == (7, 8, 9)
assert type(data) is Vec3
def test_top_level_list():
tags = Tags.from_text(LIST1)
data = parse_items(tags)
assert data == ["MyString", ["ListItem1"], "Tail"]
def test_nested_list_inside_list():
tags = Tags.from_text(LIST2)
data = parse_items(tags)
assert data == ["MyString", ["ListItem1", ["ListItem2"]], "Tail"]
def test_top_level_dict():
tags = Tags.from_text(DICT1)
data = parse_items(tags)
assert data == ["MyString", {"Key1": "Value1"}, "Tail"]
def test_nested_dict_as_dict_value():
tags = Tags.from_text(DICT2)
data = parse_items(tags)
assert data == ["MyString", {"Key1": {"Key2": "Value2"}}, "Tail"]
def test_nested_list_as_dict_value():
tags = Tags.from_text(DICT_LIST)
data = parse_items(tags)
assert data == ["MyString", {"Key": ["ListItem"]}, "Tail"]
def test_nested_dict_inside_list():
tags = Tags.from_text(LIST_DICT)
data = parse_items(tags)
assert data == [
"MyString",
["ListItem1", {"Key": "Value"}, "ListItem2"],
"Tail",
]
def test_missing_open_tag_raises_dxf_structure_error():
tags = Tags.from_text("1\nListItem1\n302\n]")
with pytest.raises(const.DXFStructureError):
parse_items(tags)
def test_missing_close_tag_raises_dxf_structure_error():
tags = Tags.from_text("302\n[\n1\nListItem1")
with pytest.raises(const.DXFStructureError):
parse_items(tags)
def test_invalid_group_code_raises_value_error():
tags = Tags.from_text("5\ninvalid\n")
with pytest.raises(const.DXFValueError):
parse_items(tags)
@pytest.mark.parametrize("char", ["\n", "\r"])
def test_invalid_line_break_characters_raise_exception(char):
with pytest.raises(const.DXFValueError):
compile_user_record("TEST", [f"{char}"])
def test_too_long_string_raise_exception():
# max. str length is 2049 - DXF R2000 limit for group codes 0-9
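# ("0123456789" * 205 is 2050 characters, one more than the 2049 limit)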
with pytest.raises(const.DXFValueError):
compile_user_record("TEST", ["0123456789" * 205])
class TestCompileData:
def test_compile_empty_data(self):
tags = compile_user_record("TEST", [])
assert tags[0] == (2, "TEST")
assert len(tags) == 1
@pytest.mark.parametrize(
"value",
[
"MyString",
257,
3.1415,
Vec3(5, 6, 7),
],
ids=["str", "int", "float", "Vec3"],
)
def test_compile_simple_types(self, value):
tags = compile_user_record("TEST", [value])
assert parse_items(tags[1:]) == [value]
@pytest.mark.parametrize(
"struct",
[
[1, 2, 3], # simple flat list
[{"key": "value"}], # top level structure has to be a list!
["head", [1, 2, 3], "tail"], # nested list
["head", {"key": "value"}, "tail"], # nested dict
["head", {"key": [1, 2, 3]}, "tail"], # list as dict value
["head", [1, 2, {"key": "value"}], "tail"], # dict inside a list
],
ids=[
"flat list",
"flat dict",
"nested list",
"nested dict",
"list as dict value",
"dict inside a list",
],
)
def test_compile_complex_structures(self, struct):
tags = compile_user_record("TEST", struct)
assert parse_items(tags[1:]) == struct
class TestUserRecord:
def test_required_final_commit_to_store_data_in_xrecord(self):
user_record = UserRecord(name="MyRecord")
user_record.data.extend(["str", 1, 3.1415])
assert len(user_record.xrecord.tags) == 0
# calling commit() stores the data in the xrecord
user_record.commit()
assert user_record.xrecord.tags == [
(2, "MyRecord"),
(1, "str"),
(90, 1),
(40, 3.1415),
]
def test_works_as_context_manager(self):
with UserRecord(name="MyRecord") as user_record:
user_record.data.extend(["str", 1, 3.1415])
# calls commit() at exit
assert user_record.xrecord.tags == [
(2, "MyRecord"),
(1, "str"),
(90, 1),
(40, 3.1415),
]
def test_str(self):
with UserRecord(name="MyRecord") as user_record:
user_record.data.extend(["str", 1, 3.1415])
assert str(user_record) == "['str', 1, 3.1415]"
def test_parse_binary_data():
assert (
parse_binary_data(Tags.from_text(BINARY_DATA))
== b"0123456789\xab\xba" * 2
)
class TestBinaryRecord:
def test_required_final_commit_to_store_data_in_xrecord(self):
user_record = BinaryRecord()
user_record.data = b"\xfe\xfe"
assert len(user_record.xrecord.tags) == 0
# calling commit() stores the data in the xrecord
user_record.commit()
assert user_record.xrecord.tags == [(160, 2), (310, b"\xfe\xfe")]
def test_works_as_context_manager(self):
with BinaryRecord() as user_record:
user_record.data = b"\xfe\xfe"
# calls commit() at exit
assert user_record.xrecord.tags == [(160, 2), (310, b"\xfe\xfe")]
def test_stores_line_endings(self):
with BinaryRecord() as user_record:
user_record.data = b"\r\n"
assert user_record.xrecord.tags == [(160, 2), (310, b"\r\n")]
def test_str(self):
record = BinaryRecord()
record.data = b"\xfe\xfe"
assert str(record) == "FEFE"
FLAT_RECORD = """1
MyString
40
3.1415
90
255
10
7
20
8
30
9
"""
LIST1 = """1
MyString
302
[
1
ListItem1
302
]
1
Tail
"""
LIST2 = """1
MyString
302
[
1
ListItem1
302
[
1
ListItem2
302
]
302
]
1
Tail
"""
DICT1 = """1
MyString
302
{
1
Key1
1
Value1
302
}
1
Tail
"""
DICT2 = """1
MyString
302
{
1
Key1
302
{
1
Key2
1
Value2
302
}
302
}
1
Tail
"""
DICT_LIST = """1
MyString
302
{
1
Key
302
[
1
ListItem
302
]
302
}
1
Tail
"""
LIST_DICT = """1
MyString
302
[
1
ListItem1
302
{
1
Key
1
Value
302
}
1
ListItem2
302
]
1
Tail
"""
BINARY_DATA = """160
24
310
30313233343536373839ABBA
310
30313233343536373839ABBA
"""
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_02_dxf_graphics/test_206_text.py
```python
import pytest
import math
import ezdxf
from ezdxf.entities.text import Text
from ezdxf.enums import TextEntityAlignment
from ezdxf.lldxf.const import DXF12, DXF2000
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
from ezdxf.math import Vec3, Matrix44
TEST_CLASS = Text
TEST_TYPE = "TEXT"
ENTITY_R12 = """0
TEXT
5
0
8
0
10
0.0
20
0.0
30
0.0
40
1.0
1
TEXTCONTENT
50
0.0
51
0.0
7
Standard
41
1.0
71
0
72
0
11
0.0
21
0.0
31
0.0
73
0
"""
ENTITY_R2000 = """0
TEXT
5
0
330
0
100
AcDbEntity
8
0
100
AcDbText
10
0.0
20
0.0
30
0.0
40
1.0
1
TEXTCONTENT
50
0.0
51
0.0
7
Standard
41
1.0
71
0
72
0
11
0.0
21
0.0
31
0.0
100
AcDbText
73
0
"""
@pytest.fixture(scope="module")
def doc():
return ezdxf.new()
@pytest.fixture(params=[ENTITY_R12, ENTITY_R2000])
def entity(request):
return TEST_CLASS.from_text(request.param)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert TEST_TYPE in ENTITY_CLASSES
def test_default_init():
entity = TEST_CLASS()
assert entity.dxftype() == TEST_TYPE
def test_default_new():
entity = TEST_CLASS.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": "7",
"insert": (1, 2, 3),
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
assert entity.dxf.linetype == "BYLAYER"
assert entity.dxf.insert == (1, 2, 3)
assert entity.dxf.insert.x == 1, "is not Vec3 compatible"
assert entity.dxf.insert.y == 2, "is not Vec3 compatible"
assert entity.dxf.insert.z == 3, "is not Vec3 compatible"
# can set DXF R2007 value
entity.dxf.shadow_mode = 1
assert entity.dxf.shadow_mode == 1
assert entity.dxf.extrusion == (0.0, 0.0, 1.0)
assert entity.dxf.hasattr("extrusion") is False, "just the default value"
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.color == 256, "default color is 256 (by layer)"
assert entity.dxf.insert == (0, 0, 0)
@pytest.mark.parametrize(
"txt,ver",
[
(ENTITY_R2000, DXF2000),
(ENTITY_R12, DXF12),
],
)
def test_write_dxf(txt, ver):
expected = basic_tags_from_text(txt)
attdef = TEST_CLASS.from_text(txt)
collector = TagCollector(dxfversion=ver, optional=True)
attdef.export_dxf(collector)
assert collector.tags == expected
collector2 = TagCollector(dxfversion=ver, optional=False)
attdef.export_dxf(collector2)
assert collector.has_all_tags(collector2)
@pytest.mark.parametrize(
"invalid_text",
[
"test\ntext\r",
"test\r\ntext",
"testtext^",
"test\ntext^",
"test\ntext^\r",
],
)
def test_removing_invalid_chars_at_setting_content(invalid_text):
txt = Text()
txt.dxf.text = invalid_text
assert txt.dxf.text == "testtext"
@pytest.fixture
def text():
return Text.new(handle="ABBA", owner="0")
def test_set_alignment(text):
text.set_placement((2, 2), align=TextEntityAlignment.TOP_CENTER)
assert text.dxf.halign == 1
assert text.dxf.valign == 3
assert text.dxf.align_point == (2, 2)
def test_set_fit_alignment(text):
text.set_placement((2, 2), (4, 2), align=TextEntityAlignment.FIT)
assert text.dxf.halign == 5
assert text.dxf.valign == 0
assert text.dxf.insert == (2, 2)
assert text.dxf.align_point == (4, 2)
def test_reset_fit_alignment(text):
text.set_placement((2, 2), (4, 2), align=TextEntityAlignment.FIT)
text.set_placement((3, 3), (5, 3))
assert text.dxf.halign == 5
assert text.dxf.valign == 0
assert text.dxf.insert == (3, 3)
assert text.dxf.align_point == (5, 3)
def test_resetting_location_raises_value_error_for_missing_point(text):
text.set_placement((2, 2), (4, 2), align=TextEntityAlignment.FIT)
with pytest.raises(ValueError):
text.set_placement((3, 3))
def test_get_align_enum(text):
text.dxf.halign = 1
text.dxf.valign = 3
assert text.get_align_enum() == TextEntityAlignment.TOP_CENTER
def test_get_pos_enum_TOP_CENTER(text):
text.set_placement((2, 2), align=TextEntityAlignment.TOP_CENTER)
align, p1, p2 = text.get_placement()
assert align == TextEntityAlignment.TOP_CENTER
assert p1 == (2, 2)
assert p2 is None
def test_get_pos_LEFT(text):
text.set_placement((2, 2))
align, p1, p2 = text.get_placement()
assert align == TextEntityAlignment.LEFT
assert p1 == (2, 2)
assert p2 is None
def test_transform_interface():
text = Text()
text.dxf.insert = (1, 0, 0)
text.transform(Matrix44.translate(1, 2, 3))
assert text.dxf.insert == (2, 2, 3)
# optimized translate
text.dxf.align_point = (3, 2, 1)
text.translate(1, 2, 3)
assert text.dxf.insert == (3, 4, 6)
assert text.dxf.align_point == (4, 4, 4)
def test_fit_length(text):
text.set_placement((2, 2), (4, 2), align=TextEntityAlignment.FIT)
assert text.fit_length() == 2
# remove align point
del text.dxf.align_point
assert text.fit_length() == 0
def test_default_font_name(text):
assert text.font_name() == "arial.ttf"
@pytest.fixture
def text2():
return Text.new(
dxfattribs={
"text": "TEXT",
"height": 1.0,
"width": 1.0,
"rotation": 0,
"layer": "text",
}
).set_placement((0, 0, 0), align=TextEntityAlignment.LEFT)
@pytest.mark.parametrize("rx, ry", [(1, 1), (-1, 1), (-1, -1), (1, -1)])
def test_scale_and_reflexion(rx, ry, text2):
insert = Vec3(0, 0, 0)
m = Matrix44.chain(
Matrix44.scale(2 * rx, 3 * ry, 1),
Matrix44.z_rotate(math.radians(45)),
Matrix44.translate(3 * rx, 3 * ry, 0),
)
text2.transform(m)
check_point = m.transform(insert)
ocs = text2.ocs()
assert ocs.to_wcs(text2.dxf.insert).isclose(check_point)
assert math.isclose(text2.dxf.height, 3.0)
assert math.isclose(text2.dxf.width, 2.0 / 3.0)
def test_non_uniform_scaling(text2):
text2.rotate_z(math.radians(30))
text2.scale(1, 2, 1)
assert math.isclose(text2.dxf.oblique, 33.004491598883064)
def test_is_backward(text):
assert text.is_backward is False
def test_set_backward(text):
text.is_backward = True
assert text.is_backward is True
def test_is_upside_down(text):
assert text.is_upside_down is False
def test_set_is_upside_down(text):
text.is_upside_down = True
assert text.is_upside_down is True
def test_get_pos_handles_missing_align_point():
"""Any text alignment except LEFT requires and uses the align_point
attribute as text location point. But there are real-world examples from
AutoCAD which do not provide the align_point even though it is required.
In this case the get_placement() method returns the insert attribute.
"""
text = Text()
text.dxf.halign = 1 # center
text.dxf.valign = 1 # bottom
text.dxf.insert = (1, 2)
text.dxf.align_point = (3, 4) # the real alignment point
# the expected and correct align point:
alignment, p1, p2 = text.get_placement()
assert p1 == (3, 4)
assert p2 is None # only used for FIT and ALIGNED
# remove the align point
del text.dxf.align_point
alignment, p1, p2 = text.get_placement()
assert p1 == (1, 2) # use the insert point instead
assert p2 is None # only used for FIT and ALIGNED
```
#### File: tests/test_02_dxf_graphics/test_207_attdef.py
```python
import pytest
from ezdxf.entities.attrib import AttDef
from ezdxf.lldxf import const
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
TEST_CLASS = AttDef
TEST_TYPE = "ATTDEF"
ENTITY_R12 = """0
ATTDEF
5
0
8
0
10
0.0
20
0.0
30
0.0
40
1.0
1
DEFAULTTEXT
50
0.0
51
0.0
7
STANDARD
41
1.0
71
0
72
0
11
0.0
21
0.0
31
0.0
3
PROMPTTEXT
2
TAG
70
0
74
0
"""
ENTITY_R2000 = """0
ATTDEF
5
0
330
0
100
AcDbEntity
8
0
100
AcDbText
10
0.0
20
0.0
30
0.0
40
1.0
1
DEFAULTTEXT
50
0.0
51
0.0
7
STANDARD
41
1.0
71
0
72
0
11
0.0
21
0.0
31
0.0
100
AcDbAttributeDefinition
3
PROMPTTEXT
2
TAG
70
0
73
0
74
0
"""
@pytest.fixture(params=[ENTITY_R12, ENTITY_R2000])
def entity(request):
return TEST_CLASS.from_text(request.param)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert TEST_TYPE in ENTITY_CLASSES
def test_default_init():
entity = TEST_CLASS()
assert entity.dxftype() == TEST_TYPE
def test_default_new():
entity = TEST_CLASS.new(
handle="ABBA",
owner="0",
dxfattribs={
"color": "7",
"insert": (1, 2, 3),
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.color == 7
assert entity.dxf.linetype == "BYLAYER"
assert entity.dxf.insert == (1, 2, 3)
assert entity.dxf.insert.x == 1, "is not Vec3 compatible"
assert entity.dxf.insert.y == 2, "is not Vec3 compatible"
assert entity.dxf.insert.z == 3, "is not Vec3 compatible"
# can set DXF R2007 value
entity.dxf.shadow_mode = 1
assert entity.dxf.shadow_mode == 1
assert entity.dxf.extrusion == (0.0, 0.0, 1.0)
assert entity.dxf.hasattr("extrusion") is False, "just the default value"
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.color == 256, "default color is 256 (by layer)"
assert entity.dxf.insert == (0, 0, 0)
@pytest.mark.parametrize(
"txt,ver", [(ENTITY_R2000, const.DXF2000), (ENTITY_R12, const.DXF12)]
)
def test_write_dxf(txt, ver):
expected = basic_tags_from_text(txt)
attdef = TEST_CLASS.from_text(txt)
collector = TagCollector(dxfversion=ver, optional=True)
attdef.export_dxf(collector)
assert collector.tags == expected
collector2 = TagCollector(dxfversion=ver, optional=False)
attdef.export_dxf(collector2)
assert collector.has_all_tags(collector2)
class TestEmbeddedMTextSupport:
@pytest.fixture
def attdef(self) -> AttDef:
return AttDef.from_text(EMBEDDED_MTEXT)
def test_has_embedded_mtext(self, attdef):
assert attdef.has_embedded_mtext_entity is True
def test_get_plain_mtext(self, attdef):
assert attdef.plain_mtext() == "TEST VENUE\nTEST FLOOR PLAN"
def test_get_virtual_mtext_entity(self, attdef):
mtext = attdef.virtual_mtext_entity()
assert mtext.plain_text() == "TEST VENUE\nTEST FLOOR PLAN"
def test_attdef_graphic_attributes(self, attdef):
assert attdef.dxf.color == 7
assert attdef.dxf.layer == "AttribLayer"
def test_mtext_graphic_attributes_inherited_from_host(self, attdef):
mtext = attdef.virtual_mtext_entity()
assert mtext.dxf.color == 7
assert mtext.dxf.layer == "AttribLayer"
def test_mtext_entity_attributes(self, attdef):
mtext = attdef.virtual_mtext_entity()
# These seem to be the required DXF tags for the embedded MTEXT entity:
assert mtext.dxf.insert.isclose((45.3, 45.0, 0))
assert mtext.dxf.char_height == 3.0
assert mtext.dxf.width == 0
assert mtext.dxf.defined_height == 0
assert mtext.dxf.attachment_point == 5
assert mtext.dxf.flow_direction == 5
assert mtext.dxf.style == "Arial_3 NARROW"
assert mtext.dxf.line_spacing_style == 1
assert mtext.dxf.line_spacing_factor == 1.0
def test_dxf_export_matches_test_data(self, attdef):
result = TagCollector.dxftags(attdef, dxfversion=const.DXF2018)
expected = basic_tags_from_text(EMBEDDED_MTEXT)
assert result == expected
EMBEDDED_MTEXT = r""" 0
ATTDEF
5
28A
330
285
100
AcDbEntity
8
AttribLayer
62
7
100
AcDbText
10
45.3
20
43.5
30
0.0
40
3.0
1
TEST VENUE
7
Arial_3 NARROW
72
1
11
45.3
21
45.0
31
0.0
100
AcDbAttributeDefinition
280
0
3
TITLE-OF-DRAWING
2
DRAWING-NAME
70
0
74
2
280
0
71
4
72
0
11
45.3
21
45.0
31
0.0
101
Embedded Object
10
45.3
20
45.0
30
0.0
40
3.0
41
0.0
46
0.0
71
5
72
5
1
TEST VENUE\PTEST FLOOR PLAN
7
Arial_3 NARROW
73
1
44
1.0
1001
AcadAnnotative
1000
AnnotativeData
1002
{
1070
1
1070
0
1002
}
"""
```
#### File: tests/test_02_dxf_graphics/test_214_block.py
```python
import pytest
from ezdxf.entities.block import Block, EndBlk
from ezdxf.lldxf.const import DXF12, DXF2000
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
TEST_CLASS = Block
TEST_TYPE = "BLOCK"
ENTITY_R12 = """0
BLOCK
5
0
8
0
2
BLOCKNAME
70
0
10
0.0
20
0.0
30
0.0
3
BLOCKNAME
1
"""
ENTITY_R2000 = """0
BLOCK
5
0
330
0
100
AcDbEntity
8
0
100
AcDbBlockBegin
2
BLOCKNAME
70
0
10
0.0
20
0.0
30
0.0
3
BLOCKNAME
1
"""
@pytest.fixture(params=[ENTITY_R12, ENTITY_R2000])
def entity(request):
return TEST_CLASS.from_text(request.param)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert TEST_TYPE in ENTITY_CLASSES
def test_default_init():
entity = TEST_CLASS()
assert entity.dxftype() == TEST_TYPE
def test_default_new():
entity = TEST_CLASS.new(
handle="ABBA",
owner="0",
dxfattribs={
"base_point": (1, 2, 3),
},
)
assert entity.dxf.layer == "0"
assert entity.dxf.base_point == (1, 2, 3)
assert entity.dxf.base_point.x == 1, "is not Vec3 compatible"
assert entity.dxf.base_point.y == 2, "is not Vec3 compatible"
assert entity.dxf.base_point.z == 3, "is not Vec3 compatible"
def test_load_from_text(entity):
assert entity.dxf.layer == "0"
assert entity.dxf.base_point == (0, 0, 0)
@pytest.mark.parametrize(
"txt,ver", [(ENTITY_R2000, DXF2000), (ENTITY_R12, DXF12)]
)
def test_write_block_dxf(txt, ver):
expected = basic_tags_from_text(txt)
block = TEST_CLASS.from_text(txt)
collector = TagCollector(dxfversion=ver, optional=True)
block.export_dxf(collector)
assert collector.tags == expected
collector2 = TagCollector(dxfversion=ver, optional=False)
block.export_dxf(collector2)
assert collector.has_all_tags(collector2)
ENDBLK_R12 = " 0\nENDBLK\n 5\n0\n 8\n0\n"
ENDBLK_R2000 = """0
ENDBLK
5
0
330
0
100
AcDbEntity
8
0
100
AcDbBlockEnd
"""
@pytest.mark.parametrize(
"txt,ver", [(ENDBLK_R2000, DXF2000), (ENDBLK_R12, DXF12)]
)
def test_write_endblk_dxf(txt, ver):
expected = basic_tags_from_text(txt)
endblk = EndBlk.from_text(txt)
collector = TagCollector(dxfversion=ver, optional=True)
endblk.export_dxf(collector)
assert collector.tags == expected
collector2 = TagCollector(dxfversion=ver, optional=False)
endblk.export_dxf(collector2)
assert collector.has_all_tags(collector2)
```
#### File: tests/test_02_dxf_graphics/test_229b_hatch_extended.py
```python
import pytest
import ezdxf
from ezdxf.entities import Hatch, BoundaryPathType, EdgeType
from ezdxf.lldxf.tagwriter import TagCollector, Tags
from ezdxf.lldxf import const
from ezdxf.math import Vec3
@pytest.fixture
def hatch():
return Hatch.new()
@pytest.fixture
def path_hatch():
return Hatch.from_text(PATH_HATCH)
@pytest.fixture
def edge_hatch():
return Hatch.from_text(EDGE_HATCH)
@pytest.fixture
def spline_edge_hatch():
return Hatch.from_text(EDGE_HATCH_WITH_SPLINE)
@pytest.fixture
def hatch_pattern():
return Hatch.from_text(HATCH_PATTERN)
def test_default_settings(hatch):
assert hatch.dxf.layer == "0"
assert hatch.dxf.color == 256 # by layer
assert hatch.dxf.linetype == "BYLAYER"
assert hatch.dxf.ltscale == 1.0
assert hatch.dxf.invisible == 0
assert hatch.dxf.extrusion == (0.0, 0.0, 1.0)
assert hatch.dxf.elevation == (0.0, 0.0, 0.0)
def test_default_hatch_settings(hatch):
assert hatch.has_solid_fill is True
assert hatch.has_gradient_data is False
assert hatch.has_pattern_fill is False
assert hatch.dxf.solid_fill == 1
assert hatch.dxf.hatch_style == 0
assert hatch.dxf.pattern_type == 1
assert hatch.dxf.pattern_angle == 0
assert hatch.dxf.pattern_scale == 1
assert hatch.dxf.pattern_double == 0
assert hatch.dxf.n_seed_points == 0
def test_get_seed_points(hatch):
assert len(hatch.seeds) == 0
def test_set_seed_points(hatch):
seed_points = [(1.0, 1.0), (2.0, 2.0)]
hatch.set_seed_points(seed_points)
assert 2 == hatch.dxf.n_seed_points
assert seed_points == hatch.seeds
def test_remove_all_paths(path_hatch):
path_hatch.paths.clear()
assert 0 == len(path_hatch.paths), "invalid boundary path count"
def test_polyline_path_attribs(path_hatch):
path = path_hatch.paths[0] # test first boundary path
assert path.type == BoundaryPathType.POLYLINE
assert 4 == len(path.vertices)
assert path.has_bulge() is False
assert path.is_closed == 1
assert 7 == path.path_type_flags, "unexpected path type flags"
def test_polyline_path_vertices(path_hatch):
path = path_hatch.paths[0] # test first boundary path
assert path.type == BoundaryPathType.POLYLINE
assert 4 == len(path.vertices)
# vertex format: x, y, bulge_value
assert (10, 10, 0) == path.vertices[0], "invalid first vertex"
assert (10, 0, 0) == path.vertices[3], "invalid last vertex"
def test_edge_path_count(edge_hatch):
assert len(edge_hatch.paths) == 1, "invalid boundary path count"
def test_edge_path_type(edge_hatch):
path = edge_hatch.paths[0]
assert path.type == BoundaryPathType.EDGE
def test_edge_path_edges(edge_hatch):
path = edge_hatch.paths[0]
edge = path.edges[0]
assert edge.type == EdgeType.ELLIPSE, "expected ellipse edge as 1. edge"
assert (10, 5) == edge.center
assert (3, 0) == edge.major_axis
assert 1.0 / 3.0 == edge.ratio
assert 270 == edge.start_angle
assert (
450 == edge.end_angle
) # this value was created by AutoCAD and equals 90 degrees
assert 1 == edge.ccw
edge = path.edges[1]
assert edge.type == EdgeType.LINE, "expected line edge type as 2. edge"
assert (10, 6) == edge.start
assert (10, 10) == edge.end
edge = path.edges[2]
assert edge.type == EdgeType.LINE, "expected line edge as 3. edge"
assert (10, 10) == edge.start
assert (6, 10) == edge.end
edge = path.edges[3]
assert edge.type == EdgeType.ARC, "expected arc edge as 4. edge"
assert (5, 10) == edge.center
assert 1 == edge.radius
# clockwise arc edge:
assert 0 == edge.ccw
# now we get converted and swapped angles
assert (
360 == 360.0 - edge.end_angle
) # this value was created by AutoCAD (0 degree)
assert (
540 == 360.0 - edge.start_angle
) # this value was created by AutoCAD (-180 degree)
assert -180 == edge.start_angle # ezdxf representation
assert 0 == edge.end_angle # ezdxf representation
edge = path.edges[4]
assert edge.type == EdgeType.LINE, "expected line edge as 5. edge"
assert (4, 10) == edge.start
assert (0, 10) == edge.end
edge = path.edges[5]
assert edge.type == EdgeType.LINE, "expected line edge as 6. edge"
assert (0, 10) == edge.start
assert (0, 0) == edge.end
edge = path.edges[6]
assert edge.type == EdgeType.LINE, "expected line edge as 7. edge"
assert (0, 0) == edge.start
assert (10, 0) == edge.end
edge = path.edges[7]
assert edge.type == EdgeType.LINE, "expected line edge as 8. edge"
assert (10, 0) == edge.start
assert (10, 4) == edge.end
def test_spline_edge_hatch_get_params(spline_edge_hatch):
path = spline_edge_hatch.paths[0]
spline = None
for edge in path.edges:
if edge.type == EdgeType.SPLINE:
spline = edge
break
assert spline is not None, "Spline edge not found."
assert 3 == spline.degree
assert 0 == spline.rational
assert 0 == spline.periodic
assert (0, 0) == spline.start_tangent
assert (0, 0) == spline.end_tangent
assert 10 == len(spline.knot_values)
assert 11.86874452602773 == spline.knot_values[-1]
assert 6 == len(spline.control_points)
assert (0, 10) == spline.control_points[
0
], "Unexpected start control point."
assert (0, 0) == spline.control_points[-1], "Unexpected end control point."
assert 0 == len(spline.weights)
assert 4 == len(spline.fit_points)
assert (0, 10) == spline.fit_points[0], "Unexpected start fit point."
assert (0, 0) == spline.fit_points[-1], "Unexpected end fit point."
def test_create_spline_edge(spline_edge_hatch):
# create the spline
path = spline_edge_hatch.paths[0]
spline = path.add_spline(
[(1, 1), (2, 2), (3, 3), (4, 4)], degree=3, periodic=1
)
# the following values do not represent a mathematically valid spline
spline.control_points = [(1, 1), (2, 2), (3, 3), (4, 4)]
spline.knot_values = [1, 2, 3, 4, 5, 6]
spline.weights = [4, 3, 2, 1]
spline.start_tangent = (10, 1)
spline.end_tangent = (2, 20)
# test the spline
path = spline_edge_hatch.paths[0]
spline = path.edges[-1]
assert 3 == spline.degree
assert 1 == spline.periodic
assert (10, 1) == spline.start_tangent
assert (2, 20) == spline.end_tangent
assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.control_points
assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.fit_points
assert [1, 2, 3, 4, 5, 6] == spline.knot_values
assert [4, 3, 2, 1] == spline.weights
writer = TagCollector()
spline.export_dxf(writer)
tags = Tags(writer.tags)
assert tags.get_first_value(97) == 4, "expected count of fit points"
def test_create_required_tangents_for_spline_edge_if_fit_points_present(
spline_edge_hatch,
):
# create the spline
path = spline_edge_hatch.paths[0]
spline = path.add_spline_control_frame(
fit_points=[(1, 1), (2, 2), (3, 3), (4, 4)]
)
writer = TagCollector()
spline.export_dxf(writer)
tags = Tags(writer.tags)
assert tags.get_first_value(97) == 4, "expected count of fit points"
assert tags.has_tag(12), "expected start tangent to be present"
assert tags.has_tag(13), "expected end tangent to be present"
def test_no_fit_points_export(spline_edge_hatch):
path = spline_edge_hatch.paths[0]
spline = path.add_spline(
control_points=[(1, 1), (2, 2), (3, 3), (4, 4)], degree=3, periodic=1
)
spline.knot_values = [1, 2, 3, 4, 5, 6]
assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.control_points
assert len(spline.fit_points) == 0
writer = TagCollector(dxfversion=const.DXF2007)
spline.export_dxf(writer)
# do not write length tag 97 if no fit points exists for DXF2007 and prior
assert any(tag.code == 97 for tag in writer.tags) is False
writer = TagCollector(dxfversion=const.DXF2010)
spline.export_dxf(writer)
# do write length tag 97 if no fit points exists for DXF2010+
assert (97, 0) in writer.tags
def test_is_pattern_hatch(hatch_pattern):
assert hatch_pattern.has_solid_fill is False
assert hatch_pattern.has_gradient_data is False
assert hatch_pattern.has_pattern_fill is True
def test_edit_pattern(hatch_pattern):
pattern = hatch_pattern.pattern
assert 2 == len(pattern.lines)
line0 = pattern.lines[0]
assert 45 == line0.angle
assert (0, 0) == line0.base_point
assert (-0.1767766952966369, 0.1767766952966369) == line0.offset
assert 0 == len(line0.dash_length_items)
line1 = pattern.lines[1]
assert 45 == line1.angle
assert (0.176776695, 0) == line1.base_point
assert (-0.1767766952966369, 0.1767766952966369) == line1.offset
assert 2 == len(line1.dash_length_items)
assert [0.125, -0.0625] == line1.dash_length_items
@pytest.fixture()
def pattern():
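# Each pattern line is given as [angle, base_point, offset, dash_length_items];
# an empty dash list means a continuous line (see the inline comments below).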
return [
[45, (0, 0), (0, 1), []], # 1. Line: continuous
[45, (0, 0.5), (0, 1), [0.2, -0.1]], # 2. Line: dashed
]
def test_create_new_pattern_hatch(hatch, pattern):
hatch.set_pattern_fill("MOZMAN", definition=pattern)
assert hatch.has_solid_fill is False
assert hatch.has_gradient_data is False
assert hatch.has_pattern_fill is True
assert "MOZMAN" == hatch.dxf.pattern_name
line0 = hatch.pattern.lines[0]
assert 45 == line0.angle
assert (0, 0) == line0.base_point
assert (0, 1) == line0.offset
assert 0 == len(line0.dash_length_items)
line1 = hatch.pattern.lines[1]
assert 45 == line1.angle
assert (0, 0.5) == line1.base_point
assert (0, 1) == line1.offset
assert 2 == len(line1.dash_length_items)
assert [0.2, -0.1] == line1.dash_length_items
def test_pattern_scale(hatch, pattern):
hatch.set_pattern_fill("MOZMAN", definition=pattern)
hatch.set_pattern_scale(2)
assert hatch.dxf.pattern_scale == 2
line1, line2 = hatch.pattern.lines
assert line1.base_point == (0, 0)
assert line1.offset == (0, 2)
assert line2.base_point == (0, 1)
assert line2.offset == (0, 2)
def test_pattern_scale_x_times(hatch, pattern):
hatch.set_pattern_fill("MOZMAN", definition=pattern)
hatch.set_pattern_scale(2)
    # scale the pattern to 3 times the actual scaling of 2
# = base pattern x 6
hatch.set_pattern_scale(hatch.dxf.pattern_scale * 3)
assert hatch.dxf.pattern_scale == 6
line1, line2 = hatch.pattern.lines
assert line1.base_point == (0, 0)
assert line1.offset == (0, 6)
assert line2.base_point == (0, 3)
assert line2.offset == (0, 6)
def test_pattern_rotation(hatch, pattern):
hatch.set_pattern_fill("MOZMAN", definition=pattern)
assert hatch.dxf.pattern_angle == 0
hatch.set_pattern_angle(45)
assert hatch.dxf.pattern_angle == 45
line1, line2 = hatch.pattern.lines
assert line1.angle == 90
assert line1.base_point == (0, 0)
assert line1.offset.isclose(Vec3(-0.7071067811865475, 0.7071067811865476))
assert line2.angle == 90
assert line2.base_point.isclose(
Vec3(-0.35355339059327373, 0.3535533905932738)
)
assert line2.offset.isclose(Vec3(-0.7071067811865475, 0.7071067811865476))
def test_pattern_rotation_add_angle(hatch, pattern):
hatch.set_pattern_fill("MOZMAN", definition=pattern)
assert hatch.dxf.pattern_angle == 0
hatch.set_pattern_angle(45)
assert hatch.dxf.pattern_angle == 45
# add 45 degrees to actual pattern rotation
hatch.set_pattern_angle(hatch.dxf.pattern_angle + 45)
assert hatch.dxf.pattern_angle == 90
def test_create_gradient(hatch):
hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
assert hatch.has_gradient_data is True
assert hatch.has_solid_fill is True
assert hatch.has_pattern_fill is False
gdata = hatch.gradient
assert (10, 10, 10) == gdata.color1
assert (250, 250, 250) == gdata.color2
assert 180 == int(gdata.rotation)
assert 0 == gdata.centered
assert 0 == gdata.tint
assert "LINEAR" == gdata.name
def test_create_gradient_low_level_dxf_tags(hatch):
hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
tags = TagCollector.dxftags(hatch.gradient)
for code in [450, 451, 452, 453, 460, 461, 462, 470]:
assert tags.has_tag(code) is True, "missing required tag: %d" % code
assert 2 == len(tags.find_all(463))
assert 2 == len(tags.find_all(421))
def test_remove_gradient_data(hatch):
hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
assert hatch.has_gradient_data is True
hatch.set_solid_fill(color=4) # remove gradient data
assert hatch.has_gradient_data is False, "gradient data not removed"
assert hatch.has_pattern_fill is False
assert hatch.has_solid_fill is True
def test_remove_gradient_low_level_dxf_tags(hatch):
hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
assert hatch.has_gradient_data is True
hatch.set_solid_fill(color=4) # remove gradient data
assert hatch.gradient is None
def test_bgcolor_not_exists(hatch):
assert hatch.bgcolor is None
def test_set_new_bgcolor(hatch):
hatch.bgcolor = (10, 20, 30)
assert (10, 20, 30) == hatch.bgcolor
def test_change_bgcolor(hatch):
hatch.bgcolor = (10, 20, 30)
assert (10, 20, 30) == hatch.bgcolor
hatch.bgcolor = (30, 20, 10)
assert (30, 20, 10) == hatch.bgcolor
def test_delete_bgcolor(hatch):
hatch.bgcolor = (10, 20, 30)
assert (10, 20, 30) == hatch.bgcolor
del hatch.bgcolor
assert hatch.bgcolor is None
def test_delete_not_existing_bgcolor(hatch):
del hatch.bgcolor
assert hatch.bgcolor is None
@pytest.fixture(scope="module")
def msp():
doc = ezdxf.new()
return doc.modelspace()
VERTICES = [(0, 0), (1, 0), (1, 1), (0, 1)]
def add_hatch(msp):
hatch = msp.add_hatch()
path = hatch.paths.add_polyline_path(VERTICES)
return hatch, path
def test_associate_valid_entity(msp):
hatch, path = add_hatch(msp)
pline = msp.add_lwpolyline(VERTICES, close=True)
hatch.associate(path, [pline])
assert path.source_boundary_objects == [pline.dxf.handle]
def test_if_hatch_is_alive_before_association(msp):
hatch, path = add_hatch(msp)
hatch.destroy()
with pytest.raises(const.DXFStructureError):
hatch.associate(path, [])
def test_can_not_associate_entity_from_different_document(msp):
hatch, path = add_hatch(msp)
pline = msp.add_lwpolyline(VERTICES, close=True)
pline.doc = None
with pytest.raises(const.DXFStructureError):
hatch.associate(path, [pline])
def test_can_not_associate_entity_with_different_owner(msp):
hatch, path = add_hatch(msp)
pline = msp.add_lwpolyline(VERTICES, close=True)
pline.dxf.owner = None
with pytest.raises(const.DXFStructureError):
hatch.associate(path, [pline])
def test_can_not_associate_destroyed_entity(msp):
hatch, path = add_hatch(msp)
pline = msp.add_lwpolyline(VERTICES, close=True)
pline.destroy()
with pytest.raises(const.DXFStructureError):
hatch.associate(path, [pline])
PATH_HATCH = """ 0
HATCH
5
27C
330
1F
100
AcDbEntity
8
0
62
1
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
0
91
1
92
7
72
0
73
1
93
4
10
10.0
20
10.0
10
0.0
20
10.0
10
0.0
20
0.0
10
10.0
20
0.0
97
0
75
1
76
1
47
0.0442352806926743
98
1
10
4.826903383179796
20
4.715694827530256
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
EDGE_HATCH = """ 0
HATCH
5
1FE
330
1F
100
AcDbEntity
8
0
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
1
91
1
92
5
93
8
72
3
10
10.0
20
5.0
11
3.0
21
0.0
40
0.3333333333333333
50
270
51
450
73
1
72
1
10
10.0
20
6.0
11
10.0
21
10.0
72
1
10
10.0
20
10.0
11
6.0
21
10.0
72
2
10
5.0
20
10.0
40
1.0
50
360.0
51
540.0
73
0
72
1
10
4.0
20
10.0
11
0.0
21
10.0
72
1
10
0.0
20
10.0
11
0.0
21
0.0
72
1
10
0.0
20
0.0
11
10.0
21
0.0
72
1
10
10.0
20
0.0
11
10.0
21
4.0
97
8
330
1E7
330
1EC
330
1E4
330
1E6
330
1EA
330
1E5
330
1E2
330
1E3
75
1
76
1
47
0.0226465124087611
98
1
10
5.15694040451099
20
5.079032000141936
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
EDGE_HATCH_WITH_SPLINE = """ 0
HATCH
5
220
330
1F
100
AcDbEntity
8
0
62
1
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
1
91
1
92
5
93
4
72
1
10
10.0
20
10.0
11
0.0
21
10.0
72
4
94
3
73
0
74
0
95
10
96
6
40
0.0
40
0.0
40
0.0
40
0.0
40
3.354101966249684
40
7.596742653368969
40
11.86874452602773
40
11.86874452602773
40
11.86874452602773
40
11.86874452602773
10
0.0
20
10.0
10
0.8761452790665735
20
8.935160214313272
10
2.860536415354832
20
6.523392802252294
10
-3.08307347911064
20
4.314363374126372
10
-1.030050983735315
20
1.441423393837641
10
0.0
20
0.0
97
4
11
0.0
21
10.0
11
1.5
21
7.0
11
-1.5
21
4.0
11
0.0
21
0.0
12
0.0
22
0.0
13
0.0
23
0.0
72
1
10
0.0
20
0.0
11
10.0
21
0.0
72
1
10
10.0
20
0.0
11
10.0
21
10.0
97
4
330
215
330
217
330
213
330
214
75
1
76
1
47
0.0365335049696054
98
1
10
5.5
20
4.5
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
HATCH_PATTERN = """0
HATCH
5
1EA
330
1F
100
AcDbEntity
8
0
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
ANSI33
70
0
71
0
91
1
92
7
72
0
73
1
93
4
10
10.0
20
10.0
10
0.0
20
10.0
10
0.0
20
0.0
10
10.0
20
0.0
97
0
75
1
76
1
52
0.0
41
1.0
77
0
78
2
53
45.0
43
0.0
44
0.0
45
-0.1767766952966369
46
0.1767766952966369
79
0
53
45.0
43
0.176776695
44
0.0
45
-0.1767766952966369
46
0.1767766952966369
79
2
49
0.125
49
-0.0625
47
0.0180224512632811
98
1
10
3.5
20
6.0
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
```
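The fixtures above exercise the `Hatch` boundary-path and fill API. As a quick, hedged sketch of how those calls fit together outside the test suite (the vertices and the pattern definition are made up for illustration; the method names are the ones used in the tests above):
```python
import ezdxf
doc = ezdxf.new()
msp = doc.modelspace()
hatch = msp.add_hatch(color=2)
# closed polyline path as boundary, same call as the add_hatch() helper above
hatch.paths.add_polyline_path([(0, 0), (10, 0), (10, 10), (0, 10)])
# user defined pattern: [angle, base_point, offset, dash_length_items]
hatch.set_pattern_fill(
    "MOZMAN",
    definition=[
        [45, (0, 0), (0, 1), []],  # 1. line: continuous
        [45, (0, 0.5), (0, 1), [0.2, -0.1]],  # 2. line: dashed
    ],
)
hatch.set_pattern_scale(2)  # scales base points and offsets of all pattern lines
hatch.set_pattern_angle(45)  # rotates all pattern lines
doc.saveas("hatch_pattern.dxf")
```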
#### File: tests/test_02_dxf_graphics/test_251_upright.py
```python
import pytest
import math
import ezdxf
from ezdxf.upright import upright, _flip_deg_angle
from ezdxf import path
from ezdxf.entities import (
Circle,
Arc,
DXFEntity,
Text,
Solid,
Trace,
Ellipse,
LWPolyline,
Hatch,
)
from ezdxf.math import Z_AXIS, Matrix44, OCS, OCSTransform, Vec3
@pytest.mark.parametrize(
"angle",
[
0.0,
30.0,
60.0,
90.0,
180.0,
270.0,
-30.0,
-60.0,
-90.0,
-180.0,
-270.0,
],
)
def test_flip_deg_angle(angle):
t = OCSTransform.from_ocs(
OCS(-Z_AXIS),
OCS(Z_AXIS),
Matrix44(),
)
control_value = t.transform_deg_angle(angle)
assert _flip_deg_angle(angle) == pytest.approx(control_value)
@pytest.fixture
def circle():
return Circle.new(
dxfattribs={"center": (3, 4), "radius": 2.0, "extrusion": (0, 0, -1)}
)
def test_safety_checks(circle):
# invalid entities should be ignored silently
upright(None) # ignore None values
upright(DXFEntity()) # ignore invalid DXF entity types
upright(Text()) # ignore unsupported DXF entity types
circle.destroy()
upright(circle) # ignore destroyed entities
    assert True is True  # no exception should have been raised
def test_upright_circle_dxf_attributes(circle):
upright(circle)
assert circle.dxf.extrusion.isclose(Z_AXIS)
assert circle.dxf.center.isclose((-3, 4))
assert circle.dxf.radius == 2.0
def test_upright_circle_geometry(circle):
circle.dxf.center = (0, 0) # required for rotation!
p0 = path.make_path(circle)
upright(circle)
    # IMPORTANT: Circle has a different WCS representation than its Path object.
# Rotated around the z-axis by 180 degrees AND reversed order, because
# the start point is always at 0 degrees, determined by the OCS x-axis!
p1 = path.make_path(circle).transform(Matrix44.z_rotate(math.pi))
assert path.have_close_control_vertices(p0, p1.reversed())
@pytest.fixture
def arc():
return Arc.new(
dxfattribs={
"center": (3, 4, 5),
"radius": 2.0,
"start_angle": 15,
"end_angle": 75,
"extrusion": (0, 0, -1),
}
)
def test_upright_arc_dxf_attributes(arc):
upright(arc)
assert arc.dxf.extrusion.isclose(Z_AXIS)
assert arc.dxf.center.isclose((-3, 4, -5))
assert arc.dxf.radius == 2.0
assert arc.dxf.start_angle == pytest.approx(105.0)
assert arc.dxf.end_angle == pytest.approx(165.0)
def test_upright_arc_geometry(arc):
p0 = path.make_path(arc)
upright(arc)
# ARC angles are always in counter-clockwise orientation around the
# extrusion vector, therefore a reversed path vertex order:
p1 = path.make_path(arc).reversed()
assert path.have_close_control_vertices(p0, p1)
@pytest.mark.parametrize("cls", [Solid, Trace])
def test_upright_quadrilaterals(cls):
solid = cls.new(
dxfattribs={
"vtx0": (1, 1),
"vtx1": (2, 1),
"vtx2": (2, 2),
"vtx3": (1, 2),
"extrusion": (0, 0, -1),
}
)
p0 = path.make_path(solid)
assert len(p0) == 4
upright(solid)
assert solid.dxf.extrusion.isclose(Z_AXIS)
p1 = path.make_path(solid)
# same vertex order as source entity
assert path.have_close_control_vertices(p0, p1)
def test_upright_ellipse():
ellipse = Ellipse.new(
dxfattribs={
"center": (5, 5, 5),
"major_axis": (5, 0, 0),
"ratio": 0.5,
"start_param": 0.5,
"end_param": 1.5,
"extrusion": (0, 0, -1),
}
)
p0 = path.make_path(ellipse)
assert p0.has_curves is True
upright(ellipse)
assert ellipse.dxf.extrusion.isclose(Z_AXIS)
p1 = path.make_path(ellipse)
# has reversed vertex order of source entity:
assert path.have_close_control_vertices(p0, p1.reversed())
POLYLINE_POINTS = [
# x, y, s, e, b
(0, 0, 0, 0, 0),
(2, 2, 1, 2, -1),
(4, 0, 2, 1, 1),
(6, 0, 0, 0, 0),
]
def lwpolyline():
pline = LWPolyline.new(
dxfattribs={
"elevation": 4,
"extrusion": (0, 0, -1),
}
)
pline.set_points(POLYLINE_POINTS)
return pline
def polyline2d():
from ezdxf.layouts import VirtualLayout
layout = VirtualLayout()
return layout.add_polyline2d(
POLYLINE_POINTS,
format="xyseb",
dxfattribs={
"elevation": (0, 0, 4),
"extrusion": (0, 0, -1),
},
)
@pytest.mark.parametrize("factory", [lwpolyline, polyline2d])
def test_upright_polyline(factory):
polyline = factory()
p0 = path.make_path(polyline)
assert p0.has_curves is True
upright(polyline)
assert polyline.dxf.extrusion.isclose(Z_AXIS)
p1 = path.make_path(polyline)
    # vertex order does not change:
assert path.have_close_control_vertices(p0, p1)
def test_upright_hatch_with_polyline_path():
hatch = Hatch.new(
dxfattribs={
"elevation": (0, 0, 4),
"extrusion": (0, 0, -1),
}
)
hatch.paths.add_polyline_path(
[(x, y, b) for x, y, s, e, b in POLYLINE_POINTS]
)
p0 = path.make_path(hatch)
assert p0.has_curves is True
upright(hatch)
assert hatch.dxf.extrusion.isclose(Z_AXIS)
p1 = path.make_path(hatch)
assert path.have_close_control_vertices(p0, p1)
def test_upright_hatch_with_edge_path(all_edge_types_hatch):
hatch = all_edge_types_hatch
hatch.dxf.elevation = Vec3(0, 0, 4)
hatch.dxf.extrusion = Vec3(0, 0, -1)
assert hatch.dxf.extrusion.isclose(-Z_AXIS)
p0 = path.make_path(hatch)
assert p0.has_curves is True
upright(hatch)
assert hatch.dxf.extrusion.isclose(Z_AXIS)
p1 = path.make_path(hatch)
assert path.have_close_control_vertices(p0, p1)
def test_upright_insert():
doc = ezdxf.new()
blk = doc.blocks.new("example")
blk.add_arc(
center=(5, 0, 2),
radius=3,
start_angle=30,
end_angle=150,
)
blk.add_lwpolyline(POLYLINE_POINTS)
msp = doc.modelspace()
blk_ref = msp.add_blockref(
name="example",
insert=(0, 0, 4),
dxfattribs={
"extrusion": (0, 0, -1),
"rotation": -37,
},
)
blk_ref_copy = blk_ref.copy()
upright(blk_ref_copy)
msp.add_entity(blk_ref_copy)
assert blk_ref_copy.dxf.extrusion.isclose(Z_AXIS)
for e0, e1 in zip(
blk_ref.virtual_entities(), blk_ref_copy.virtual_entities()
):
assert e0.dxftype() == e1.dxftype(), "same DXF type expected"
p0 = path.make_path(e0)
assert len(p0) > 0, "source path cannot be empty"
p1 = path.make_path(e1)
assert len(p1) > 0, "upright path cannot be empty"
assert path.have_close_control_vertices(
p0, p1
), "expected same WCS representation"
if __name__ == "__main__":
pytest.main([__file__])
```
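For orientation, a minimal usage sketch of the `upright()` tool exercised above, applied to a circle defined with an inverted extrusion vector (the values mirror the `circle` fixture; this snippet is not part of the original test file):
```python
import ezdxf
from ezdxf.upright import upright
doc = ezdxf.new()
msp = doc.modelspace()
# CIRCLE defined in an inverted OCS (extrusion pointing in -z direction)
circle = msp.add_circle(
    center=(3, 4), radius=2, dxfattribs={"extrusion": (0, 0, -1)}
)
upright(circle)  # flip extrusion to +z, keep the WCS geometry
assert circle.dxf.extrusion.isclose((0, 0, 1))
assert circle.dxf.center.isclose((-3, 4))  # same WCS location, new OCS coordinates
```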
#### File: tests/test_03_dxf_layouts/test_304_new_entity_space.py
```python
import pytest
from ezdxf.entitydb import EntitySpace
class Entity:
def __init__(self, value):
self.value = value
self.is_alive = True
@pytest.fixture
def space():
return EntitySpace(Entity(p) for p in [1, 4, 5, 6, 76, -4, 7])
def test_init(space):
assert len(EntitySpace()) == 0
assert len(space) == 7
def test_existence(space):
e = space[3]
assert e in space
assert len(space) == 7
e.is_alive = False
assert e not in space
assert len(space) == 7, "still 7 items"
space.purge()
assert len(space) == 6, "removed dead entities"
e = Entity(1)
assert e not in space
def test_remove(space):
e = space[3]
space.remove(e)
assert e not in space
space.clear()
assert len(space) == 0
```
#### File: tests/test_04_dxf_high_level_structs/test_404a_tables.py
```python
import pytest
import ezdxf
from ezdxf.sections.tables import TablesSection
@pytest.fixture(scope="module")
def tables():
doc = ezdxf.new()
return doc.tables
def test_constructor(tables):
assert tables.layers is not None
assert tables.linetypes is not None
assert tables.appids is not None
assert tables.styles is not None
assert tables.dimstyles is not None
assert tables.views is not None
assert tables.viewports is not None
assert tables.ucs is not None
assert tables.block_records is not None
def test_getattr_upper_case(tables):
with pytest.raises(AttributeError):
_ = tables.LINETYPES
def test_error_getattr(tables):
with pytest.raises(AttributeError):
_ = tables.test
class TestAddLayerTableEntry:
def test_add_layer(self, tables: TablesSection):
layer = tables.layers.add(
"NEW_LAYER",
color=2,
true_color=ezdxf.rgb2int((0x10, 0x20, 0x30)),
linetype="DASHED",
lineweight=18,
plot=True,
)
assert layer.dxf.name == "NEW_LAYER"
assert layer.dxf.color == 2
assert layer.dxf.true_color == 0x00102030
        assert layer.dxf.linetype == "DASHED", "no check if the line type exists!"
assert layer.dxf.lineweight == 18
assert layer.dxf.plot == 1
def test_check_invalid_aci_color(self, tables: TablesSection):
with pytest.raises(ValueError):
tables.layers.add("INVALID_ACI", color=300)
def test_check_invalid_line_weight(self, tables: TablesSection):
with pytest.raises(ValueError):
tables.layers.add("INVALID_LINE_WEIGHT", lineweight=300)
class TestTextStyleTable:
def test_add_new_ttf_font_text_style(self, tables: TablesSection):
style = tables.styles.add(
"NEW_STYLE", font="Arial.ttf", dxfattribs={"flags": 3}
)
assert style.dxf.name == "NEW_STYLE"
assert style.dxf.font == "Arial.ttf"
assert style.dxf.flags == 3
def test_add_new_shape_file(self, tables: TablesSection):
style = tables.styles.add_shx("shapes1.shx")
assert style.dxf.name == "", "shape files have no name"
assert style.dxf.font == "shapes1.shx"
assert style.dxf.flags == 1
        # cannot add the same shape file twice:
with pytest.raises(ezdxf.const.DXFTableEntryError):
tables.styles.add_shx("shapes1.shx")
def test_get_shape_file(self, tables: TablesSection):
style = tables.styles.get_shx("shapes2.shx")
assert style.dxf.name == "", "shape files have no name"
assert style.dxf.font == "shapes2.shx"
assert style.dxf.flags == 1
style2 = tables.styles.get_shx("shapes2.shx")
assert style is style2
def test_find_shape_file(self, tables: TablesSection):
tables.styles.add_shx("shapes3.shx")
style = tables.styles.find_shx("shapes3.shx")
assert style.dxf.font == "shapes3.shx"
def test_if_shape_file_entry_exist(self, tables: TablesSection):
assert tables.styles.find_shx("unknown.shx") is None
def test_add_new_line_type(tables: TablesSection):
ltype = tables.linetypes.add(
"SIMPLE_LINE_TYPE", [0.2, 0.1, -0.1], description="description"
)
assert ltype.dxf.name == "SIMPLE_LINE_TYPE"
assert ltype.dxf.description == "description"
# Correct pattern creation is tested in test suite 121.
```
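As a hedged sketch of the table API covered above (the shortcut properties `doc.layers`, `doc.linetypes` and `doc.styles` are assumed to point at the same tables as `doc.tables.*`; names and values are illustrative):
```python
import ezdxf
doc = ezdxf.new()
# line type, layer and text style entries, mirroring the add() calls tested above
doc.linetypes.add("DASHED2", [0.2, 0.1, -0.1], description="dash - gap")
doc.layers.add("WALLS", color=2, linetype="DASHED2", lineweight=18)
doc.styles.add("NOTES", font="Arial.ttf")
doc.saveas("tables_demo.dxf")
```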
#### File: tests/test_04_dxf_high_level_structs/test_404b_object_collections.py
```python
import pytest
import ezdxf
from ezdxf.entities import is_dxf_object
@pytest.fixture(scope="module")
def collection_ro():
"""Creates a read only document"""
doc = ezdxf.new()
doc.entitydb.locked = True
return doc.mleader_styles
class TestGetterMethods:
def test_len(self, collection_ro):
assert len(collection_ro) == 1
def test_iter(self, collection_ro):
assert len(list(collection_ro)) == 1
def test_is_unique_name(self, collection_ro):
assert collection_ro.is_unique_name("STANDARD") is False
def test_contains(self, collection_ro):
assert ("Standard" in collection_ro) is True
def test_contains_is_case_insensitive(self, collection_ro):
assert ("STANDARD" in collection_ro) is True
def test_getitem(self, collection_ro):
assert collection_ro["Standard"].dxf.name == "Standard"
def test_getitem_is_case_insensitive(self, collection_ro):
assert collection_ro["STANDARD"].dxf.name == "Standard"
def test_get(self, collection_ro):
assert collection_ro.get("Standard").dxf.name == "Standard"
def test_get_is_case_insensitive(self, collection_ro):
assert collection_ro.get("STANDARD").dxf.name == "Standard"
@pytest.fixture(scope="module")
def collection_rw():
doc = ezdxf.new()
return doc.mleader_styles
class TestCreateNewEntry:
def test_new_entry_is_an_object(self, collection_rw):
obj = collection_rw.new("New1")
assert is_dxf_object(obj) is True
assert obj.dxf.name == "New1"
def test_new_entry_is_added_to_collection(self, collection_rw):
count = len(collection_rw)
collection_rw.new("New2")
assert len(collection_rw) == count + 1
assert "NEW2" in collection_rw, "case insensitive names"
def test_cannot_use_existing_name(self, collection_rw):
collection_rw.new("New3")
with pytest.raises(ValueError):
collection_rw.new("NEW3"), "case insensitive names"
def test_invalid_char_in_name_raises_exception(self, collection_rw):
with pytest.raises(ValueError):
collection_rw.new("New:")
class TestDeleteEntry:
def test_delete_entry_remove_entry(self, collection_rw):
count = len(collection_rw)
collection_rw.new("Del1")
collection_rw.delete("DEL1")
assert len(collection_rw) == count
def test_delete_non_existing_entry_does_not_raise_exception(
self, collection_rw
):
count = len(collection_rw)
collection_rw.delete("DEL2")
assert len(collection_rw) == count
class TestDuplicateEntry:
def test_duplicate_existing_entry(self, collection_rw):
count = len(collection_rw)
obj = collection_rw.duplicate_entry("STANDARD", "Dup1")
assert is_dxf_object(obj) is True
assert obj.dxf.name == "Dup1"
assert len(collection_rw) == count + 1
def test_duplicate_non_existing_entry_raises_exception(self, collection_rw):
with pytest.raises(ValueError):
collection_rw.duplicate_entry("NON_EXISTING_ENTRY", "Dup1")
def test_new_entry_replaces_existing_entry(self, collection_rw):
count = len(collection_rw)
obj1 = collection_rw.duplicate_entry("STANDARD", "Dup2")
obj2 = collection_rw.duplicate_entry("Standard", "DUP2")
assert obj1 is not obj2, "obj2 must be a new object"
assert collection_rw.get("Dup2") is obj2, "obj2 should replace obj1"
assert len(collection_rw) == count + 1
def test_duplicated_entries_have_same_content(self, collection_rw):
def attribs(obj):
a = obj.dxf.all_existing_dxf_attribs()
del a['handle']
del a['name']
return a
obj0 = collection_rw.get("Standard")
obj1 = collection_rw.duplicate_entry("STANDARD", "Dup3")
attribs0 = attribs(obj0)
attribs1 = attribs(obj1)
assert len(attribs0) > 1
assert attribs0 == attribs1
assert obj1.get_reactors() == obj0.get_reactors()
def test_duplicated_entry_is_stored_in_objects_section(self, collection_rw):
obj = collection_rw.duplicate_entry("STANDARD", "Dup4")
assert obj.dxf.handle in obj.doc.objects
def test_invalid_char_in_new_name_raises_exception(self, collection_rw):
with pytest.raises(ValueError):
collection_rw.duplicate_entry("Standard", "New:")
def test_clear():
doc = ezdxf.new()
doc.mleader_styles.clear()
# This creates an invalid DXF file!!!
assert len(doc.mleader_styles) == 0
if __name__ == "__main__":
pytest.main([__file__])
```
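A short, hedged sketch of the object collection behaviour checked above, using the `mleader_styles` collection of a new document:
```python
import ezdxf
doc = ezdxf.new()
styles = doc.mleader_styles  # object collection with case-insensitive names
# derive a new style from the built-in "Standard" entry
my_style = styles.duplicate_entry("Standard", "MyStyle")
assert my_style.dxf.name == "MyStyle"
assert "MYSTYLE" in styles  # lookup ignores case
```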
#### File: tests/test_04_dxf_high_level_structs/test_408_objects_section.py
```python
import ezdxf
from ezdxf.tools.test import load_entities
from ezdxf.sections.objects import ObjectsSection
from ezdxf.entities import Point
def test_load_section():
doc = ezdxf.new("R2000")
ent = load_entities(TESTOBJECTS, "OBJECTS")
section = ObjectsSection(doc, ent)
assert len(section) == 6
assert section[0].dxftype() == "DICTIONARY"
def test_auditor_removes_invalid_entities():
doc = ezdxf.new()
count = len(doc.objects)
# hack hack hack!
doc.objects._entity_space.add(Point())
auditor = doc.audit()
assert len(auditor.fixes) == 1
assert len(doc.objects) == count, "should call purge() automatically"
TESTOBJECTS = """ 0
SECTION
2
OBJECTS
0
DICTIONARY
5
C
330
0
100
AcDbDictionary
281
1
3
ACAD_COLOR
350
73
3
ACAD_GROUP
350
D
3
ACAD_LAYOUT
350
1A
3
ACAD_MATERIAL
350
72
3
ACAD_MLEADERSTYLE
350
D7
3
ACAD_MLINESTYLE
350
17
3
ACAD_PLOTSETTINGS
350
19
3
ACAD_PLOTSTYLENAME
350
E
3
ACAD_SCALELIST
350
B6
3
ACAD_TABLESTYLE
350
86
3
ACAD_VISUALSTYLE
350
99
3
ACDB_RECOMPOSE_DATA
350
499
3
AcDbVariableDictionary
350
66
0
DICTIONARY
5
2A2
330
2
100
AcDbDictionary
280
1
281
1
3
ACAD_LAYERSTATES
360
2A3
0
DICTIONARY
5
E6
330
10
100
AcDbDictionary
280
1
281
1
0
DICTIONARY
5
15D
330
1F
100
AcDbDictionary
280
1
281
1
0
DICTIONARY
5
28C
330
28B
100
AcDbDictionary
280
1
281
1
3
ASDK_XREC_ANNOTATION_SCALE_INFO
360
28D
0
DICTIONARY
5
291
330
290
100
AcDbDictionary
280
1
281
1
3
ASDK_XREC_ANNOTATION_SCALE_INFO
360
292
0
ENDSEC
"""
```
#### File: tests/test_04_dxf_high_level_structs/test_413_dxfgroups.py
```python
import pytest
import ezdxf
from ezdxf.audit import Auditor
@pytest.fixture(scope="module")
def doc():
return ezdxf.new()
def test_new_group(doc):
msp = doc.modelspace()
group = doc.groups.new("test1")
with group.edit_data() as g:
g.append(msp.add_point((0, 0)))
g.append(msp.add_line((1, 1), (2, 2)))
assert len(group) == 2
assert group[0].dxftype() == "POINT"
assert group[1].dxftype() == "LINE"
assert len(list(group.handles())) == 2
handle = group[0].dxf.handle
assert handle in group
def test_unique_groups(doc):
doc.groups.new("test2")
with pytest.raises(ValueError):
doc.groups.new("test2")
def test_modify_group(doc):
msp = doc.modelspace()
group = doc.groups.new("test3")
with group.edit_data() as g:
g.append(msp.add_point((0, 0)))
g.append(msp.add_line((1, 1), (2, 2)))
assert len(group) == 2
e = [
msp.add_point((3, 3)),
msp.add_point((4, 4)),
]
group.extend(e)
assert len(group) == 4
def test_can_not_add_invalid_block_entities(doc):
group = doc.groups.new("test4")
block = doc.blocks.new("Block4")
point = block.add_point((0, 0))
with pytest.raises(ezdxf.DXFStructureError):
with group.edit_data() as g:
g.append(point)
def test_can_not_add_invalid_table_entry(doc):
group = doc.groups.new("test5")
layer = doc.layers.get("0")
with pytest.raises(ezdxf.DXFStructureError):
with group.edit_data() as g:
g.append(layer)
def test_audit_filters_invalid_entities(doc):
group = doc.groups.new("test6")
msp = doc.modelspace()
block = doc.blocks.new("Block6")
point1 = block.add_point((0, 0)) # invalid BLOCK entity
point2 = msp.add_point((0, 0)) # valid model space entity ...
point2.destroy() # ... but destroyed
layer = doc.layers.get("0") # invalid table entry
group.extend([point1, point2, layer])
auditor = Auditor(doc)
group.audit(auditor)
assert len(group) == 0
if __name__ == "__main__":
pytest.main([__file__])
```
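A compact usage sketch of the group API tested above; as the tests show, only entities that live in layouts (model space or paper space) may be appended:
```python
import ezdxf
doc = ezdxf.new()
msp = doc.modelspace()
group = doc.groups.new("ANNOTATIONS")
with group.edit_data() as entities:
    entities.append(msp.add_point((0, 0)))
    entities.append(msp.add_line((1, 1), (2, 2)))
assert len(group) == 2
doc.saveas("groups_demo.dxf")
```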
#### File: tests/test_04_dxf_high_level_structs/test_424_audit.py
```python
import pytest
import ezdxf
from ezdxf.audit import Auditor, AuditError, BlockCycleDetector
from ezdxf.entities import factory, DXFTagStorage, Attrib
@pytest.fixture(scope="module")
def doc():
return ezdxf.new("R2000")
@pytest.fixture
def auditor(doc):
return Auditor(doc)
@pytest.fixture
def entity(doc):
return doc.modelspace().add_line((0, 0), (100, 0))
def test_color_index(entity, auditor):
entity.dxf.__dict__["color"] = -1 # by pass 'set' validator
auditor.check_entity_color_index(entity)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_COLOR_INDEX
auditor.reset()
entity.dxf.__dict__["color"] = 258 # by pass 'set' validator
auditor.check_entity_color_index(entity)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_COLOR_INDEX
def test_lineweight_too_small(entity, auditor):
entity.dxf.__dict__["lineweight"] = -5 # by pass 'set' validator
auditor.check_entity_lineweight(entity)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_LINEWEIGHT
assert entity.dxf.lineweight == -1
def test_lineweight_too_big(entity, auditor):
entity.dxf.__dict__["lineweight"] = 212 # by pass 'set' validator
auditor.check_entity_lineweight(entity)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_LINEWEIGHT
assert entity.dxf.lineweight == 211
def test_invalid_lineweight(entity, auditor):
entity.dxf.__dict__["lineweight"] = 10 # by pass 'set' validator
auditor.check_entity_lineweight(entity)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_LINEWEIGHT
assert entity.dxf.lineweight == 13
def test_for_valid_layer_name(entity, auditor):
entity.dxf.__dict__["layer"] = "Invalid/" # by pass 'set' validator
auditor.check_for_valid_layer_name(entity)
assert len(auditor) == 1
assert auditor.errors[0].code == AuditError.INVALID_LAYER_NAME
def test_for_existing_owner(entity, auditor):
entity.dxf.owner = "FFFFFF"
auditor.check_owner_exist(entity)
auditor.empty_trashcan()
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.INVALID_OWNER_HANDLE
assert entity.is_alive is False, "delete entity without valid owner"
@pytest.mark.parametrize("TYPE", ("TEXT", "MTEXT", "ATTRIB", "ATTDEF"))
def test_for_existing_text_style(TYPE, auditor, doc):
text = factory.new(TYPE, dxfattribs={"style": "UNDEFINED"}, doc=doc)
auditor.check_text_style(text)
assert len(auditor.fixes) == 1
assert auditor.fixes[0].code == AuditError.UNDEFINED_TEXT_STYLE
assert text.dxf.style == "Standard"
def test_block_cycle_detector_setup():
doc = ezdxf.new()
a = doc.blocks.new("a")
b = doc.blocks.new("b")
c = doc.blocks.new("c")
a.add_blockref("b", (0, 0))
a.add_blockref("c", (0, 0))
b.add_blockref("c", (0, 0))
c.add_blockref("a", (0, 0)) # cycle
detector = BlockCycleDetector(doc)
assert detector.has_cycle("a") is True
assert detector.has_cycle("b") is True
assert detector.has_cycle("c") is True
auditor = Auditor(doc)
auditor.check_block_reference_cycles()
assert (
len(auditor.errors) == 3
), "one entry for each involved block: 'a', 'b', 'c'"
assert auditor.errors[0].code == AuditError.INVALID_BLOCK_REFERENCE_CYCLE
assert auditor.errors[1].code == AuditError.INVALID_BLOCK_REFERENCE_CYCLE
assert auditor.errors[2].code == AuditError.INVALID_BLOCK_REFERENCE_CYCLE
def test_block_cycle_detector(doc):
detector = BlockCycleDetector(doc)
data = {
"a": set("bcd"), # no cycle
"b": set(),
"c": set("x"),
"d": set("xy"),
"e": set("e"), # short cycle
"f": set("g"),
"g": set("h"),
"h": set("i"),
"i": set("f"), # long cycle
"j": set("k"),
"k": set("j"), # short cycle
"x": set(),
"y": set(),
}
detector.blocks = data
assert detector.has_cycle("a") is False
assert detector.has_cycle("b") is False
assert detector.has_cycle("c") is False
assert detector.has_cycle("d") is False
assert detector.has_cycle("e") is True
assert detector.has_cycle("f") is True
assert detector.has_cycle("g") is True
assert detector.has_cycle("h") is True
assert detector.has_cycle("i") is True
assert detector.has_cycle("j") is True
assert detector.has_cycle("k") is True
assert detector.has_cycle("x") is False
assert detector.has_cycle("y") is False
def test_broken_block_cycle_detector(doc):
detector = BlockCycleDetector(doc)
data = {
"a": set("bcd"), # 'd' does not exist
"b": set(),
"c": set(),
}
detector.blocks = data
assert detector.has_cycle("a") is False
assert detector.has_cycle("b") is False
def test_fix_invalid_leader(doc, auditor):
msp = doc.modelspace()
# no creator interface for LEADER (yet)
leader = factory.new("LEADER", doc=doc)
doc.entitydb.add(leader)
msp.add_entity(leader)
assert leader.is_alive is True
leader.audit(auditor)
assert leader.is_alive is False
assert auditor.fixes[-1].code == AuditError.INVALID_VERTEX_COUNT
def test_fix_invalid_insert(doc, auditor):
msp = doc.modelspace()
insert = msp.add_blockref("TEST_INVALID_INSERT", (0, 0))
insert.audit(auditor)
auditor.empty_trashcan() # explicit call required
assert insert.is_alive is False
assert auditor.fixes[-1].code == AuditError.UNDEFINED_BLOCK
def test_fix_insert_scale(doc, auditor):
msp = doc.modelspace()
test_block = "TEST_INSERT"
if test_block not in doc.blocks:
doc.blocks.new(test_block)
insert = msp.add_blockref(
test_block, (0, 0), dxfattribs={"xscale": 0, "yscale": 0, "zscale": 0}
)
insert.audit(auditor)
assert insert.dxf.xscale == 1.0
    assert insert.dxf.yscale == 1.0
    assert insert.dxf.zscale == 1.0
def test_remove_invalid_entities_from_blocks():
# The model space is just a BLOCK!
doc = ezdxf.new()
msp = doc.modelspace()
# hack hack hack!
msp.entity_space.add(DXFTagStorage())
auditor = doc.audit()
assert len(list(msp)) == 0
assert len(auditor.fixes) == 1
def test_remove_standalone_attrib_entities_from_blocks():
# The model space is just a BLOCK!
doc = ezdxf.new()
msp = doc.modelspace()
msp.add_entity(Attrib())
auditor = doc.audit()
assert len(list(msp)) == 0
assert len(auditor.fixes) == 1
def test_fix_invalid_transparency():
doc = ezdxf.new()
msp = doc.modelspace()
line = msp.add_line((0, 0), (1, 0))
# transparency value requires 0x02000000 bit set
line.dxf.unprotected_set("transparency", 0x10000000)
auditor = Auditor(doc)
line.audit(auditor)
assert line.dxf.hasattr("transparency") is False
assert len(auditor.fixes) == 1
```
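A hedged sketch of running the auditor on a document, as the tests above do via `doc.audit()`; `entry.code` is the `AuditError` value used in the assertions above, while the `message` field is an assumption here:
```python
import ezdxf
doc = ezdxf.new()
msp = doc.modelspace()
msp.add_line((0, 0), (1, 0))
auditor = doc.audit()  # checks the document and applies fixes in place
print(f"{len(auditor.errors)} unrecoverable errors, {len(auditor.fixes)} applied fixes")
for entry in auditor.errors:
    # entry.message is assumed to hold a human readable description
    print(entry.code, entry.message)
```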
#### File: tests/test_05_tools/test_517_text_layout.py
```python
from typing import Iterable, List
import pytest
from itertools import permutations
import ezdxf.tools.text_layout as tl
@pytest.mark.parametrize(
"margins,expected",
[
[None, (0, 0, 0, 0)],
[(1,), (1, 1, 1, 1)],
[(1, 2), (1, 2, 1, 2)],
[(1, 2, 3), (1, 2, 3, 2)],
[(1, 2, 3, 4), (1, 2, 3, 4)],
],
)
def test_resolve_margins(margins, expected):
assert tl.resolve_margins(margins) == expected
@pytest.mark.parametrize(
"align,expected",
[
[tl.LayoutAlignment.TOP_LEFT, (0, 0)],
[tl.LayoutAlignment.TOP_CENTER, (-2, 0)],
[tl.LayoutAlignment.TOP_RIGHT, (-4, 0)],
[tl.LayoutAlignment.MIDDLE_LEFT, (0, 3)],
[tl.LayoutAlignment.MIDDLE_CENTER, (-2, 3)],
[tl.LayoutAlignment.MIDDLE_RIGHT, (-4, 3)],
[tl.LayoutAlignment.BOTTOM_LEFT, (0, 6)],
[tl.LayoutAlignment.BOTTOM_CENTER, (-2, 6)],
[tl.LayoutAlignment.BOTTOM_RIGHT, (-4, 6)],
],
)
def test_insert_location(align, expected):
assert tl.insert_location(align, width=4, height=6) == expected
class Rect(tl.ContentRenderer):
def __init__(self, name: str, result: List = None):
if result is None:
result = []
self.result = result # store test results
self.name = name
def render(
self, left: float, bottom: float, right: float, top: float, m=None
) -> None:
self.result.append(
f"{self.name}({left:.1f}, {bottom:.1f}, {right:.1f}, {top:.1f})"
)
def line(self, x1: float, y1: float, x2: float, y2: float, m=None) -> None:
self.result.append(f"LINE({x1:.1f}, {y1:.1f})TO({x2:.1f}, {y2:.1f})")
class TestTopLevelLayout:
@pytest.fixture
def layout1(self):
return tl.Layout(
width=10, height=None, margins=(1, 1), renderer=Rect("Layout1")
)
def test_create_empty_layout_top_left(self, layout1):
# layout1 has no height, only margins
# 1. do layout placing
layout1.place(align=tl.LayoutAlignment.TOP_LEFT)
# 2. render content
layout1.render()
result = layout1.renderer.result
assert len(result) == 1
assert result[0] == "Layout1(0.0, -2.0, 12.0, 0.0)"
def test_create_empty_layout_middle_center(self, layout1):
# layout1 has no height, only margins
# 1. do layout placing
layout1.place(align=tl.LayoutAlignment.MIDDLE_CENTER)
# 2. render content
layout1.render()
result = layout1.renderer.result
assert len(result) == 1
assert result[0] == "Layout1(-6.0, -1.0, 6.0, 1.0)"
def test_add_one_column_by_reference_width(self, layout1):
height = 17
width = layout1.content_width # reference column width
result = layout1.renderer.result # use same result container
layout1.append_column(height=height, renderer=Rect("Col1", result))
assert layout1.total_width == width + 2
assert layout1.total_height == height + 2
layout1.place(align=tl.LayoutAlignment.BOTTOM_LEFT)
layout1.render()
assert len(result) == 2
assert result[0] == "Layout1(0.0, 0.0, 12.0, 19.0)"
assert result[1] == "Col1(1.0, 1.0, 11.0, 18.0)"
def test_add_two_equal_columns(self, layout1):
margins = (1,)
layout1.append_column(
width=5, height=10, gutter=2, margins=margins, renderer=Rect("Col1")
)
layout1.append_column(
width=7, height=20, margins=margins, renderer=Rect("Col2")
)
# width1 + margins + gutter + width2 + margins
assert layout1.content_width == (5 + 2 + 2 + 7 + 2)
# max(height) + margins
assert layout1.content_height == (20 + 2)
def test_bounding_box_for_not_placed_layout(self, layout1):
# applies default alignment top/left, margins = (1, 1)
layout1.append_column(10, 10)
bbox = layout1.bbox()
assert bbox.extmin == (0, -12) # left/bottom
assert bbox.extmax == (12, 0) # right/top
def test_bounding_box_for_placed_layout(self, layout1):
# margins = (1, 1)
layout1.append_column(10, 10)
layout1.place(0, 0, tl.LayoutAlignment.MIDDLE_CENTER)
bbox = layout1.bbox()
assert bbox.extmin == (-6, -6) # left/bottom
assert bbox.extmax == (6, 6) # right/top
def test_next_existing_column(self, layout1):
layout1.append_column(height=10)
layout1.append_column(height=10)
assert len(layout1) == 2
assert layout1.current_column_index == 0
layout1.next_column()
assert layout1.current_column_index == 1
def test_next_column_creates_a_new_column(self, layout1):
layout1.append_column(height=10)
assert len(layout1) == 1
assert layout1.current_column_index == 0
layout1.next_column()
assert layout1.current_column_index == 1
assert len(layout1) == 2, "a new column should be created"
class TestColumn:
@pytest.fixture
def c1(self):
return tl.Column(
# margins = top, right, bottom, left - same order as for CSS
width=5,
height=7,
margins=(1, 2, 3, 4),
renderer=Rect("C1"),
)
def test_size_calculation(self, c1):
c1.place(0, 0)
assert c1.content_width == 5
assert c1.content_height == 7
assert c1.total_width == 2 + 5 + 4
assert c1.total_height == 1 + 7 + 3
def test_render(self, c1):
c1.place(0, 0)
c1.render()
result = c1.renderer.result
assert result[0] == "C1(0.0, -11.0, 11.0, 0.0)"
def test_paragraph_available_line_content_space():
par = tl.Paragraph(width=12, indent=(0.7, 0.5, 0.9))
assert par.line_width(first=True) == 12 - 0.7 - 0.9
assert par.line_width(first=False) == 12 - 0.5 - 0.9
class TestParagraphWithUnrestrictedHeight:
# default values:
# column width = 10
# content width = 3
# space width = 0.5
@pytest.fixture
def par(self):
# Paragraph alignment is not important for content distribution,
# because the required space is independent from alignment (left,
# right, center or justified).
# This may change by implementing regular tabulator support.
return tl.Paragraph(width=10, renderer=Rect("PAR"))
def test_empty_paragraph_dimensions(self, par):
assert par.content_height == 0
assert par.content_width == 10
def test_render_empty_paragraph(self, par):
par.place(0, 0)
par.render()
result = par.renderer.result
assert len(result) == 1
assert result[0] == "PAR(0.0, 0.0, 10.0, 0.0)"
def test_distribute_invalid_content(self, par):
par.append_content(str2cells("ttt"))
with pytest.raises(ValueError):
par.distribute_content(height=None)
def test_distribute_common_case_without_nbsp(self, par):
# column width = 10
# content width = 3
# space width = 0.5
par.append_content(str2cells("t t t t t t t t t"))
par.distribute_content(height=None)
assert lines2str(par) == [
"t t t", # width = 3x3 + 2x0.5 = 10
"t t t", # remove line breaking spaces!
"t t t",
]
def test_distribute_with_nbsp(self, par):
# column width = 10
# content width = 3
# space width = 0.5
par.append_content(str2cells("t t t~t t t"))
par.distribute_content(height=None)
assert lines2str(par) == [
"t t", # t~t does not fit and goes to next line
"t~t t", # width = 3x3 + 2x0.5 = 10
"t",
]
def test_distribute_too_long_lines(self, par):
# column width = 10
par.append_content(str2cells("t t t", content=12))
par.distribute_content(height=None)
assert lines2str(par) == ["t", "t", "t"]
def test_distribute_too_long_lines_including_nbsp(self, par):
# column width = 10
par.append_content(str2cells("t~t~t t~t t", content=5))
par.distribute_content(height=None)
assert lines2str(par) == [
"t~t~t", # width = 3x5 + 2x0.5 = 17
"t~t", # width = 2x5 + 0.5 = 10.5
"t",
]
class TestParagraphWithRestrictedHeight:
# default values:
# column width = 10
# content width = 3
# space width = 0.5
# cap height = 1,
# line spacing 3-on-5 by 100% = 1.667
THREE_LINE_SPACE = tl.leading(1, 1) * 2 + 1
@pytest.fixture
def par(self):
# Paragraph alignment is not important for content distribution.
return tl.Paragraph(width=10, renderer=Rect("PAR"))
def test_distribute_with_exact_height_match(self, par):
par.append_content(str2cells("t t t t t t t t t"))
par.distribute_content(height=self.THREE_LINE_SPACE)
assert lines2str(par) == [
"t t t", # width = 3x3 + 2x0.5 = 10
"t t t",
"t t t",
]
def test_distribute_with_one_line_left_over(self, par):
par.append_content(str2cells("t t t t t t t t t"))
        # Reduce the available space by a small amount, so the paragraph has
        # room for only 2 lines:
height = self.THREE_LINE_SPACE - 0.01
leftover = par.distribute_content(height=height)
assert lines2str(par) == [
"t t t",
"t t t",
]
leftover.distribute_content(height=1)
assert lines2str(leftover) == ["t t t"]
def test_distribute_with_all_lines_left_over(self, par):
par.append_content(str2cells("t t t~t t t t t t"))
# Paragraph has no space at all:
leftover = par.distribute_content(height=0)
assert lines2str(par) == []
# None = unrestricted height
leftover.distribute_content(height=None)
assert lines2str(leftover) == [
"t t",
"t~t t",
"t t t",
"t",
]
def set_paragraph_content(flow):
flow.append_content(str2cells("t t t t t t t t t"))
flow.distribute_content()
class TestParagraphLeftAlignment:
# default values:
# content width = 3
# space width = 0.5
def test_without_indentation(self):
par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.LEFT)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 0
def test_left_indentation(self):
par = tl.Paragraph(
width=12, indent=(0.7, 0.5, 0), align=tl.ParagraphAlignment.LEFT
)
set_paragraph_content(par)
par.place(0, 0)
lines = list(par)
# first line:
assert par.line_width(True) == 12 - 0.7 # available content space
assert lines[0].final_location()[0] == 0.7
assert lines[0].total_width == 10
# remaining lines:
for line in lines[1:]:
assert par.line_width(False) == 12 - 0.5 # available content space
assert line.total_width == 10
assert line.final_location()[0] == 0.5
def test_move_tab_to_next_line_if_following_content_does_not_fit(self):
result = []
par = tl.Paragraph(width=10, tab_stops=[tl.TabStop(4)])
par.append_content(str2cells("t#t", content=6, result=result))
# The tab (#) should move the following text to the tab stop
# in the next line at position 4.
par.distribute_content()
par.place(0, 0)
par.render()
assert result[0] == "Text(0.0, -1.0, 6.0, 0.0)"
assert result[1] == "Text(4.0, -2.7, 10.0, -1.7)", "x1 has to be 4.0"
class TestParagraphRightAlignment:
# default values:
# content width = 3
# space width = 0.5
def test_without_indentation(self):
par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.RIGHT)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 2
def test_right_indentation(self):
par = tl.Paragraph(
width=12, indent=(0.5, 0.5, 0.5), align=tl.ParagraphAlignment.RIGHT
)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 1.5 # 12 - 0.5 - 10
class TestParagraphCenterAlignment:
# default values:
# content width = 3
# space width = 0.5
def test_without_indentation(self):
par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.CENTER)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 1
def test_left_indentation(self):
par = tl.Paragraph(
width=12, indent=(0.5, 0.5, 0), align=tl.ParagraphAlignment.CENTER
)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 1.25 # 0.5 + (11.5 - 10) / 2
def test_right_indentation(self):
par = tl.Paragraph(
width=12, indent=(0, 0, 0.5), align=tl.ParagraphAlignment.CENTER
)
set_paragraph_content(par)
par.place(0, 0)
for line in par:
assert line.total_width == 10
assert line.final_location()[0] == 0.75 # (11.5 - 10) / 2
class TestParagraphJustifiedAlignment:
# default values:
# content width = 3
# space width = 0.5
def test_without_indentation(self):
par = tl.Paragraph(width=12, align=tl.ParagraphAlignment.JUSTIFIED)
set_paragraph_content(par)
par.place(0, 0)
lines = list(par)
for line in lines[:-1]:
assert line.total_width == 12 # expand across paragraph width
assert line.final_location()[0] == 0
# last line is not expanded
last_line = lines[-1]
assert last_line.total_width == 10
assert last_line.final_location()[0] == 0
def test_with_indentation(self):
par = tl.Paragraph(
width=12,
indent=(0.7, 0.5, 0.5),
align=tl.ParagraphAlignment.JUSTIFIED,
)
set_paragraph_content(par)
par.place(0, 0)
lines = list(par)
# first line:
assert lines[0].total_width == 10.8 # 12 - (0.7 + 0.5)
assert lines[0].final_location()[0] == 0.7
# remaining lines:
for line in lines[1:-1]:
assert line.total_width == 11 # 12 - (0.5 + 0.5)
assert line.final_location()[0] == 0.5
# last line is not expanded:
assert lines[-1].total_width == 10
assert lines[-1].final_location()[0] == 0.5
class TestVerticalCellAlignment:
@staticmethod
def build_line(align):
line = tl.LeftLine(width=7)
big0 = tl.Text(width=3, height=3)
small = tl.Text(width=1, height=1, valign=align, renderer=Rect("CELL"))
big1 = tl.Text(width=3, height=3)
line.append(big0)
line.append(small)
line.append(big1)
line.place(0, 0)
return line
def test_line_properties(self):
line = self.build_line(tl.CellAlignment.BOTTOM)
assert len(list(line)) == 3
assert line.total_width == 7
assert line.total_height == 3
def test_bottom_alignment(self):
line = self.build_line(tl.CellAlignment.BOTTOM)
big0, small, big1 = line
# final location is always the top/left corner of the cell:
assert big0.final_location() == (0, 0)
assert small.final_location() == (3, -2)
assert big1.final_location() == (4, 0)
small.render()
result = small.renderer.result
# left, bottom, right, top
assert result[0] == "CELL(3.0, -3.0, 4.0, -2.0)"
def test_center_alignment(self):
line = self.build_line(tl.CellAlignment.CENTER)
big0, small, big1 = line
# final location is always the top/left corner of the cell:
assert big0.final_location() == (0, 0)
assert small.final_location() == (3, -1)
assert big1.final_location() == (4, 0)
small.render()
result = small.renderer.result
# left, bottom, right, top
assert result[0] == "CELL(3.0, -2.0, 4.0, -1.0)"
def test_top_alignment(self):
line = self.build_line(tl.CellAlignment.TOP)
big0, small, big1 = line
# final location is always the top/left corner of the cell:
assert big0.final_location() == (0, 0)
assert small.final_location() == (3, 0)
assert big1.final_location() == (4, 0)
small.render()
result = small.renderer.result
# left, bottom, right, top
assert result[0] == "CELL(3.0, -1.0, 4.0, 0.0)"
def test_mixed_alignment(self):
big0 = tl.Text(width=3, height=3)
bottom = tl.Text(width=1, height=1, valign=tl.CellAlignment.BOTTOM)
center = tl.Text(width=1, height=1, valign=tl.CellAlignment.CENTER)
top = tl.Text(width=1, height=1, valign=tl.CellAlignment.TOP)
big1 = tl.Text(width=3, height=3)
line = tl.LeftLine(width=9)
for cell in [big0, top, center, bottom, big1]:
line.append(cell)
line.place(0, 0)
assert bottom.final_location() == (5, -2)
assert center.final_location() == (4, -1)
assert top.final_location() == (3, 0)
class StrokeRender(Rect):
def line(self, x1: float, y1: float, x2: float, y2: float, m=None) -> None:
length = x2 - x1
if y1 < -1:
location = "UNDERLINE"
elif y1 > 0:
location = "OVERLINE"
else:
location = "STRIKE_THROUGH"
self.result.append(f"{self.name}({location}, {length:.1f})")
class TestTextStrokeRendering:
@staticmethod
def render_text(stroke, result):
text = tl.Text(
width=3,
height=1,
stroke=stroke,
renderer=StrokeRender("STROKE", result),
)
text.place(0, 0)
tl.render_text_strokes([text])
@pytest.mark.parametrize(
"stroke,expected",
[
(tl.Stroke.UNDERLINE, "STROKE(UNDERLINE, 3.0)"),
(tl.Stroke.OVERLINE, "STROKE(OVERLINE, 3.0)"),
(tl.Stroke.STRIKE_THROUGH, "STROKE(STRIKE_THROUGH, 3.0)"),
],
)
def test_simple_stroke(self, stroke, expected):
result = []
self.render_text(stroke, result)
assert result[0] == expected
class TestTextContinueStroke:
@staticmethod
def make_text(stroke, result):
text = tl.Text(
width=3,
height=1,
stroke=stroke,
renderer=StrokeRender("STROKE", result),
)
text.place(0, 0)
return text
def test_continue_stroke_across_one_space(self):
result = []
word = self.make_text(tl.Stroke.UNDERLINE + tl.Stroke.CONTINUE, result)
space = tl.Space(width=0.5)
tl.render_text_strokes([word, space, word])
assert len(result) == 2
assert result[0] == "STROKE(UNDERLINE, 3.5)", "space should be included"
assert result[1] == "STROKE(UNDERLINE, 3.0)", "no following space"
def test_continue_stroke_across_multiple_spaces(self):
result = []
word = self.make_text(tl.Stroke.UNDERLINE + tl.Stroke.CONTINUE, result)
space = tl.Space(width=0.5)
nbsp = tl.NonBreakingSpace(width=0.5)
tl.render_text_strokes([word, space, nbsp, space, word])
assert len(result) == 2
assert (
result[0] == "STROKE(UNDERLINE, 4.5)"
), "3 spaces should be included"
assert result[1] == "STROKE(UNDERLINE, 3.0)", "no following spaces"
def test_do_not_continue_stroke_automatically(self):
result = []
word = self.make_text(tl.Stroke.UNDERLINE, result)
space = tl.Space(width=0.5)
tl.render_text_strokes([word, space, word])
assert len(result) == 2
assert result[0] == "STROKE(UNDERLINE, 3.0)", "do not continue stroke"
class TestFractionCell:
@staticmethod
def fraction(stacking, x, y):
result = []
a = tl.Text(1, 1, renderer=Rect("A", result))
b = tl.Text(1, 1, renderer=Rect("B", result))
fr = tl.Fraction(a, b, stacking, renderer=Rect("Fraction", result))
fr.place(x, y)
fr.render()
return result
def test_a_over_b(self):
# y = total height = (a.total_height + b.total_height) * HEIGHT_SCALE
result = self.fraction(
tl.Stacking.OVER, x=0, y=2 * tl.Fraction.HEIGHT_SCALE
)
assert len(result) == 2
assert result[0] == "A(0.0, 1.4, 1.0, 2.4)" # L, B, R, T
assert result[1] == "B(0.0, 0.0, 1.0, 1.0)" # L, B, R, T
def test_a_over_line_b(self):
        # y = total height = (a.total_height + b.total_height) * HEIGHT_SCALE
result = self.fraction(
tl.Stacking.LINE, x=0, y=2 * tl.Fraction.HEIGHT_SCALE
)
assert len(result) == 3
assert result[0] == "A(0.0, 1.4, 1.0, 2.4)" # L, B, R, T
assert result[1] == "B(0.0, 0.0, 1.0, 1.0)" # L, B, R, T
assert result[2] == "LINE(0.0, 1.2)TO(1.0, 1.2)"
def test_a_slanted_b(self):
        # y = total height = (a.total_height + b.total_height)
result = self.fraction(tl.Stacking.SLANTED, x=0, y=2)
assert len(result) == 3
assert result[0] == "A(0.0, 1.0, 1.0, 2.0)" # L, B, R, T
assert result[1] == "B(1.0, 0.0, 2.0, 1.0)" # L, B, R, T
assert result[2] == "LINE(0.0, 0.0)TO(2.0, 2.0)"
def str2cells(
s: str, content: float = 3, space: float = 0.5, tab: float = 0, result=None
):
# t ... text cell
# f ... fraction cell
    # ' ' ... space
# ~ ... non breaking space (nbsp)
# # ... tabulator
if result is None:
result = []
for c in s.lower():
if c == "t":
yield tl.Text(
width=content, height=1, renderer=Rect("Text", result=result)
)
elif c == "f":
cell = tl.Text(content / 2, 1)
yield tl.Fraction(
top=cell,
bottom=cell,
stacking=tl.Stacking.SLANTED,
renderer=Rect("Fraction", result=result),
)
elif c == " ":
yield tl.Space(width=space)
elif c == "~":
yield tl.NonBreakingSpace(width=space)
elif c == "#":
yield tl.Tabulator(width=tab) # Tabulators do not need a width
else:
raise ValueError(f'unknown cell type "{c}"')
CELL2STR = {
tl.Text: "t",
tl.Fraction: "f",
tl.Space: " ",
tl.NonBreakingSpace: "~",
tl.Tabulator: "#",
}
def cells2str(cells: Iterable[tl.Cell]) -> str:
return "".join(CELL2STR[type(cell)] for cell in cells)
def lines2str(lines):
return [cells2str(line) for line in lines]
def test_cell_converter():
assert cells2str(str2cells("tf ~#")) == "tf ~#"
with pytest.raises(ValueError):
list(str2cells("x"))
with pytest.raises(KeyError):
cells2str([0])
class TestNormalizeCells:
@pytest.mark.parametrize("content", ["tt", "tf", "ft", "ff"])
def test_no_glue_between_content_raises_value_error(self, content):
cells = str2cells(content)
with pytest.raises(ValueError):
list(tl.normalize_cells(cells))
@pytest.mark.parametrize("content", ["t~f", "f~f", "f~t"])
def test_ignore_non_breaking_space_between_text_and_fraction(self, content):
cells = str2cells(content)
result = tl.normalize_cells(cells)
assert len(result) == 3
def test_ignore_pending_non_breaking_space(self):
cells = str2cells("t~t~")
result = tl.normalize_cells(cells)
assert len(result) == 3
@pytest.mark.parametrize("content", ["t~t", "t~~t", "t~~~t"])
def test_preserve_multiple_nbsp(self, content):
cells = tl.normalize_cells(str2cells(content))
assert cells2str(cells) == content
@pytest.mark.parametrize(
"content",
[
"t~ t",
"t ~t",
"t~~ t",
"t ~~t",
"~t",
"~~t",
"t#~t",
"t~#t",
"t~#~t",
],
)
def test_replace_useless_nbsp_by_spaces(self, content):
cells = tl.normalize_cells(str2cells(content))
assert cells2str(cells) == content.replace("~", " ")
@pytest.mark.parametrize("content", ["t t", "t t", "t t"])
def test_preserve_multiple_spaces(self, content):
cells = tl.normalize_cells(str2cells(content))
assert cells2str(cells) == content
def test_remove_pending_glue(self):
for glue in permutations([" ", "~", " ", "#"]):
content = "t" + "".join(glue)
cells = list(tl.normalize_cells(str2cells(content)))
assert cells2str(cells) == "t"
@pytest.mark.parametrize("content", [" t", " t", " t"])
def test_preserve_prepending_space(self, content):
cells = list(tl.normalize_cells(str2cells(content)))
assert cells2str(cells) == content
class TestSpace:
def test_shrink_space(self):
space = tl.Space(1, min_width=0.1)
space.resize(0.5)
assert space.total_width == 0.5
space.resize(0)
assert space.total_width == 0.1
def test_default_min_width(self):
space = tl.Space(1)
space.resize(0.5)
assert space.total_width == 1.0
def test_expand_restricted_space(self):
space = tl.Space(1, max_width=2)
space.resize(1.5)
assert space.total_width == 1.5
space.resize(3)
assert space.total_width == 2
def test_expand_unrestricted_space(self):
space = tl.Space(1)
space.resize(1.5)
assert space.total_width == 1.5
space.resize(30)
assert space.total_width == 30
def test_total_height_is_zero(self):
assert tl.Space(1).total_height == 0
def test_non_breaking_space_to_space(self):
space = tl.NonBreakingSpace(1).to_space()
assert type(space) == tl.Space
def test_can_shrink(self):
assert tl.Space(1).can_shrink is False
assert tl.Space(1, min_width=0.5).can_shrink is True
def test_can_grow(self):
assert tl.Space(1).can_grow is True
assert tl.Space(1, max_width=1.0).can_grow is False
class TestRigidConnection:
def test_rigid_connection(self):
cells = tl.normalize_cells(str2cells("t~t t t"))
result = tl.group_non_breakable_cells(cells)
assert isinstance(result[0], tl.RigidConnection)
assert isinstance(result[1], tl.Space)
assert isinstance(result[2], tl.Text)
assert isinstance(result[3], tl.Space)
assert isinstance(result[4], tl.Text)
@pytest.mark.parametrize("content", ["t~t", "t~t~t"])
def test_create_one_connection(self, content):
cells = tl.normalize_cells(str2cells(content))
result = tl.group_non_breakable_cells(cells)
assert len(result) == 1
@pytest.mark.parametrize("content", ["t~t t~t", "t~t~t t~t~t"])
def test_create_two_connections(self, content):
cells = tl.normalize_cells(str2cells(content))
result = tl.group_non_breakable_cells(cells)
assert len(result) == 3
assert isinstance(result[1], tl.Space)
def test_ignore_pending_non_breaking_space(self):
cells = tl.normalize_cells(str2cells("t~t~"))
result = tl.group_non_breakable_cells(cells)
assert len(result) == 1
def render_line_with_tabs(
line: tl.AbstractLine,
cells: Iterable[tl.Cell],
):
result = []
tab = None
tab_renderer = Rect("TAB-TEXT", result=result)
text_renderer = Rect("TEXT", result=result)
for cell in cells:
cell.renderer = text_renderer
if tab is not None:
cell.renderer = tab_renderer
line.append_with_tab(cell, tab)
tab = None
continue
if isinstance(cell, tl.Tabulator):
tab = cell
else:
tab = None
line.append(cell)
line.place(0, 0)
line.render()
return result
class TestLeftLine:
def test_setup(self):
line = tl.LeftLine(10)
assert line.line_width == 10
assert line.total_width == 0
def test_line_height_is_defined_by_max_content_height(self):
line = tl.LeftLine(10)
line.append(tl.Text(1, 1))
assert line.total_height == 1
line.append(tl.Text(1, 2))
assert line.total_height == 2
line.append(tl.Text(1, 3))
assert line.total_height == 3
def test_line_total_width_is_defined_by_content(self):
line = tl.LeftLine(10)
line.append(tl.Text(1, 1))
assert line.total_width == 1
line.append(tl.Text(1, 1))
assert line.total_width == 2
line.append(tl.Text(1, 1))
assert line.total_width == 3
def test_fill_until_line_is_full(self):
line = tl.LeftLine(10)
assert line.append(tl.Text(5, 1)) == tl.AppendType.SUCCESS
assert line.append(tl.Text(5, 1)) == tl.AppendType.SUCCESS
assert line.append(tl.Text(5, 1)) == tl.AppendType.FAIL
assert line.total_width <= line.line_width
def test_left_tab(self):
line = tl.LeftLine(
width=20,
tab_stops=[
tl.TabStop(6, tl.TabStopType.LEFT),
tl.TabStop(12, tl.TabStopType.LEFT),
],
)
cells = str2cells("t#t#t t", content=2, space=0.5)
result = render_line_with_tabs(line, cells)
assert result[0] == "TEXT(0.0, -1.0, 2.0, 0.0)"
assert result[1] == "TAB-TEXT(6.0, -1.0, 8.0, 0.0)"
assert result[2] == "TAB-TEXT(12.0, -1.0, 14.0, 0.0)"
assert result[3] == "TEXT(14.5, -1.0, 16.5, 0.0)"
def test_left_tab_without_tab_stops(self):
line = tl.LeftLine(20)
cells = str2cells("t#t", content=2, space=0.5, tab=0.5)
result = render_line_with_tabs(line, cells)
assert result[0] == "TEXT(0.0, -1.0, 2.0, 0.0)"
        # replace tab by a space of width 0.5
assert result[1] == "TAB-TEXT(2.5, -1.0, 4.5, 0.0)"
def test_center_tab(self):
line = tl.LeftLine(
width=20,
tab_stops=[
tl.TabStop(6, tl.TabStopType.CENTER),
tl.TabStop(12, tl.TabStopType.CENTER),
],
)
cells = str2cells("t#t#t t", content=2, space=0.5)
result = render_line_with_tabs(line, cells)
assert result[0] == "TEXT(0.0, -1.0, 2.0, 0.0)"
assert result[1] == "TAB-TEXT(5.0, -1.0, 7.0, 0.0)"
assert result[2] == "TAB-TEXT(11.0, -1.0, 13.0, 0.0)"
assert result[3] == "TEXT(13.5, -1.0, 15.5, 0.0)"
def test_right_tab(self):
line = tl.LeftLine(
width=20,
tab_stops=[
tl.TabStop(6, tl.TabStopType.RIGHT),
tl.TabStop(12, tl.TabStopType.RIGHT),
],
)
cells = str2cells("t#t#t t", content=2, space=0.5)
result = render_line_with_tabs(line, cells)
assert result[0] == "TEXT(0.0, -1.0, 2.0, 0.0)"
assert result[1] == "TAB-TEXT(4.0, -1.0, 6.0, 0.0)"
assert result[2] == "TAB-TEXT(10.0, -1.0, 12.0, 0.0)"
assert result[3] == "TEXT(12.5, -1.0, 14.5, 0.0)"
def tab_stops(*values):
return [tl.TabStop(v) for v in values]
def test_shift_tab_stop_left_in_range():
result = tl.shift_tab_stops(tab_stops(4, 8), -2, 10)
assert result[0].pos == 2
assert result[1].pos == 6
def test_shift_tab_stops_beyond_left_border():
result = tl.shift_tab_stops(tab_stops(4, 8), -4, 10)
assert len(result) == 1, "tab stop at pos=0 should be removed"
assert result[0].pos == 4
def test_shift_tab_stops_beyond_right_border():
result = tl.shift_tab_stops(tab_stops(4, 8), 4, 8)
assert len(result) == 1
assert (
result[0].pos == 8
), "tab stop at the right border should be preserved"
def test_empty_paragraph():
cap_height = 2
p = tl.EmptyParagraph(cap_height, 1)
assert p.total_width == 0
assert p.total_height == cap_height
assert p.distance_to_next_paragraph > cap_height / 2
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_05_tools/test_528_difftags.py
```python
import pytest
from ezdxf.tools.difftags import diff_tags, OpCode, round_tags
from ezdxf.lldxf.tags import Tags, dxftag
A = dxftag(0, "TagA")
B = dxftag(0, "TagB")
C = dxftag(0, "TagC")
D = dxftag(0, "TagD")
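# The 5-tuples asserted below appear to follow the difflib get_opcodes()
# convention: (opcode, i1, i2, j1, j2) means a[i1:i2] corresponds to b[j1:j2].
# A minimal sketch under that assumption (_print_diff_sketch is an
# illustrative helper, not a library function):
def _print_diff_sketch(a: Tags, b: Tags) -> None:
    for op, i1, i2, j1, j2 in diff_tags(a, b):
        print(op, list(a[i1:i2]), "->", list(b[j1:j2]))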
def test_equal_string_tags():
a = Tags([A, B])
result = list(diff_tags(a, a))
assert result == [(OpCode.equal, 0, 2, 0, 2)]
def test_round_tags():
a = Tags([dxftag(40, 1.0001), dxftag(40, 2.0001)])
b = list(round_tags(a, ndigits=3))
assert b[0].value == 1.000
assert b[1].value == 2.000
def test_equal_rounded_float_tags():
a = Tags([dxftag(40, 1.0001), dxftag(40, 2.0001)])
b = Tags([dxftag(40, 1.0002), dxftag(40, 2.0002)])
result = list(diff_tags(a, b, ndigits=3))
assert result == [(OpCode.equal, 0, 2, 0, 2)]
def test_equal_vertex_tags():
a = Tags([A, dxftag(10, (1, 2, 3))])
result = list(diff_tags(a, a))
assert result == [(OpCode.equal, 0, 2, 0, 2)]
def test_prepend_tag():
a = Tags([A, B])
b = Tags([C, A, B])
result = list(diff_tags(a, b))
assert result == [
(OpCode.insert, 0, 0, 0, 1),
(OpCode.equal, 0, 2, 1, 3),
]
def test_insert_tag():
a = Tags([A, B])
b = Tags([A, C, B])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 1, 0, 1),
(OpCode.insert, 1, 1, 1, 2),
(OpCode.equal, 1, 2, 2, 3),
]
def test_append_tag():
a = Tags([A, B])
b = Tags([A, B, C])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 2, 0, 2),
(OpCode.insert, 2, 2, 2, 3),
]
def test_replace_last_tag():
a = Tags([A, B])
b = Tags([A, C])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 1, 0, 1),
(OpCode.replace, 1, 2, 1, 2),
]
def test_replace_inner_tag():
a = Tags([A, B, C])
b = Tags([A, D, C])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 1, 0, 1),
(OpCode.replace, 1, 2, 1, 2),
(OpCode.equal, 2, 3, 2, 3),
]
def test_delete_last_tag():
a = Tags([A, B, C])
b = Tags([A, B])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 2, 0, 2),
(OpCode.delete, 2, 3, 2, 2),
]
def test_delete_inner_tag():
a = Tags([A, B, C])
b = Tags([A, C])
result = list(diff_tags(a, b))
assert result == [
(OpCode.equal, 0, 1, 0, 1),
(OpCode.delete, 1, 2, 1, 1),
(OpCode.equal, 2, 3, 1, 2),
]
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_06_math/test_604_banded_matrix.py
```python
from typing import Iterable
import pytest
import math
from ezdxf.math import (
Matrix,
detect_banded_matrix,
compact_banded_matrix,
BandedMatrixLU,
gauss_vector_solver,
banded_matrix,
)
BANDED_MATRIX = Matrix(
matrix=[
[3, 1, 0, 0, 0, 0, 0],
[4, 1, 5, 0, 0, 0, 0],
[9, 2, 6, 5, 0, 0, 0],
[0, 3, 5, 8, 9, 0, 0],
[0, 0, 7, 9, 3, 2, 0],
[0, 0, 0, 3, 8, 4, 6],
[0, 0, 0, 0, 2, 4, 4],
]
)
TRICKY = Matrix(
matrix=[
[3, 1, 0, 0, 0, 0, 1],
[4, 1, 5, 0, 0, 0, 0],
[9, 2, 6, 5, 0, 0, 0],
[0, 3, 5, 8, 9, 0, 0],
[0, 0, 7, 9, 3, 2, 0],
[0, 0, 0, 3, 8, 4, 6],
[0, 1, 0, 0, 2, 4, 4],
]
)
def are_close_vectors(
v1: Iterable[float], v2: Iterable[float], abs_tol: float = 1e-12
):
for i, j in zip(v1, v2):
assert math.isclose(i, j, abs_tol=abs_tol)
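# (m1, m2) below are the lower and upper bandwidths, i.e. the number of
# non-zero sub- and super-diagonals: BANDED_MATRIX has entries two places
# below the main diagonal (e.g. the 9 at row 2, column 0) and one place
# above it (e.g. the 5 at row 1, column 2), hence (2, 1).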
def test_detect_banded_matrix():
m1, m2 = detect_banded_matrix(BANDED_MATRIX)
assert (m1, m2) == (2, 1)
m1, m2 = detect_banded_matrix(TRICKY, check_all=False)
assert (m1, m2) == (2, 1)
m1, m2 = detect_banded_matrix(TRICKY, check_all=True)
assert (m1, m2) == (5, 6)
assert detect_banded_matrix(Matrix(shape=(10, 10))) == (0, 0)
identity = Matrix.identity(shape=(10, 10))
assert detect_banded_matrix(identity) == (0, 0)
def test_compact_banded_matrix():
m1, m2 = detect_banded_matrix(BANDED_MATRIX)
m = compact_banded_matrix(BANDED_MATRIX, m1, m2)
assert m.ncols == 4
assert m.nrows == 7
assert m.col(0) == [0, 0, 9, 3, 7, 3, 2]
assert m.col(1) == [0, 4, 2, 5, 9, 8, 4]
assert m.col(2) == [3, 1, 6, 8, 3, 4, 4]
assert m.col(3) == [1, 5, 5, 9, 2, 6, 0]
B1 = [5, 3, 2, 6, 8, 2, 1]
B2 = [9, 1, 7, 6, 4, 5, 0]
B3 = [0, 9, 3, 7, 1, 9, 9]
CHK1 = gauss_vector_solver(BANDED_MATRIX, B1)
CHK2 = gauss_vector_solver(BANDED_MATRIX, B2)
CHK3 = gauss_vector_solver(BANDED_MATRIX, B3)
def test_solve_banded_matrix_vector():
m, m1, m2 = banded_matrix(BANDED_MATRIX)
lu = BandedMatrixLU(m, m1, m2)
are_close_vectors(lu.solve_vector(B1), CHK1)
are_close_vectors(lu.solve_vector(B2), CHK2)
are_close_vectors(lu.solve_vector(B3), CHK3)
def test_solve_banded_matrix_matrix():
m, m1, m2 = banded_matrix(BANDED_MATRIX)
lu = BandedMatrixLU(m, m1, m2)
r = lu.solve_matrix(list(zip(B1, B2, B3)))
are_close_vectors(r.col(0), CHK1)
are_close_vectors(r.col(1), CHK2)
are_close_vectors(r.col(2), CHK3)
assert math.isclose(lu.determinant(), BANDED_MATRIX.determinant())
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_06_math/test_607_perlin_noise.py
```python
from ezdxf.math.perlin import snoise2, snoise3
def test_simplex_2d_range():
for i in range(-100, 100, 10):
x = i * 0.49
y = -i * 0.67
n = snoise2(x, y)
assert -1.0 <= n <= 1.0, (x, y, n)
def test_simplex_2d_octaves_range():
for i in range(-100, 100, 10):
for o in range(10):
x = -i * 0.49
y = i * 0.67
n = snoise2(x, y)
            assert -1.0 <= n <= 1.0, (x, y, n)
def test_simplex_3d_range():
for i in range(-100, 100, 10):
x = i * 0.31
y = -i * 0.7
z = i * 0.19
n = snoise3(x, y, z)
assert -1.0 <= n <= 1.0, (x, y, z, n)
def test_simplex_3d_octaves_range():
for i in range(-100, 100, 10):
x = -i * 0.12
y = i * 0.55
z = i * 0.34
for o in range(10):
n = snoise3(x, y, z)
assert -1.0 <= n <= 1.0, (x, y, z, o + 1, n)
```
#### File: tests/test_06_math/test_610_ocs.py
```python
from ezdxf.math import OCS, Matrix44, Vec3
EXTRUSION = (0.7081979129501316, 0.0754851955385861, 0.7019670229772758)
def is_close_points(p1, p2, places=6):
for v1, v2 in zip(p1, p2):
if not round(v1, places) == round(v2, places):
return False
return True
def test_wcs_to_ocs():
ocs = OCS(EXTRUSION)
assert is_close_points(
ocs.from_wcs((-9.56460754, 8.44764172, 9.97894327)),
(9.41378764657076, 13.15481838975576, 0.8689258932616031),
places=6,
)
assert is_close_points(
ocs.from_wcs((-1.60085321, 9.29648008, 1.85322122)),
(9.41378764657076, 1.745643639268379, 0.8689258932616031),
places=6,
)
assert is_close_points(
ocs.from_wcs((-3.56027455, 9.08762984, 3.85249348)),
(9.41378764657076, 4.552784531093068, 0.8689258932616031),
places=6,
)
assert is_close_points(
ocs.from_wcs((-5.53851623, 8.87677359, 5.87096886)),
(9.41378764657076, 7.386888158025531, 0.8689258932616031),
places=6,
)
def test_ocs_to_wcs():
ocs = OCS(EXTRUSION)
wcs = ocs.to_wcs((9.41378764657076, 13.15481838975576, 0.8689258932616031))
assert is_close_points(
wcs,
(-9.56460754, 8.44764172, 9.97894327),
places=6,
)
assert is_close_points(
ocs.to_wcs((9.41378764657076, 1.745643639268379, 0.8689258932616031)),
(-1.60085321, 9.29648008, 1.85322122),
places=6,
)
assert is_close_points(
ocs.to_wcs((9.41378764657076, 4.552784531093068, 0.8689258932616031)),
(-3.56027455, 9.08762984, 3.85249348),
places=6,
)
assert is_close_points(
ocs.to_wcs((9.41378764657076, 7.386888158025531, 0.8689258932616031)),
(-5.53851623, 8.87677359, 5.87096886),
places=6,
)
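# Illustrative round-trip sketch: the fixed point pairs above exercise
# from_wcs() and to_wcs() as inverse transformations, so converting a WCS
# point to OCS and back should reproduce it within tolerance.
def test_wcs_ocs_round_trip():
    ocs = OCS(EXTRUSION)
    wcs_point = (-9.56460754, 8.44764172, 9.97894327)
    assert is_close_points(ocs.to_wcs(ocs.from_wcs(wcs_point)), wcs_point, places=6)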
def test_matrix44_to_ocs():
ocs = OCS(EXTRUSION)
matrix = Matrix44.ucs(ocs.ux, ocs.uy, ocs.uz)
assert is_close_points(
matrix.ocs_from_wcs(Vec3(-9.56460754, 8.44764172, 9.97894327)),
(9.41378764657076, 13.15481838975576, 0.8689258932616031),
places=6,
)
def test_matrix44_to_wcs():
ocs = OCS(EXTRUSION)
matrix = Matrix44.ucs(ocs.ux, ocs.uy, ocs.uz)
assert is_close_points(
matrix.ocs_to_wcs(
Vec3(9.41378764657076, 13.15481838975576, 0.8689258932616031)
),
(-9.56460754, 8.44764172, 9.97894327),
places=6,
)
```
#### File: tests/test_06_math/test_616_plane.py
```python
import pytest
from ezdxf.math import Plane, Vec3
def test_init():
p = Plane(Vec3(1, 0, 0), 5)
assert p.normal == (1, 0, 0)
assert p.distance_from_origin == 5
def test_init_form_3p():
p = Plane.from_3p(Vec3(5, 0, 0), Vec3(5, 1, 5), Vec3(5, 0, 1))
assert p.normal == (1, 0, 0)
assert p.distance_from_origin == 5
def test_equal():
p1 = Plane.from_vector((5, 0, 0))
p2 = Plane.from_vector((5, 0, 0))
assert p1 is not p2
assert p1 == p1
def test_init_form_vector():
p = Plane.from_vector((5, 0, 0))
assert p.normal == (1, 0, 0)
assert p.distance_from_origin == 5
with pytest.raises(ZeroDivisionError):
Plane.from_vector((0, 0, 0))
def test_signed_distance_to():
p = Plane.from_vector((5, 0, 0))
assert p.signed_distance_to(Vec3(10, 0, 0)) == 5
assert p.signed_distance_to(Vec3(0, 0, 0)) == -5
def test_distance_to():
p = Plane.from_vector((5, 0, 0))
assert p.distance_to(Vec3(10, 0, 0)) == 5
assert p.distance_to(Vec3(0, 0, 0)) == 5
def test_is_coplanar():
p = Plane.from_vector((5, 0, 0))
assert p.is_coplanar_vertex(Vec3(5, 5, 0)) is True
assert p.is_coplanar_vertex(Vec3(5, 0, 5)) is True
def test_is_coplanar_plane():
p1 = Plane.from_vector((5, 0, 0))
p2 = Plane.from_vector((-1, 0, 0))
assert p1.is_coplanar_plane(p2) is True
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_06_math/test_630b_bezier4p_functions.py
```python
import pytest
import random
from ezdxf.math import (
cubic_bezier_interpolation,
Vec3,
Bezier3P,
quadratic_to_cubic_bezier,
Bezier4P,
have_bezier_curves_g1_continuity,
bezier_to_bspline,
)
def test_vertex_interpolation():
points = [(0, 0), (3, 1), (5, 3), (0, 8)]
result = list(cubic_bezier_interpolation(points))
assert len(result) == 3
c1, c2, c3 = result
p = c1.control_points
assert p[0].isclose((0, 0))
assert p[1].isclose((0.9333333333333331, 0.3111111111111111))
assert p[2].isclose((1.8666666666666663, 0.6222222222222222))
assert p[3].isclose((3, 1))
p = c2.control_points
assert p[0].isclose((3, 1))
assert p[1].isclose((4.133333333333334, 1.3777777777777778))
assert p[2].isclose((5.466666666666667, 1.822222222222222))
assert p[3].isclose((5, 3))
p = c3.control_points
assert p[0].isclose((5, 3))
assert p[1].isclose((4.533333333333333, 4.177777777777778))
assert p[2].isclose((2.2666666666666666, 6.088888888888889))
assert p[3].isclose((0, 8))
def test_quadratic_to_cubic_bezier():
r = random.Random(0)
def random_vec() -> Vec3:
return Vec3(r.uniform(-10, 10), r.uniform(-10, 10), r.uniform(-10, 10))
for i in range(1000):
quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
quadratic_approx = list(quadratic.approximate(10))
cubic = quadratic_to_cubic_bezier(quadratic)
cubic_approx = list(cubic.approximate(10))
assert len(quadratic_approx) == len(cubic_approx)
for p1, p2 in zip(quadratic_approx, cubic_approx):
assert p1.isclose(p2)
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])
# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])
# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])
# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])
# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
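# Sketch of the G1 definition above: for a cubic Bezier curve the end tangent
# direction is proportional to P3-P2 and the start tangent to P1-P0, so B1/B2
# share the unit tangent at the joint while B1/B3 do not. The helpers and the
# test below are illustrative additions; they assume Vec3.normalize() and
# Vec3.isclose() with their usual ezdxf semantics.
def _unit_start_tangent(curve: Bezier4P) -> Vec3:
    cp = curve.control_points
    return (cp[1] - cp[0]).normalize()

def _unit_end_tangent(curve: Bezier4P) -> Vec3:
    cp = curve.control_points
    return (cp[3] - cp[2]).normalize()

def test_g1_tangent_sketch():
    assert _unit_end_tangent(B1).isclose(_unit_start_tangent(B2))
    assert not _unit_end_tangent(B1).isclose(_unit_start_tangent(B3))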
def test_g1_continuity_for_bezier_curves():
assert have_bezier_curves_g1_continuity(B1, B2) is True
assert have_bezier_curves_g1_continuity(B1, B3) is False
assert (
have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False
), "should be outside of tolerance "
assert (
have_bezier_curves_g1_continuity(B1, B5) is False
), "end- and start point should match"
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
assert have_bezier_curves_g1_continuity(D1, B2) is False
assert have_bezier_curves_g1_continuity(B1, D2) is False
assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize("curve", [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
# Degenerated Bezier curves behave like regular curves!
assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize(
"b1,b2",
[
(B1, B2), # G1 continuity, the common case
(B1, B3), # without G1 continuity is also a regular B-spline
(B1, B5), # regular B-spline, but first control point of B5 is lost
],
ids=["G1", "without G1", "gap"],
)
def test_bezier_curves_to_bspline(b1, b2):
bspline = bezier_to_bspline([b1, b2])
# Remove duplicate control point between two adjacent curves:
expected = list(b1.control_points) + list(b2.control_points)[1:]
assert bspline.degree == 3, "should be a cubic B-spline"
assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
points0 = B1.approximate(10)
points1 = bezier_to_bspline([B1]).approximate(10)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
# This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
# Remove duplicate point between the two curves:
points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
points1 = bezier_to_bspline([B1, B2]).approximate(20)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
with pytest.raises(ValueError):
bezier_to_bspline([]) # one or more curves expected
```
#### File: tests/test_07_render/test_714_mleader_render_engine.py
```python
import pytest
import ezdxf
from ezdxf.math import Vec2
from ezdxf.render import mleader
from ezdxf.entities import MText, MultiLeader, Insert
@pytest.fixture(scope="module")
def doc():
return ezdxf.new()
def make_multi_leader(doc) -> MultiLeader:
style = doc.mleader_styles.get("Standard")
ml = MultiLeader.new(doc=doc)
ml.dxf.style_handle = style.dxf.handle
return ml
class TestRenderEngine:
"""The RenderEngine renders DXF primitives from a MULTILEADER entity."""
@pytest.fixture
def ml_mtext(self, doc):
ml = make_multi_leader(doc)
builder = mleader.MultiLeaderMTextBuilder(ml)
builder.set_content("line")
builder.build(insert=Vec2(0, 0))
return ml
def test_add_mtext_content(self, ml_mtext):
engine = mleader.RenderEngine(ml_mtext, ml_mtext.doc)
engine.add_mtext_content()
assert isinstance(engine.entities[0], MText)
if __name__ == "__main__":
pytest.main([__file__])
```
#### File: tests/test_10_issues/test_issue_557_reload_dimension_text_style.py
```python
import pytest
import ezdxf
from ezdxf.lldxf.tagwriter import TagCollector, Tags
TEXTSTYLE_NAME = "TextStyle"
DIMSTYLE_NAME = "DimensionStyle"
FONT_NAME = "any_font.shx"
@pytest.fixture
def doc():
doc = ezdxf.new("R2010", setup=False)
doc.styles.new(TEXTSTYLE_NAME, dxfattribs={"font": FONT_NAME})
doc.dimstyles.new(DIMSTYLE_NAME, dxfattribs={"dimtxsty": TEXTSTYLE_NAME})
return doc
def test_export_dimtxsty(doc):
dimstyle = doc.dimstyles.get(DIMSTYLE_NAME)
style = doc.styles.get(TEXTSTYLE_NAME)
t = TagCollector()
dimstyle.export_dxf(t)
tags = Tags(t.tags)
dimtxsty_handle = tags.get_first_value(340)
assert style.dxf.handle == dimtxsty_handle
def test_reload_dimtxsty(doc, tmpdir):
filename = tmpdir.join("dim_text_style.dxf")
doc.saveas(filename)
# reload file
doc2 = ezdxf.readfile(filename)
style = doc2.styles.get(TEXTSTYLE_NAME)
assert style.dxf.font == FONT_NAME
dimstyle = doc2.dimstyles.get(DIMSTYLE_NAME)
assert dimstyle.dxf.dimtxsty == TEXTSTYLE_NAME
if __name__ == "__main__":
pytest.main([__file__])
``` |
{
"source": "jkjung-avt/caffe_merge_batchnorm",
"score": 2
} |
#### File: jkjung-avt/caffe_merge_batchnorm/merge_bn.py
```python
import os
import sys
import argparse
import logging
import numpy as np
try:
caffe_root = os.environ['HOME'] + '/caffe/'
sys.path.insert(0, caffe_root + 'python')
import caffe
except ImportError:
logging.fatal("Cannot import caffe!")
from caffe.proto import caffe_pb2
from google.protobuf import text_format
# Global variables
bn_maps = {}
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, required=True,
help='.prototxt file')
parser.add_argument('--weights', type=str, required=True,
help='.caffemodel file')
return parser
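# Walk forward from a layer's output blob through any BatchNorm/Scale layers
# that consume it, record their names in bn_maps[name], and return the blob
# name after the last of them, so the merged convolution can write directly
# to that blob.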
def find_top_after_bn(layers, name, top):
bn_maps[name] = {}
for l in layers:
if len(l.bottom) == 0:
continue
if l.bottom[0] == top and l.type == "BatchNorm":
bn_maps[name]["bn"] = l.name
top = l.top[0]
if l.bottom[0] == top and l.type == "Scale":
bn_maps[name]["scale"] = l.name
top = l.top[0]
return top
def pre_process(expected_proto, new_proto):
net_specs = caffe_pb2.NetParameter()
net_specs2 = caffe_pb2.NetParameter()
with open(expected_proto, "r") as fp:
text_format.Merge(str(fp.read()), net_specs)
net_specs2.MergeFrom(net_specs)
layers = net_specs.layer
num_layers = len(layers)
for i in range(num_layers - 1, -1, -1):
del net_specs2.layer[i]
for idx in range(num_layers):
l = layers[idx]
if l.type == "BatchNorm" or l.type == "Scale":
continue
elif l.type == "Convolution" or l.type == "Deconvolution":
top = find_top_after_bn(layers, l.name, l.top[0])
bn_maps[l.name]["type"] = l.type
layer = net_specs2.layer.add()
layer.MergeFrom(l)
layer.top[0] = top
layer.convolution_param.bias_term = True
else:
layer = net_specs2.layer.add()
layer.MergeFrom(l)
with open(new_proto, "w") as fp:
fp.write("{}".format(net_specs2))
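# BatchNorm folding implemented in load_weights() below: for a convolution
# y = W*x + b followed by BatchNorm (mean, var, eps=1e-5) and Scale
# (gamma="scales", beta="shift"), the merged layer uses
#   W' = W * gamma / sqrt(var + eps)
#   b' = (b - mean) * gamma / sqrt(var + eps) + beta
# with mean and var first divided by the BatchNorm scale factor (scalef).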
def load_weights(net, nobn):
if sys.version_info > (3, 0):
listKeys = nobn.params.keys()
else:
listKeys = nobn.params.iterkeys()
for key in listKeys:
if type(nobn.params[key]) is caffe._caffe.BlobVec:
conv = net.params[key]
if key not in bn_maps or "bn" not in bn_maps[key]:
for i, w in enumerate(conv):
nobn.params[key][i].data[...] = w.data
else:
print(key)
bn = net.params[bn_maps[key]["bn"]]
scale = net.params[bn_maps[key]["scale"]]
wt = conv[0].data
channels = 0
if bn_maps[key]["type"] == "Convolution":
channels = wt.shape[0]
elif bn_maps[key]["type"] == "Deconvolution":
channels = wt.shape[1]
else:
print("error type " + bn_maps[key]["type"])
exit(-1)
bias = np.zeros(channels)
if len(conv) > 1:
bias = conv[1].data
mean = bn[0].data
var = bn[1].data
scalef = bn[2].data
scales = scale[0].data
shift = scale[1].data
if scalef != 0:
scalef = 1. / scalef
mean = mean * scalef
var = var * scalef
rstd = 1. / np.sqrt(var + 1e-5)
if bn_maps[key]["type"] == "Convolution":
rstd1 = rstd.reshape((channels, 1, 1, 1))
scales1 = scales.reshape((channels, 1, 1, 1))
wt = wt * rstd1 * scales1
else:
rstd1 = rstd.reshape((1, channels, 1, 1))
scales1 = scales.reshape((1, channels, 1, 1))
wt = wt * rstd1 * scales1
bias = (bias - mean) * rstd * scales + shift
nobn.params[key][0].data[...] = wt
nobn.params[key][1].data[...] = bias
if __name__ == '__main__':
parser1 = make_parser()
args = parser1.parse_args()
    out = os.path.splitext(args.model)[0]
pre_process(args.model, out + '_no_bn.prototxt')
net = caffe.Net(args.model, args.weights, caffe.TEST)
net2 = caffe.Net(out + '_no_bn.prototxt', caffe.TEST)
load_weights(net, net2)
net2.save(out + '_no_bn.caffemodel')
``` |
{
"source": "jkjung-avt/DeepLens_Notifier",
"score": 2
} |
#### File: jkjung-avt/DeepLens_Notifier/agent.py
```python
import os
import sys
import logging
import logging as log
import cv2
from openvino.inference_engine import IENetwork, IEPlugin
from line_notify import send_message
LOG_FILE = os.environ['HOME'] + '/deeplens_agent.log'
MODEL = os.environ['HOME'] + \
'/models/openvino/googlenet_fc_coco_SSD_300x300/FP16/deploy.xml'
DEVICE = 'GPU'
DETECT_CLASS = (1,) # COCO class 1: 'person'
CONF_THRESHOLD = 0.35
VIDEO_IN = '/opt/awscam/out/ch2_out.mjpeg'
IMG_W = 1280
IMG_H = 720
DO_IMSHOW = False
TMP_IMG = '/tmp/deeplens_agent.jpg'
LINE_TOKEN = os.environ['LINE_TOKEN']
EVENT_AVERAGE = 0.0
EVENT_TRIGGERED = True
def check_notify(detected, frame):
"""Check whether to send a notification based on detection status"""
global EVENT_AVERAGE, EVENT_TRIGGERED
EVENT_AVERAGE = EVENT_AVERAGE * 0.97 + float(detected) * 0.03
if EVENT_AVERAGE >= 0.8 and not EVENT_TRIGGERED:
log.info('Event triggered!')
EVENT_TRIGGERED = True
cv2.imwrite(TMP_IMG, frame)
status = send_message(LINE_TOKEN,
'D5D01 meeting room is occupied.',
TMP_IMG)
log.info('HTTP request status = {}'.format(status))
if EVENT_AVERAGE < 0.2 and EVENT_TRIGGERED:
log.info('Event relieved.')
EVENT_TRIGGERED = False
status = send_message(LINE_TOKEN,
'D5D01 meeting room is empty now...')
log.info('HTTP request status = {}'.format(status))
def main():
log.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
filename=LOG_FILE)
model_xml = MODEL
model_bin = os.path.splitext(model_xml)[0] + '.bin'
# Plugin initialization
log.info('Initializing plugin for {} device...'.format(DEVICE))
plugin = IEPlugin(device=DEVICE, plugin_dirs='')
# Read IR
log.info('Reading IR...')
net = IENetwork(model=model_xml, weights=model_bin)
assert plugin.device != 'CPU'
assert len(net.inputs.keys()) == 1
assert len(net.outputs) == 1
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
log.info('Loading IR to the plugin...')
exec_net = plugin.load(network=net, num_requests=2)
# Read and pre-process input image
n, c, h, w = net.inputs[input_blob].shape
del net
cap = cv2.VideoCapture(VIDEO_IN)
cur_request_id = 0
next_request_id = 1
log.info("Starting inference in async mode...")
log.info("To stop the demo execution press Esc button")
initial_w = IMG_W
initial_h = IMG_H
ret, frame = cap.read()
if not ret:
sys.exit('No input frame!')
frame = cv2.resize(frame, (IMG_W, IMG_H))
while cap.isOpened():
ret, next_frame = cap.read()
if not ret:
break
next_frame = cv2.resize(next_frame, (IMG_W, IMG_H))
# Main sync point:
# in the Async mode we start the NEXT infer request, while
# waiting for the CURRENT to complete
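        # The two request ids are swapped at the end of the loop, so inference
        # on the next frame overlaps with parsing/drawing of the current one.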
in_frame = cv2.resize(next_frame, (w, h))
in_frame = in_frame.transpose((2, 0, 1)) # HWC to CHW
in_frame = in_frame.reshape((n, c, h, w))
exec_net.start_async(request_id=next_request_id,
inputs={input_blob: in_frame})
if exec_net.requests[cur_request_id].wait(-1) == 0:
# Parse detection results of the current request
res = exec_net.requests[cur_request_id].outputs[out_blob]
event_detected = 0
for obj in res[0][0]:
if int(obj[1]) in DETECT_CLASS and obj[2] > CONF_THRESHOLD:
event_detected = 1
xmin = int(obj[3] * initial_w)
ymin = int(obj[4] * initial_h)
xmax = int(obj[5] * initial_w)
ymax = int(obj[6] * initial_h)
# Draw bounding box
color = (0, 255, 0)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
check_notify(event_detected, frame)
if DO_IMSHOW:
cv2.imshow("Detection Results", frame)
if cv2.waitKey(1) == 27:
break
cur_request_id, next_request_id = next_request_id, cur_request_id
frame = next_frame
cv2.destroyAllWindows()
del exec_net
del plugin
if __name__ == '__main__':
main()
``` |
{
"source": "jkjung-avt/keras-yolo3",
"score": 3
} |
#### File: jkjung-avt/keras-yolo3/yolo_test.py
```python
import os
import sys
import argparse
from yolo import YOLO, detect_video
from PIL import Image
FLAGS = None
TEST_DIR = 'open-images-dataset/kaggle-2018-object-detection/test_challenge_2018'
OUTPUT_CSV = 'submit/output.csv'
def detect_img(yolo, img_path):
try:
image = Image.open(img_path)
except:
sys.exit('Cannot open image file: {}'.format(img_path))
else:
r_image = yolo.detect_image(image)
r_image.show()
def detect_test_imgs(yolo):
global FLAGS
jpgs = [f for f in os.listdir(TEST_DIR) if f.endswith('.jpg')]
if FLAGS.shuffle:
from random import shuffle
shuffle(jpgs)
for jpg in jpgs:
img_path = os.path.join(TEST_DIR, jpg)
detect_img(yolo, img_path)
str_in = input('{}, <ENTER> for next or "q" to quit: '.format(img_path))
if str_in.lower() == 'q':
break
yolo.close_session()
def infer_img(yolo, img_path):
try:
image = Image.open(img_path)
except:
#sys.exit('Cannot open image file: {}'.format(img_path))
print('!!! Cannot open image file: {}'.format(img_path))
return []
else:
return yolo.infer_image(image)
def submit_test_imgs(yolo):
jpgs = [f for f in os.listdir(TEST_DIR) if f.endswith('.jpg')]
os.makedirs(os.path.split(OUTPUT_CSV)[0], exist_ok=True)
with open(OUTPUT_CSV, 'w') as f:
f.write('ImageId,PredictionString\n')
for jpg in jpgs:
print(jpg)
img_path = os.path.join(TEST_DIR, jpg)
boxes = infer_img(yolo, img_path)
f.write('{},'.format(os.path.splitext(jpg)[0]))
# 1 record: [label, confidence, x_min, y_min, x_max, y_max]
box_strings = ['{:s} {:.5f} {:.5f} {:.5f} {:.5f} {:.5f}'.format(b[0], b[1], b[2], b[3], b[4], b[5]) for b in boxes]
if box_strings:
f.write(' '.join(box_strings))
f.write('\n')
yolo.close_session()
if __name__ == '__main__':
# class YOLO defines the default value, so suppress any default here
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
'''
Command line options
'''
parser.add_argument(
'--model', type=str,
help='path to model weight file, default ' + YOLO.get_defaults("model_path")
)
parser.add_argument(
'--score', type=float,
help='score (confidence) threshold, default ' + str(YOLO.get_defaults("score"))
)
parser.add_argument(
'--shuffle', default=False, action="store_true",
help='shuffle images for display mode'
)
parser.add_argument(
'--display', default=False, action="store_true",
help='display mode, to show inferred images with bounding box overlays'
)
parser.add_argument(
'--submit', default=False, action="store_true",
help='submit mode, to generate "output.csv" for Kaggle submission'
)
FLAGS = parser.parse_args()
if FLAGS.display:
print("Display mode")
detect_test_imgs(YOLO(**vars(FLAGS)))
elif FLAGS.submit:
print("Submit mode: writing to output.csv")
submit_test_imgs(YOLO(**vars(FLAGS)))
else:
print("Please specify either Display or Submit mode.")
``` |
{
"source": "jkjung-avt/py-faster-rcnn",
"score": 3
} |
#### File: lib/datasets/factory.py
```python
__sets = {}
from datasets.pascal_voc import pascal_voc
from datasets.coco import coco
from datasets.vehicles import vehicles
from datasets.brainwash import brainwash
from datasets.fisheries import fisheries
import numpy as np
import os.path as osp
# Set up voc_<year>_<split> using selective search "fast" mode
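# Note: the "split=split, year=year" default arguments below bind the current
# loop values at definition time; a plain "lambda: pascal_voc(split, year)"
# would close over the loop variables and every entry would end up using the
# values from the last iteration. The same pattern is used for all datasets.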
for year in ['2007', '2012']:
for split in ['train', 'val', 'trainval', 'test']:
name = 'voc_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: pascal_voc(split, year))
# Set up coco_2014_<split>
for year in ['2014']:
for split in ['train', 'val', 'minival', 'valminusminival']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up coco_2015_<split>
for year in ['2015']:
for split in ['test', 'test-dev']:
name = 'coco_{}_{}'.format(year, split)
__sets[name] = (lambda split=split, year=year: coco(split, year))
# Set up brainwash_<split>
frcn_root = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
for split in ['train', 'val']:
name = 'brainwash_{}'.format(split)
__sets[name] = (lambda split=split: brainwash(split))
# Set up fisheries_<split>
fisheries_devkit_path = osp.join(frcn_root, 'data/Kaggle_Fisheries')
for split in ['train', 'val']:
name = 'fisheries_{}'.format(split)
__sets[name] = (lambda split=split: fisheries(split, fisheries_devkit_path))
# Set up vehicles_<split>
for split in ['train', 'test']:
name = 'vehicles_{}'.format(split)
__sets[name] = (lambda split=split: vehicles(split))
def get_imdb(name):
"""Get an imdb (image database) by name."""
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
def list_imdbs():
"""List all registered imdbs."""
return __sets.keys()
``` |
{
"source": "JKK1/nTime-Based-Startum-Proxy",
"score": 2
} |
#### File: JKK1/nTime-Based-Startum-Proxy/stratum.py
```python
import threading,time
import pools
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
from src import ScryptProxy
#from src import Sha256Proxy
#from src import X11Proxy
#from src import EthashProxy
class Stratum(threading.Thread):
def __init__(self):
self.pools=pools.pools
self.isAlive=True
self.stratums={}
self.stratums["scrypt"]=[ScryptProxy.ScryptStratum(self.pools["scrypt"][0], 3333, nonceBytes=1),self.pools["scrypt"][0]]
threading.Thread.__init__(self)
self.start()
def shutdown(self):
self.isAlive = False
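    # update() picks, per algorithm, the pool reporting the highest
    # profitability and switches only when it beats the current pool by more
    # than 3% (the 1.03 factor below) and the proxy reports readyToChange();
    # the margin keeps the proxy from flapping between similarly paying pools.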
def update(self):
for algorithm in self.pools:
if algorithm not in self.stratums:
continue
maximum = 0
maximumProfitability = -1
x=0
for pool in self.pools[algorithm]:
method = pool["profitability"]
profitability = method()
if profitability>maximumProfitability:
maximum = x
maximumProfitability = profitability
x+=1
if self.stratums[algorithm][1] != self.pools[algorithm][maximum]:
if maximumProfitability>self.stratums[algorithm][1]["profitability"]()*1.03:
if self.stratums[algorithm][0].readyToChange():
self.stratums[algorithm][0].changePool(self.pools[algorithm][maximum])
self.stratums[algorithm][1] = self.pools[algorithm][maximum]
logging.info("new best pool for {}: {}".format(algorithm,self.pools[algorithm][maximum]["url"]))
def run(self):
while self.isAlive:
try:
self.update()
time.sleep(0.03)
except:
raise
self.isAlive = False
def getShares(self):
ret = {}
for algo in self.stratums:
shares = self.stratums[algo][0].getShares()
ret[algo]={}
for worker in shares:
if worker not in ret[algo]:
ret[algo][worker]=shares[worker]
else:
ret[algo][worker][0]+=shares[worker][0]
ret[algo][worker][1]+=shares[worker][1]
return ret
def killOrder(self):
usernames = []
for proxy in self.stratums["scrypt"][0].proxies:
for miner in proxy.miners:
if miner.username in usernames:
miner.kill()
else:
usernames.append(miner.username)
stratum = None
def start():
global stratum
stratum = Stratum()
``` |
{
"source": "jkk544/tor",
"score": 2
} |
#### File: scripts/maint/updateFallbackDirs.py
```python
import StringIO
import string
import re
import datetime
import gzip
import os.path
import json
import math
import sys
import urllib
import urllib2
import hashlib
import dateutil.parser
# bson_lazy provides bson
#from bson import json_util
import copy
import re
from stem.descriptor import DocumentHandler
from stem.descriptor.remote import get_consensus, get_server_descriptors, MAX_FINGERPRINTS
import logging
logging.root.name = ''
HAVE_IPADDRESS = False
try:
# python 3 builtin, or install package py2-ipaddress
# there are several ipaddress implementations for python 2
# with slightly different semantics with str typed text
# fortunately, all our IP addresses are in unicode
import ipaddress
HAVE_IPADDRESS = True
except ImportError:
# if this happens, we avoid doing netblock analysis
logging.warning('Unable to import ipaddress, please install py2-ipaddress.' +
' A fallback list will be created, but optional netblock' +
' analysis will not be performed.')
## Top-Level Configuration
# We use semantic versioning: https://semver.org
# In particular:
# * major changes include removing a mandatory field, or anything else that
# would break an appropriately tolerant parser,
# * minor changes include adding a field,
# * patch changes include changing header comments or other unstructured
# content
FALLBACK_FORMAT_VERSION = '2.0.0'
SECTION_SEPARATOR_BASE = '====='
SECTION_SEPARATOR_COMMENT = '/* ' + SECTION_SEPARATOR_BASE + ' */'
# Output all candidate fallbacks, or only output selected fallbacks?
OUTPUT_CANDIDATES = False
# Perform DirPort checks over IPv4?
# Change this to False if IPv4 doesn't work for you, or if you don't want to
# download a consensus for each fallback
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True
# Perform DirPort checks over IPv6?
# If you know IPv6 works for you, set this to True
# This will exclude IPv6 relays without an IPv6 DirPort configured
# So it's best left at False until #18394 is implemented
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False
# Must relays be running now?
MUST_BE_RUNNING_NOW = (PERFORM_IPV4_DIRPORT_CHECKS
or PERFORM_IPV6_DIRPORT_CHECKS)
# Clients have been using microdesc consensuses by default for a while now
DOWNLOAD_MICRODESC_CONSENSUS = True
# If a relay delivers an expired consensus, if it expired less than this many
# seconds ago, we still allow the relay. This should never be less than -90,
# as all directory mirrors should have downloaded a consensus 90 minutes
# before it expires. It should never be more than 24 hours, because clients
# reject consensuses that are older than REASONABLY_LIVE_TIME.
# For the consensus expiry check to be accurate, the machine running this
# script needs an accurate clock.
#
# Relays on 0.3.0 and later return a 404 when they are about to serve an
# expired consensus. This makes them fail the download check.
# We use a tolerance of 0, so that 0.2.x series relays also fail the download
# check if they serve an expired consensus.
CONSENSUS_EXPIRY_TOLERANCE = 0
# Output fallback name, flags, bandwidth, and ContactInfo in a C comment?
OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False
# Output matching ContactInfo in fallbacks list or the blacklist?
# Useful if you're trying to contact operators
CONTACT_COUNT = True if OUTPUT_CANDIDATES else False
CONTACT_BLACKLIST_COUNT = True if OUTPUT_CANDIDATES else False
# How the list should be sorted:
# fingerprint: is useful for stable diffs of fallback lists
# measured_bandwidth: is useful when pruning the list based on bandwidth
# contact: is useful for contacting operators once the list has been pruned
OUTPUT_SORT_FIELD = 'contact' if OUTPUT_CANDIDATES else 'fingerprint'
## OnionOO Settings
ONIONOO = 'https://onionoo.torproject.org/'
#ONIONOO = 'https://onionoo.thecthulhu.com/'
# Don't bother going out to the Internet, just use the files available locally,
# even if they're very old
LOCAL_FILES_ONLY = False
## Whitelist / Blacklist Filter Settings
# The whitelist contains entries that are included if all attributes match
# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
# The blacklist contains (partial) entries that are excluded if any
# sufficiently specific group of attributes matches:
# IPv4 & DirPort
# IPv4 & ORPort
# ID
# IPv6 & DirPort
# IPv6 & IPv6 ORPort
# If neither port is included in the blacklist, the entire IP address is
# blacklisted.
# What happens to entries in neither list?
# When True, they are included, when False, they are excluded
INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False
# If an entry is in both lists, what happens?
# When True, it is excluded, when False, it is included
BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True
WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist'
FALLBACK_FILE_NAME = 'src/or/fallback_dirs.inc'
# The number of bytes we'll read from a filter file before giving up
MAX_LIST_FILE_SIZE = 1024 * 1024
## Eligibility Settings
# Require fallbacks to have the same address and port for a set amount of time
# We used to have this at 1 week, but that caused many fallback failures, which
# meant that we had to rebuild the list more often. We want fallbacks to be
# stable for 2 years, so we set it to a few months.
#
# There was a bug in Tor 0.2.8.1-alpha and earlier where a relay temporarily
# submits a 0 DirPort when restarted.
# This causes OnionOO to (correctly) reset its stability timer.
# Affected relays should upgrade to Tor 0.2.9 or later, which has a fix
# for this issue.
#
# If a relay changes address or port, that's it, it's not useful any more,
# because clients can't find it
ADDRESS_AND_PORT_STABLE_DAYS = 90
# We ignore relays that have been down for more than this period
MAX_DOWNTIME_DAYS = 0 if MUST_BE_RUNNING_NOW else 7
# FallbackDirs must have a time-weighted-fraction that is greater than or
# equal to:
# Mirrors that are down half the time are still useful half the time
CUTOFF_RUNNING = .50
CUTOFF_V2DIR = .50
# Guard flags are removed for some time after a relay restarts, so we ignore
# the guard flag.
CUTOFF_GUARD = .00
# FallbackDirs must have a time-weighted-fraction that is less than or equal
# to:
# .00 means no bad exits
PERMITTED_BADEXIT = .00
# older entries' weights are adjusted with ALPHA^(age in days)
AGE_ALPHA = 0.99
# this factor is used to scale OnionOO entries to [0,1]
ONIONOO_SCALE_ONE = 999.
## Fallback Count Limits
# The target for these parameters is 20% of the guards in the network
# This is around 200 as of October 2015
_FB_POG = 0.2
FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG
# Limit the number of fallbacks (eliminating lowest by advertised bandwidth)
MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 200
# Emit a C #error if the number of fallbacks is less than expected
MIN_FALLBACK_COUNT = 0 if OUTPUT_CANDIDATES else MAX_FALLBACK_COUNT*0.5
# The maximum number of fallbacks on the same address, contact, or family
#
# With 150 fallbacks, this means each operator sees 5% of client bootstraps.
# For comparison:
# - We try to limit guard and exit operators to 5% of the network
# - The directory authorities used to see 11% of client bootstraps each
#
# We also don't want too much of the list to go down if a single operator
# has to move all their relays.
MAX_FALLBACKS_PER_IP = 1
MAX_FALLBACKS_PER_IPV4 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_IPV6 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_CONTACT = 7
MAX_FALLBACKS_PER_FAMILY = 7
## Fallback Bandwidth Requirements
# Any fallback with the Exit flag has its bandwidth multiplied by this fraction
# to make sure we aren't further overloading exits
# (Set to 1.0, because we asked that only lightly loaded exits opt-in,
# and the extra load really isn't that much for large relays.)
EXIT_BANDWIDTH_FRACTION = 1.0
# If a single fallback's bandwidth is too low, it's pointless adding it
# We expect fallbacks to handle an extra 10 kilobytes per second of traffic
# Make sure they can support fifty times the expected extra load
#
# We convert this to a consensus weight before applying the filter,
# because all the bandwidth amounts are specified by the relay
MIN_BANDWIDTH = 50.0 * 10.0 * 1024.0
# Clients will time out after 30 seconds trying to download a consensus
# So allow fallback directories half that to deliver a consensus
# The exact download times might change based on the network connection
# running this script, but only by a few seconds
# There is also about a second of python overhead
CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
# If the relay fails a consensus check, retry the download
# This avoids delisting a relay due to transient network conditions
CONSENSUS_DOWNLOAD_RETRY = True
## Parsing Functions
def parse_ts(t):
return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")
def remove_bad_chars(raw_string, bad_char_list):
# Remove each character in the bad_char_list
cleansed_string = raw_string
for c in bad_char_list:
cleansed_string = cleansed_string.replace(c, '')
return cleansed_string
def cleanse_unprintable(raw_string):
# Remove all unprintable characters
cleansed_string = ''
for c in raw_string:
if c in string.printable:
cleansed_string += c
return cleansed_string
def cleanse_whitespace(raw_string):
# Replace all whitespace characters with a space
cleansed_string = raw_string
for c in string.whitespace:
cleansed_string = cleansed_string.replace(c, ' ')
return cleansed_string
def cleanse_c_multiline_comment(raw_string):
cleansed_string = raw_string
# Embedded newlines should be removed by tor/onionoo, but let's be paranoid
cleansed_string = cleanse_whitespace(cleansed_string)
# ContactInfo and Version can be arbitrary binary data
cleansed_string = cleanse_unprintable(cleansed_string)
# Prevent a malicious / unanticipated string from breaking out
# of a C-style multiline comment
# This removes '/*' and '*/' and '//'
bad_char_list = '*/'
# Prevent a malicious string from using C nulls
bad_char_list += '\0'
# Avoid confusing parsers by making sure there is only one comma per fallback
bad_char_list += ','
# Avoid confusing parsers by making sure there is only one equals per field
bad_char_list += '='
# Be safer by removing bad characters entirely
cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
# Some compilers may further process the content of comments
# There isn't much we can do to cover every possible case
# But comment-based directives are typically only advisory
return cleansed_string
def cleanse_c_string(raw_string):
cleansed_string = raw_string
# Embedded newlines should be removed by tor/onionoo, but let's be paranoid
cleansed_string = cleanse_whitespace(cleansed_string)
# ContactInfo and Version can be arbitrary binary data
cleansed_string = cleanse_unprintable(cleansed_string)
# Prevent a malicious address/fingerprint string from breaking out
# of a C-style string
bad_char_list = '"'
# Prevent a malicious string from using escapes
bad_char_list += '\\'
# Prevent a malicious string from using C nulls
bad_char_list += '\0'
# Avoid confusing parsers by making sure there is only one comma per fallback
bad_char_list += ','
# Avoid confusing parsers by making sure there is only one equals per field
bad_char_list += '='
# Be safer by removing bad characters entirely
cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
# Some compilers may further process the content of strings
# There isn't much we can do to cover every possible case
# But this typically only results in changes to the string data
return cleansed_string
## OnionOO Source Functions
# a dictionary of source metadata for each onionoo query we've made
fetch_source = {}
# register source metadata for 'what'
# assumes we only retrieve one document for each 'what'
def register_fetch_source(what, url, relays_published, version):
fetch_source[what] = {}
fetch_source[what]['url'] = url
fetch_source[what]['relays_published'] = relays_published
fetch_source[what]['version'] = version
# list each registered source's 'what'
def fetch_source_list():
return sorted(fetch_source.keys())
# given 'what', provide a multiline C comment describing the source
def describe_fetch_source(what):
desc = '/*'
desc += '\n'
desc += 'Onionoo Source: '
desc += cleanse_c_multiline_comment(what)
desc += ' Date: '
desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
desc += ' Version: '
desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
desc += '\n'
desc += 'URL: '
desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
desc += '\n'
desc += '*/'
return desc
## File Processing Functions
def write_to_file(str, file_name, max_len):
try:
with open(file_name, 'w') as f:
f.write(str[0:max_len])
except EnvironmentError, error:
logging.error('Writing file %s failed: %d: %s'%
(file_name,
error.errno,
error.strerror)
)
def read_from_file(file_name, max_len):
try:
if os.path.isfile(file_name):
with open(file_name, 'r') as f:
return f.read(max_len)
except EnvironmentError, error:
logging.info('Loading file %s failed: %d: %s'%
(file_name,
error.errno,
error.strerror)
)
return None
def parse_fallback_file(file_name):
file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
file_data = cleanse_unprintable(file_data)
file_data = remove_bad_chars(file_data, '\n"\0')
file_data = re.sub('/\*.*?\*/', '', file_data)
file_data = file_data.replace(',', '\n')
file_data = file_data.replace(' weight=10', '')
return file_data
def load_possibly_compressed_response_json(response):
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO.StringIO( response.read() )
f = gzip.GzipFile(fileobj=buf)
return json.load(f)
else:
return json.load(response)
def load_json_from_file(json_file_name):
# An exception here may be resolved by deleting the .last_modified
# and .json files, and re-running the script
try:
with open(json_file_name, 'r') as f:
return json.load(f)
except EnvironmentError, error:
raise Exception('Reading not-modified json file %s failed: %d: %s'%
(json_file_name,
error.errno,
error.strerror)
)
## OnionOO Functions
def datestr_to_datetime(datestr):
# Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT
if datestr is not None:
dt = dateutil.parser.parse(datestr)
else:
# Never modified - use start of epoch
dt = datetime.datetime.utcfromtimestamp(0)
# strip any timezone out (in case they're supported in future)
dt = dt.replace(tzinfo=None)
return dt
def onionoo_fetch(what, **kwargs):
params = kwargs
params['type'] = 'relay'
#params['limit'] = 10
params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS)
params['last_seen_days'] = '-%d'%(MAX_DOWNTIME_DAYS)
params['flag'] = 'V2Dir'
url = ONIONOO + what + '?' + urllib.urlencode(params)
# Unfortunately, the URL is too long for some OS filenames,
# but we still don't want to get files from different URLs mixed up
base_file_name = what + '-' + hashlib.sha1(url).hexdigest()
full_url_file_name = base_file_name + '.full_url'
MAX_FULL_URL_LENGTH = 1024
last_modified_file_name = base_file_name + '.last_modified'
MAX_LAST_MODIFIED_LENGTH = 64
json_file_name = base_file_name + '.json'
if LOCAL_FILES_ONLY:
# Read from the local file, don't write to anything
response_json = load_json_from_file(json_file_name)
else:
# store the full URL to a file for debugging
# no need to compare as long as you trust SHA-1
write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
# load the last modified date from the file, if it exists
last_mod_date = read_from_file(last_modified_file_name,
MAX_LAST_MODIFIED_LENGTH)
if last_mod_date is not None:
request.add_header('If-modified-since', last_mod_date)
# Parse last modified date
last_mod = datestr_to_datetime(last_mod_date)
# Not Modified and still recent enough to be useful
# Onionoo / Globe used to use 6 hours, but we can afford a day
required_freshness = datetime.datetime.utcnow()
# strip any timezone out (to match dateutil.parser)
required_freshness = required_freshness.replace(tzinfo=None)
required_freshness -= datetime.timedelta(hours=24)
# Make the OnionOO request
response_code = 0
try:
response = urllib2.urlopen(request)
response_code = response.getcode()
except urllib2.HTTPError, error:
response_code = error.code
if response_code == 304: # not modified
pass
else:
raise Exception("Could not get " + url + ": "
+ str(error.code) + ": " + error.reason)
if response_code == 200: # OK
last_mod = datestr_to_datetime(response.info().get('Last-Modified'))
# Check for freshness
if last_mod < required_freshness:
if last_mod_date is not None:
# This check sometimes fails transiently, retry the script if it does
date_message = "Outdated data: last updated " + last_mod_date
else:
date_message = "No data: never downloaded "
raise Exception(date_message + " from " + url)
# Process the data
if response_code == 200: # OK
response_json = load_possibly_compressed_response_json(response)
with open(json_file_name, 'w') as f:
# use the most compact json representation to save space
json.dump(response_json, f, separators=(',',':'))
# store the last modified date in its own file
if response.info().get('Last-modified') is not None:
write_to_file(response.info().get('Last-Modified'),
last_modified_file_name,
MAX_LAST_MODIFIED_LENGTH)
elif response_code == 304: # Not Modified
response_json = load_json_from_file(json_file_name)
else: # Unexpected HTTP response code not covered in the HTTPError above
raise Exception("Unexpected HTTP response code to " + url + ": "
+ str(response_code))
register_fetch_source(what,
url,
response_json['relays_published'],
response_json['version'])
return response_json
def fetch(what, **kwargs):
#x = onionoo_fetch(what, **kwargs)
# don't use sort_keys, as the order of or_addresses is significant
#print json.dumps(x, indent=4, separators=(',', ': '))
#sys.exit(0)
return onionoo_fetch(what, **kwargs)
## Fallback Candidate Class
class Candidate(object):
CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
- datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))
def __init__(self, details):
for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
'consensus_weight', 'or_addresses', 'dir_address']:
if not f in details: raise Exception("Document has no %s field."%(f,))
if not 'contact' in details:
details['contact'] = None
if not 'flags' in details or details['flags'] is None:
details['flags'] = []
if (not 'advertised_bandwidth' in details
or details['advertised_bandwidth'] is None):
      # relays without advertised bandwidth have it calculated from their
# consensus weight
details['advertised_bandwidth'] = 0
if (not 'effective_family' in details
or details['effective_family'] is None):
details['effective_family'] = []
if not 'platform' in details:
details['platform'] = None
details['last_changed_address_or_port'] = parse_ts(
details['last_changed_address_or_port'])
self._data = details
self._stable_sort_or_addresses()
self._fpr = self._data['fingerprint']
self._running = self._guard = self._v2dir = 0.
self._split_dirport()
self._compute_orport()
if self.orport is None:
raise Exception("Failed to get an orport for %s."%(self._fpr,))
self._compute_ipv6addr()
if not self.has_ipv6():
logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))
self._compute_version()
self._extra_info_cache = None
def _stable_sort_or_addresses(self):
# replace self._data['or_addresses'] with a stable ordering,
# sorting the secondary addresses in string order
# leave the received order in self._data['or_addresses_raw']
self._data['or_addresses_raw'] = self._data['or_addresses']
or_address_primary = self._data['or_addresses'][:1]
# subsequent entries in the or_addresses array are in an arbitrary order
# so we stabilise the addresses by sorting them in string order
or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
self._data['or_addresses'] = or_addresses_stable
def get_fingerprint(self):
return self._fpr
# is_valid_ipv[46]_address by gsathya, karsten, 2013
@staticmethod
def is_valid_ipv4_address(address):
if not isinstance(address, (str, unicode)):
return False
# check if there are four period separated values
if address.count(".") != 3:
return False
# checks that each value in the octet are decimal values between 0-255
for entry in address.split("."):
if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
return False
elif entry[0] == "0" and len(entry) > 1:
return False # leading zeros, for instance in "1.2.3.001"
return True
@staticmethod
def is_valid_ipv6_address(address):
if not isinstance(address, (str, unicode)):
return False
# remove brackets
address = address[1:-1]
# addresses are made up of eight colon separated groups of four hex digits
# with leading zeros being optional
# https://en.wikipedia.org/wiki/IPv6#Address_format
colon_count = address.count(":")
if colon_count > 7:
return False # too many groups
elif colon_count != 7 and not "::" in address:
return False # not enough groups and none are collapsed
elif address.count("::") > 1 or ":::" in address:
return False # multiple groupings of zeros can't be collapsed
found_ipv4_on_previous_entry = False
for entry in address.split(":"):
# If an IPv6 address has an embedded IPv4 address,
# it must be the last entry
if found_ipv4_on_previous_entry:
return False
      if not re.match("^[0-9a-fA-F]{0,4}$", entry):
if not Candidate.is_valid_ipv4_address(entry):
return False
else:
found_ipv4_on_previous_entry = True
return True
def _split_dirport(self):
# Split the dir_address into dirip and dirport
(self.dirip, _dirport) = self._data['dir_address'].split(':', 2)
self.dirport = int(_dirport)
def _compute_orport(self):
# Choose the first ORPort that's on the same IPv4 address as the DirPort.
# In rare circumstances, this might not be the primary ORPort address.
# However, _stable_sort_or_addresses() ensures we choose the same one
# every time, even if onionoo changes the order of the secondaries.
self._split_dirport()
self.orport = None
for i in self._data['or_addresses']:
if i != self._data['or_addresses'][0]:
logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
(ipaddr, port) = i.rsplit(':', 1)
if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
self.orport = int(port)
return
def _compute_ipv6addr(self):
# Choose the first IPv6 address that uses the same port as the ORPort
# Or, choose the first IPv6 address in the list
# _stable_sort_or_addresses() ensures we choose the same IPv6 address
# every time, even if onionoo changes the order of the secondaries.
self.ipv6addr = None
self.ipv6orport = None
# Choose the first IPv6 address that uses the same port as the ORPort
for i in self._data['or_addresses']:
(ipaddr, port) = i.rsplit(':', 1)
      # port is a string from rsplit(), self.orport is an int
      if (int(port) == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
self.ipv6addr = ipaddr
self.ipv6orport = int(port)
return
# Choose the first IPv6 address in the list
for i in self._data['or_addresses']:
(ipaddr, port) = i.rsplit(':', 1)
if Candidate.is_valid_ipv6_address(ipaddr):
self.ipv6addr = ipaddr
self.ipv6orport = int(port)
return
def _compute_version(self):
# parse the version out of the platform string
# The platform looks like: "Tor 0.2.7.6 on Linux"
self._data['version'] = None
if self._data['platform'] is None:
return
# be tolerant of weird whitespacing, use a whitespace split
tokens = self._data['platform'].split()
for token in tokens:
vnums = token.split('.')
# if it's at least a.b.c.d, with potentially an -alpha-dev, -alpha, -rc
if (len(vnums) >= 4 and vnums[0].isdigit() and vnums[1].isdigit() and
vnums[2].isdigit()):
self._data['version'] = token
return
# From #20509
# bug #20499 affects versions from 0.2.9.1-alpha-dev to 0.2.9.4-alpha-dev
# and version 0.3.0.0-alpha-dev
# Exhaustive lists are hard to get wrong
STALE_CONSENSUS_VERSIONS = ['0.2.9.1-alpha-dev',
'0.2.9.2-alpha',
'0.2.9.2-alpha-dev',
'0.2.9.3-alpha',
'0.2.9.3-alpha-dev',
'0.2.9.4-alpha',
'0.2.9.4-alpha-dev',
'0.3.0.0-alpha-dev'
]
def is_valid_version(self):
# call _compute_version before calling this
# is the version of the relay a version we want as a fallback?
# checks both recommended versions and bug #20499 / #20509
#
# if the relay doesn't have a recommended version field, exclude the relay
if not self._data.has_key('recommended_version'):
log_excluded('%s not a candidate: no recommended_version field',
self._fpr)
return False
if not self._data['recommended_version']:
log_excluded('%s not a candidate: version not recommended', self._fpr)
return False
# if the relay doesn't have version field, exclude the relay
if not self._data.has_key('version'):
log_excluded('%s not a candidate: no version field', self._fpr)
return False
if self._data['version'] in Candidate.STALE_CONSENSUS_VERSIONS:
logging.warning('%s not a candidate: version delivers stale consensuses',
self._fpr)
return False
return True
@staticmethod
def _extract_generic_history(history, which='unknown'):
# given a tree like this:
# {
# "1_month": {
# "count": 187,
# "factor": 0.001001001001001001,
# "first": "2015-02-27 06:00:00",
# "interval": 14400,
# "last": "2015-03-30 06:00:00",
# "values": [
# 999,
# 999
# ]
# },
# "1_week": {
# "count": 169,
# "factor": 0.001001001001001001,
# "first": "2015-03-23 07:30:00",
# "interval": 3600,
# "last": "2015-03-30 07:30:00",
# "values": [ ...]
# },
# "1_year": {
# "count": 177,
# "factor": 0.001001001001001001,
# "first": "2014-04-11 00:00:00",
# "interval": 172800,
# "last": "2015-03-29 00:00:00",
# "values": [ ...]
# },
# "3_months": {
# "count": 185,
# "factor": 0.001001001001001001,
# "first": "2014-12-28 06:00:00",
# "interval": 43200,
# "last": "2015-03-30 06:00:00",
# "values": [ ...]
# }
# },
# extract exactly one piece of data per time interval,
# using smaller intervals where available.
#
# returns list of (age, length, value) dictionaries.
generic_history = []
periods = history.keys()
periods.sort(key = lambda x: history[x]['interval'])
now = datetime.datetime.utcnow()
newest = now
for p in periods:
h = history[p]
interval = datetime.timedelta(seconds = h['interval'])
this_ts = parse_ts(h['last'])
if (len(h['values']) != h['count']):
logging.warning('Inconsistent value count in %s document for %s'
%(p, which))
for v in reversed(h['values']):
if (this_ts <= newest):
agt1 = now - this_ts
agt2 = interval
agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
* 10**6) / 10**6
agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
* 10**6) / 10**6
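          # (this is timedelta.total_seconds() written out; the integer
          # division drops the fractional seconds)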
generic_history.append(
{ 'age': agetmp1,
'length': agetmp2,
'value': v
})
newest = this_ts
this_ts -= interval
if (this_ts + interval != parse_ts(h['first'])):
logging.warning('Inconsistent time information in %s document for %s'
%(p, which))
#print json.dumps(generic_history, sort_keys=True,
# indent=4, separators=(',', ': '))
return generic_history
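# Illustrative output (not from the original data): for a 1_week document
# with interval=3600, each returned entry looks roughly like
#   {'age': 86400, 'length': 3600, 'value': 999}
# where age and length are in seconds and value is the raw Onionoo datum.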
@staticmethod
def _avg_generic_history(generic_history):
a = []
for i in generic_history:
if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
continue
if (i['length'] is not None
and i['age'] is not None
and i['value'] is not None):
w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
a.append( (i['value'] * w, w) )
sv = math.fsum(map(lambda x: x[0], a))
sw = math.fsum(map(lambda x: x[1], a))
if sw == 0.0:
svw = 0.0
else:
svw = sv/sw
return svw
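# Worked example (illustrative, assuming AGE_ALPHA is 0.99): an interval of
# length 3600s that is 10 days old gets weight w = 3600 * 0.99**10 ~= 3256,
# so newer intervals dominate the weighted average computed above.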
def _add_generic_history(self, history):
periods = history.keys()
periods.sort(key = lambda x: history[x]['interval'])
print periods
def add_running_history(self, history):
pass
def add_uptime(self, uptime):
logging.debug('Adding uptime %s.'%(self._fpr,))
# flags we care about: Running, V2Dir, Guard
if not 'flags' in uptime:
logging.debug('No flags in document for %s.'%(self._fpr,))
return
for f in ['Running', 'Guard', 'V2Dir']:
if not f in uptime['flags']:
logging.debug('No %s in flags for %s.'%(f, self._fpr,))
return
running = self._extract_generic_history(uptime['flags']['Running'],
'%s-Running'%(self._fpr))
guard = self._extract_generic_history(uptime['flags']['Guard'],
'%s-Guard'%(self._fpr))
v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
'%s-V2Dir'%(self._fpr))
if 'BadExit' in uptime['flags']:
badexit = self._extract_generic_history(uptime['flags']['BadExit'],
'%s-BadExit'%(self._fpr))
self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
self._badexit = None
if 'BadExit' in uptime['flags']:
self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE
def is_candidate(self):
try:
if (MUST_BE_RUNNING_NOW and not self.is_running()):
log_excluded('%s not a candidate: not running now, unable to check ' +
'DirPort consensus download', self._fpr)
return False
if (self._data['last_changed_address_or_port'] >
self.CUTOFF_ADDRESS_AND_PORT_STABLE):
log_excluded('%s not a candidate: changed address/port recently (%s)',
self._fpr, self._data['last_changed_address_or_port'])
return False
if self._running < CUTOFF_RUNNING:
log_excluded('%s not a candidate: running avg too low (%lf)',
self._fpr, self._running)
return False
if self._v2dir < CUTOFF_V2DIR:
log_excluded('%s not a candidate: v2dir avg too low (%lf)',
self._fpr, self._v2dir)
return False
if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
log_excluded('%s not a candidate: badexit avg too high (%lf)',
self._fpr, self._badexit)
return False
# this function logs a message depending on which check fails
if not self.is_valid_version():
return False
if self._guard < CUTOFF_GUARD:
log_excluded('%s not a candidate: guard avg too low (%lf)',
self._fpr, self._guard)
return False
if (not self._data.has_key('consensus_weight')
or self._data['consensus_weight'] < 1):
log_excluded('%s not a candidate: consensus weight invalid', self._fpr)
return False
except BaseException as e:
logging.warning("Exception %s when checking if fallback is a candidate",
str(e))
return False
return True
def is_in_whitelist(self, relaylist):
""" A fallback matches if each key in the whitelist line matches:
ipv4
dirport
orport
id
ipv6 address and port (if present)
If the fallback has an ipv6 key, the whitelist line must also have
it, and vice versa, otherwise they don't match. """
ipv6 = None
if self.has_ipv6():
ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport)
for entry in relaylist:
if entry['id'] != self._fpr:
# can't log here unless we match an IP and port, because every relay's
# fingerprint is compared to every entry's fingerprint
if entry['ipv4'] == self.dirip and int(entry['orport']) == self.orport:
logging.warning('%s excluded: has OR %s:%d changed fingerprint to ' +
'%s?', entry['id'], self.dirip, self.orport,
self._fpr)
if self.has_ipv6() and entry.has_key('ipv6') and entry['ipv6'] == ipv6:
logging.warning('%s excluded: has OR %s changed fingerprint to ' +
'%s?', entry['id'], ipv6, self._fpr)
continue
if entry['ipv4'] != self.dirip:
logging.warning('%s excluded: has it changed IPv4 from %s to %s?',
self._fpr, entry['ipv4'], self.dirip)
continue
if int(entry['dirport']) != self.dirport:
logging.warning('%s excluded: has it changed DirPort from %s:%d to ' +
'%s:%d?', self._fpr, self.dirip, int(entry['dirport']),
self.dirip, self.dirport)
continue
if int(entry['orport']) != self.orport:
logging.warning('%s excluded: has it changed ORPort from %s:%d to ' +
'%s:%d?', self._fpr, self.dirip, int(entry['orport']),
self.dirip, self.orport)
continue
if entry.has_key('ipv6') and self.has_ipv6():
# if both entry and fallback have an ipv6 address, compare them
if entry['ipv6'] != ipv6:
logging.warning('%s excluded: has it changed IPv6 ORPort from %s ' +
'to %s?', self._fpr, entry['ipv6'], ipv6)
continue
# if the fallback has an IPv6 address but the whitelist entry
# doesn't, or vice versa, the whitelist entry doesn't match
elif entry.has_key('ipv6') and not self.has_ipv6():
logging.warning('%s excluded: has it lost its former IPv6 address %s?',
self._fpr, entry['ipv6'])
continue
elif not entry.has_key('ipv6') and self.has_ipv6():
logging.warning('%s excluded: has it gained an IPv6 address %s?',
self._fpr, ipv6)
continue
return True
return False
def is_in_blacklist(self, relaylist):
""" A fallback matches a blacklist line if a sufficiently specific group
of attributes matches:
ipv4 & dirport
ipv4 & orport
id
ipv6 & dirport
ipv6 & ipv6 orport
If the fallback and the blacklist line both have an ipv6 key,
their values will be compared, otherwise, they will be ignored.
If there is no dirport and no orport, the entry matches all relays on
that ip. """
for entry in relaylist:
for key in entry:
value = entry[key]
if key == 'id' and value == self._fpr:
log_excluded('%s is in the blacklist: fingerprint matches',
self._fpr)
return True
if key == 'ipv4' and value == self.dirip:
# if the dirport is present, check it too
if entry.has_key('dirport'):
if int(entry['dirport']) == self.dirport:
log_excluded('%s is in the blacklist: IPv4 (%s) and ' +
'DirPort (%d) match', self._fpr, self.dirip,
self.dirport)
return True
# if the orport is present, check it too
elif entry.has_key('orport'):
if int(entry['orport']) == self.orport:
log_excluded('%s is in the blacklist: IPv4 (%s) and ' +
'ORPort (%d) match', self._fpr, self.dirip,
self.orport)
return True
else:
log_excluded('%s is in the blacklist: IPv4 (%s) matches, and ' +
'entry has no DirPort or ORPort', self._fpr,
self.dirip)
return True
ipv6 = None
if self.has_ipv6():
ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport)
if (key == 'ipv6' and self.has_ipv6()):
# if both entry and fallback have an ipv6 address, compare them,
# otherwise, disregard ipv6 addresses
if value == ipv6:
# if the dirport is present, check it too
if entry.has_key('dirport'):
if int(entry['dirport']) == self.dirport:
log_excluded('%s is in the blacklist: IPv6 (%s) and ' +
'DirPort (%d) match', self._fpr, ipv6,
self.dirport)
return True
# we've already checked the ORPort, it's part of entry['ipv6']
else:
log_excluded('%s is in the blacklist: IPv6 (%s) matches, and ' +
'entry has no DirPort', self._fpr, ipv6)
return True
elif (key == 'ipv6' or self.has_ipv6()):
# only log if the fingerprint matches but the IPv6 doesn't
if entry.has_key('id') and entry['id'] == self._fpr:
log_excluded('%s skipping IPv6 blacklist comparison: relay ' +
'has%s IPv6%s, but entry has%s IPv6%s', self._fpr,
'' if self.has_ipv6() else ' no',
(' (' + ipv6 + ')') if self.has_ipv6() else '',
'' if key == 'ipv6' else ' no',
(' (' + value + ')') if key == 'ipv6' else '')
logging.warning('Has %s %s IPv6 address %s?', self._fpr,
'gained an' if self.has_ipv6() else 'lost its former',
ipv6 if self.has_ipv6() else value)
return False
def cw_to_bw_factor(self):
# any relays with a missing or zero consensus weight are not candidates
# any relays with a missing advertised bandwidth have it set to zero
return self._data['advertised_bandwidth'] / self._data['consensus_weight']
# since advertised_bandwidth is reported by the relay, it can be gamed
# to avoid this, use the median consensus weight to bandwidth factor to
# estimate this relay's measured bandwidth, and make that the upper limit
def measured_bandwidth(self, median_cw_to_bw_factor):
cw_to_bw= median_cw_to_bw_factor
# Reduce exit bandwidth to make sure we're not overloading them
if self.is_exit():
cw_to_bw *= EXIT_BANDWIDTH_FRACTION
measured_bandwidth = self._data['consensus_weight'] * cw_to_bw
if self._data['advertised_bandwidth'] != 0:
# limit advertised bandwidth (if available) to measured bandwidth
return min(measured_bandwidth, self._data['advertised_bandwidth'])
else:
return measured_bandwidth
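# Illustrative numbers (made up): with a median factor of 500 bytes/s per
# unit of consensus weight and consensus_weight = 2000, the estimate is
# 1,000,000 bytes/s; if advertised_bandwidth is 800,000, the smaller value
# (800,000) is what measured_bandwidth() above returns.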
def set_measured_bandwidth(self, median_cw_to_bw_factor):
self._data['measured_bandwidth'] = self.measured_bandwidth(
median_cw_to_bw_factor)
def is_exit(self):
return 'Exit' in self._data['flags']
def is_guard(self):
return 'Guard' in self._data['flags']
def is_running(self):
return 'Running' in self._data['flags']
# does this fallback have an IPv6 address and orport?
def has_ipv6(self):
return self.ipv6addr is not None and self.ipv6orport is not None
# strip leading and trailing brackets from an IPv6 address
# safe to use on non-bracketed IPv6 and on IPv4 addresses
# also convert to unicode, and make None appear as ''
@staticmethod
def strip_ipv6_brackets(ip):
if ip is None:
return unicode('')
if len(ip) < 2:
return unicode(ip)
if ip[0] == '[' and ip[-1] == ']':
return unicode(ip[1:-1])
return unicode(ip)
# are ip_a and ip_b in the same netblock?
# mask_bits is the size of the netblock
# takes both IPv4 and IPv6 addresses
# the versions of ip_a and ip_b must be the same
# the mask must be valid for the IP version
@staticmethod
def netblocks_equal(ip_a, ip_b, mask_bits):
if ip_a is None or ip_b is None:
return False
ip_a = Candidate.strip_ipv6_brackets(ip_a)
ip_b = Candidate.strip_ipv6_brackets(ip_b)
a = ipaddress.ip_address(ip_a)
b = ipaddress.ip_address(ip_b)
if a.version != b.version:
raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b))
if mask_bits > a.max_prefixlen:
logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
mask_bits = a.max_prefixlen
if mask_bits < 0:
logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
mask_bits = 0
a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False)
return b in a_net
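# Illustrative usage (not from the original script):
#   Candidate.netblocks_equal('203.0.113.1', '203.0.113.200', 24)  # True
#   Candidate.netblocks_equal('203.0.113.1', '198.51.100.1', 24)   # False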
# is this fallback's IPv4 address (dirip) in the same netblock as other's
# IPv4 address?
# mask_bits is the size of the netblock
def ipv4_netblocks_equal(self, other, mask_bits):
return Candidate.netblocks_equal(self.dirip, other.dirip, mask_bits)
# is this fallback's IPv6 address (ipv6addr) in the same netblock as
# other's IPv6 address?
# Returns False if either fallback has no IPv6 address
# mask_bits is the size of the netblock
def ipv6_netblocks_equal(self, other, mask_bits):
if not self.has_ipv6() or not other.has_ipv6():
return False
return Candidate.netblocks_equal(self.ipv6addr, other.ipv6addr, mask_bits)
# is this fallback's IPv4 DirPort the same as other's IPv4 DirPort?
def dirport_equal(self, other):
return self.dirport == other.dirport
# is this fallback's IPv4 ORPort the same as other's IPv4 ORPort?
def ipv4_orport_equal(self, other):
return self.orport == other.orport
# is this fallback's IPv6 ORPort the same as other's IPv6 ORPort?
# Returns False if either fallback has no IPv6 address
def ipv6_orport_equal(self, other):
if not self.has_ipv6() or not other.has_ipv6():
return False
return self.ipv6orport == other.ipv6orport
# does this fallback have the same DirPort, IPv4 ORPort, or
# IPv6 ORPort as other?
# Ignores IPv6 ORPort if either fallback has no IPv6 address
def port_equal(self, other):
return (self.dirport_equal(other) or self.ipv4_orport_equal(other)
or self.ipv6_orport_equal(other))
# return a list containing IPv4 ORPort, DirPort, and IPv6 ORPort (if present)
def port_list(self):
ports = [self.dirport, self.orport]
if self.has_ipv6() and not self.ipv6orport in ports:
ports.append(self.ipv6orport)
return ports
# does this fallback share a port with other, regardless of whether the
# port types match?
# For example, if self's IPv4 ORPort is 80 and other's DirPort is 80,
# return True
def port_shared(self, other):
for p in self.port_list():
if p in other.port_list():
return True
return False
# log how long it takes to download a consensus from dirip:dirport
# returns True if the download failed, False if it succeeded within max_time
@staticmethod
def fallback_consensus_download_speed(dirip, dirport, nickname, fingerprint,
max_time):
download_failed = False
# some directory mirrors respond to requests in ways that hang python
# sockets, which is why we log this line here
logging.info('Initiating %sconsensus download from %s (%s:%d) %s.',
'microdesc ' if DOWNLOAD_MICRODESC_CONSENSUS else '',
nickname, dirip, dirport, fingerprint)
# there appears to be about 1 second of overhead when comparing stem's
# internal trace time and the elapsed time calculated here
TIMEOUT_SLOP = 1.0
start = datetime.datetime.utcnow()
try:
consensus = get_consensus(
endpoints = [(dirip, dirport)],
timeout = (max_time + TIMEOUT_SLOP),
validate = True,
retries = 0,
fall_back_to_authority = False,
document_handler = DocumentHandler.BARE_DOCUMENT,
microdescriptor = DOWNLOAD_MICRODESC_CONSENSUS
).run()[0]
end = datetime.datetime.utcnow()
time_since_expiry = (end - consensus.valid_until).total_seconds()
except Exception, stem_error:
end = datetime.datetime.utcnow()
log_excluded('Unable to retrieve a consensus from %s: %s', nickname,
stem_error)
status = 'error: "%s"' % (stem_error)
level = logging.WARNING
download_failed = True
elapsed = (end - start).total_seconds()
if download_failed:
# keep the error failure status, and avoid using the variables
pass
elif elapsed > max_time:
status = 'too slow'
level = logging.WARNING
download_failed = True
elif (time_since_expiry > 0):
status = 'outdated consensus, expired %ds ago'%(int(time_since_expiry))
if time_since_expiry <= CONSENSUS_EXPIRY_TOLERANCE:
status += ', tolerating up to %ds'%(CONSENSUS_EXPIRY_TOLERANCE)
level = logging.INFO
else:
status += ', invalid'
level = logging.WARNING
download_failed = True
else:
status = 'ok'
level = logging.DEBUG
logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d) %s, ' +
'max download time %0.1fs.', elapsed, status, nickname,
dirip, dirport, fingerprint, max_time)
return download_failed
# does this fallback download the consensus fast enough?
def check_fallback_download_consensus(self):
# include the relay if we're not doing a check, or we can't check (IPv6)
ipv4_failed = False
ipv6_failed = False
if PERFORM_IPV4_DIRPORT_CHECKS:
ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip,
self.dirport,
self._data['nickname'],
self._fpr,
CONSENSUS_DOWNLOAD_SPEED_MAX)
if self.has_ipv6() and PERFORM_IPV6_DIRPORT_CHECKS:
# Clients assume the IPv6 DirPort is the same as the IPv4 DirPort
ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr,
self.dirport,
self._data['nickname'],
self._fpr,
CONSENSUS_DOWNLOAD_SPEED_MAX)
return ((not ipv4_failed) and (not ipv6_failed))
# if this fallback has not passed a download check, try it again,
# and record the result, available in get_fallback_download_consensus
def try_fallback_download_consensus(self):
if not self.get_fallback_download_consensus():
self._data['download_check'] = self.check_fallback_download_consensus()
# did this fallback pass the download check?
def get_fallback_download_consensus(self):
# if we're not performing checks, return True
if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS:
return True
# if we are performing checks, but haven't done one, return False
if not self._data.has_key('download_check'):
return False
return self._data['download_check']
# output an optional header comment and info for this fallback
# try_fallback_download_consensus before calling this
def fallbackdir_line(self, fallbacks, prefilter_fallbacks):
s = ''
if OUTPUT_COMMENTS:
s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks)
# if the download speed is ok, output a C string
# if it's not, but we OUTPUT_COMMENTS, output a commented-out C string
if self.get_fallback_download_consensus() or OUTPUT_COMMENTS:
s += self.fallbackdir_info(self.get_fallback_download_consensus())
return s
# output a header comment for this fallback
def fallbackdir_comment(self, fallbacks, prefilter_fallbacks):
# /*
# nickname
# flags
# adjusted bandwidth, consensus weight
# [contact]
# [identical contact counts]
# */
# Multiline C comment
s = '/*'
s += '\n'
s += cleanse_c_multiline_comment(self._data['nickname'])
s += '\n'
s += 'Flags: '
s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
s += '\n'
# this is an adjusted bandwidth, see calculate_measured_bandwidth()
bandwidth = self._data['measured_bandwidth']
weight = self._data['consensus_weight']
s += 'Bandwidth: %.1f MByte/s, Consensus Weight: %d'%(
bandwidth/(1024.0*1024.0),
weight)
s += '\n'
if self._data['contact'] is not None:
s += cleanse_c_multiline_comment(self._data['contact'])
if CONTACT_COUNT or CONTACT_BLACKLIST_COUNT:
fallback_count = len([f for f in fallbacks
if f._data['contact'] == self._data['contact']])
if fallback_count > 1:
s += '\n'
s += '%d identical contacts listed' % (fallback_count)
if CONTACT_BLACKLIST_COUNT:
prefilter_count = len([f for f in prefilter_fallbacks
if f._data['contact'] == self._data['contact']])
filter_count = prefilter_count - fallback_count
if filter_count > 0:
if fallback_count > 1:
s += ' '
else:
s += '\n'
s += '%d blacklisted' % (filter_count)
s += '\n'
s += '*/'
s += '\n'
return s
# output the fallback info C string for this fallback
# this is the text that would go after FallbackDir in a torrc
# if this relay failed the download test and we OUTPUT_COMMENTS,
# comment-out the returned string
def fallbackdir_info(self, dl_speed_ok):
# "address:dirport orport=port id=fingerprint"
# (insert additional mandatory fields here)
# "[ipv6=addr:orport]"
# (insert additional optional fields here)
# /* nickname=name */
# /* extrainfo={0,1} */
# (insert additional comment fields here)
# /* ===== */
# ,
#
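# A made-up example of a successful (non-commented) entry, assuming a
# nickname of "ExampleRelay", no IPv6 address, and an extra-info cache:
#   "192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567"
#   /* nickname=ExampleRelay */
#   /* extrainfo=1 */
#   /* <section separator> */
#   ,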
# Do we want a C string, or a commented-out string?
c_string = dl_speed_ok
comment_string = not dl_speed_ok and OUTPUT_COMMENTS
# If we don't want either kind of string, bail
if not c_string and not comment_string:
return ''
s = ''
# Comment out the fallback directory entry if it's too slow
# See the debug output for which address and port is failing
if comment_string:
s += '/* Consensus download failed or was too slow:\n'
# Multi-Line C string with trailing comma (part of a string list)
# This makes it easier to diff the file, and remove IPv6 lines using grep
# Integers don't need escaping
s += '"%s orport=%d id=%s"'%(
cleanse_c_string(self._data['dir_address']),
self.orport,
cleanse_c_string(self._fpr))
s += '\n'
# (insert additional mandatory fields here)
if self.has_ipv6():
s += '" ipv6=%s:%d"'%(cleanse_c_string(self.ipv6addr), self.ipv6orport)
s += '\n'
# (insert additional optional fields here)
if not comment_string:
s += '/* '
s += 'nickname=%s'%(cleanse_c_string(self._data['nickname']))
if not comment_string:
s += ' */'
s += '\n'
# if we know that the fallback is an extrainfo cache, flag it
# and if we don't know, assume it is not
if not comment_string:
s += '/* '
s += 'extrainfo=%d'%(1 if self._extra_info_cache else 0)
if not comment_string:
s += ' */'
s += '\n'
# (insert additional comment fields here)
# The terminator and comma must be the last line in each fallback entry
if not comment_string:
s += '/* '
s += SECTION_SEPARATOR_BASE
if not comment_string:
s += ' */'
s += '\n'
s += ','
if comment_string:
s += '\n'
s += '*/'
return s
## Fallback Candidate List Class
class CandidateList(dict):
def __init__(self):
pass
def _add_relay(self, details):
if not 'dir_address' in details: return
c = Candidate(details)
self[ c.get_fingerprint() ] = c
def _add_uptime(self, uptime):
try:
fpr = uptime['fingerprint']
except KeyError:
raise Exception("Document has no fingerprint field.")
try:
c = self[fpr]
except KeyError:
logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
return
c.add_uptime(uptime)
def _add_details(self):
logging.debug('Loading details document.')
d = fetch('details',
fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
'consensus_weight,advertised_bandwidth,or_addresses,' +
'dir_address,recommended_version,flags,effective_family,' +
'platform'))
logging.debug('Loading details document done.')
if not 'relays' in d: raise Exception("No relays found in document.")
for r in d['relays']: self._add_relay(r)
def _add_uptimes(self):
logging.debug('Loading uptime document.')
d = fetch('uptime')
logging.debug('Loading uptime document done.')
if not 'relays' in d: raise Exception("No relays found in document.")
for r in d['relays']: self._add_uptime(r)
def add_relays(self):
self._add_details()
self._add_uptimes()
def count_guards(self):
guard_count = 0
for fpr in self.keys():
if self[fpr].is_guard():
guard_count += 1
return guard_count
# Find fallbacks that fit the uptime, stability, and flags criteria,
# and make an array of them in self.fallbacks
def compute_fallbacks(self):
self.fallbacks = map(lambda x: self[x],
filter(lambda x: self[x].is_candidate(),
self.keys()))
# sort fallbacks by their consensus weight to advertised bandwidth factor,
# lowest to highest
# used to find the median cw_to_bw_factor()
def sort_fallbacks_by_cw_to_bw_factor(self):
self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor())
# sort fallbacks by their measured bandwidth, highest to lowest
# calculate_measured_bandwidth before calling this
# this is useful for reviewing candidates in priority order
def sort_fallbacks_by_measured_bandwidth(self):
self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'],
reverse=True)
# sort fallbacks by the data field data_field, lowest to highest
def sort_fallbacks_by(self, data_field):
self.fallbacks.sort(key=lambda f: f._data[data_field])
@staticmethod
def load_relaylist(file_obj):
""" Read each line in the file, and parse it like a FallbackDir line:
an IPv4 address and optional port:
<IPv4 address>:<port>
which are parsed into dictionary entries:
ipv4=<IPv4 address>
dirport=<port>
followed by a series of key=value entries:
orport=<port>
id=<fingerprint>
ipv6=<IPv6 address>:<IPv6 orport>
each line's key/value pairs are placed in a dictionary,
(of string -> string key/value pairs),
and these dictionaries are placed in an array.
comments start with # and are ignored """
file_data = file_obj['data']
file_name = file_obj['name']
relaylist = []
if file_data is None:
return relaylist
for line in file_data.split('\n'):
relay_entry = {}
# ignore comments
line_comment_split = line.split('#')
line = line_comment_split[0]
# cleanup whitespace
line = cleanse_whitespace(line)
line = line.strip()
if len(line) == 0:
continue
for item in line.split(' '):
item = item.strip()
if len(item) == 0:
continue
key_value_split = item.split('=')
kvl = len(key_value_split)
if kvl < 1 or kvl > 2:
print '#error Bad %s item: %s, format is key=value.'%(
file_name, item)
if kvl == 1:
# assume that entries without a key are the ipv4 address,
# perhaps with a dirport
ipv4_maybe_dirport = key_value_split[0]
ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
dirl = len(ipv4_maybe_dirport_split)
if dirl < 1 or dirl > 2:
print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
file_name, item)
if dirl >= 1:
relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
if dirl == 2:
relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
elif kvl == 2:
relay_entry[key_value_split[0]] = key_value_split[1]
relaylist.append(relay_entry)
return relaylist
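# Illustrative parse (made-up line, not from a real list): the input
#   203.0.113.1:9030 orport=9001 id=0123456789ABCDEF0123456789ABCDEF01234567
# becomes
#   {'ipv4': '203.0.113.1', 'dirport': '9030', 'orport': '9001',
#    'id': '0123456789ABCDEF0123456789ABCDEF01234567'}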
# apply the fallback whitelist and blacklist
def apply_filter_lists(self, whitelist_obj, blacklist_obj):
excluded_count = 0
logging.debug('Applying whitelist and blacklist.')
# parse the whitelist and blacklist
whitelist = self.load_relaylist(whitelist_obj)
blacklist = self.load_relaylist(blacklist_obj)
filtered_fallbacks = []
for f in self.fallbacks:
in_whitelist = f.is_in_whitelist(whitelist)
in_blacklist = f.is_in_blacklist(blacklist)
if in_whitelist and in_blacklist:
if BLACKLIST_EXCLUDES_WHITELIST_ENTRIES:
# exclude
excluded_count += 1
logging.warning('Excluding %s: in both blacklist and whitelist.',
f._fpr)
else:
# include
filtered_fallbacks.append(f)
elif in_whitelist:
# include
filtered_fallbacks.append(f)
elif in_blacklist:
# exclude
excluded_count += 1
log_excluded('Excluding %s: in blacklist.', f._fpr)
else:
if INCLUDE_UNLISTED_ENTRIES:
# include
filtered_fallbacks.append(f)
else:
# exclude
excluded_count += 1
log_excluded('Excluding %s: in neither blacklist nor whitelist.',
f._fpr)
self.fallbacks = filtered_fallbacks
return excluded_count
@staticmethod
def summarise_filters(initial_count, excluded_count):
return '/* Whitelist & blacklist excluded %d of %d candidates. */'%(
excluded_count, initial_count)
# calculate each fallback's measured bandwidth based on the median
# consensus weight to advertised bandwidth ratio
def calculate_measured_bandwidth(self):
self.sort_fallbacks_by_cw_to_bw_factor()
median_fallback = self.fallback_median(True)
if median_fallback is not None:
median_cw_to_bw_factor = median_fallback.cw_to_bw_factor()
else:
# this will never be used, because there are no fallbacks
median_cw_to_bw_factor = None
for f in self.fallbacks:
f.set_measured_bandwidth(median_cw_to_bw_factor)
# remove relays with low measured bandwidth from the fallback list
# calculate_measured_bandwidth for each relay before calling this
def remove_low_bandwidth_relays(self):
if MIN_BANDWIDTH is None:
return
above_min_bw_fallbacks = []
for f in self.fallbacks:
if f._data['measured_bandwidth'] >= MIN_BANDWIDTH:
above_min_bw_fallbacks.append(f)
else:
# the bandwidth we log here is limited by the relay's consensus weight
# as well as its advertised bandwidth. See set_measured_bandwidth
# for details
log_excluded('%s not a candidate: bandwidth %.1fMByte/s too low, ' +
'must be at least %.1fMByte/s', f._fpr,
f._data['measured_bandwidth']/(1024.0*1024.0),
MIN_BANDWIDTH/(1024.0*1024.0))
self.fallbacks = above_min_bw_fallbacks
# the minimum fallback in the list
# call one of the sort_fallbacks_* functions before calling this
def fallback_min(self):
if len(self.fallbacks) > 0:
return self.fallbacks[-1]
else:
return None
# the median fallback in the list
# call one of the sort_fallbacks_* functions before calling this
def fallback_median(self, require_advertised_bandwidth):
# use the low-median when there are an even number of fallbacks,
# for consistency with the bandwidth authorities
if len(self.fallbacks) > 0:
median_position = (len(self.fallbacks) - 1) / 2
if not require_advertised_bandwidth:
return self.fallbacks[median_position]
# if we need advertised_bandwidth but this relay doesn't have it,
# move to a fallback with greater consensus weight until we find one
while not self.fallbacks[median_position]._data['advertised_bandwidth']:
median_position += 1
if median_position >= len(self.fallbacks):
return None
return self.fallbacks[median_position]
else:
return None
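# Example of the low-median (illustrative): with 4 fallbacks the index is
# (4 - 1) / 2 = 1 under integer division, i.e. the lower of the two middle
# entries, matching the bandwidth-authority convention noted above.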
# the maximum fallback in the list
# call one of the sort_fallbacks_* functions before calling this
def fallback_max(self):
if len(self.fallbacks) > 0:
return self.fallbacks[0]
else:
return None
# return a new bag suitable for storing attributes
@staticmethod
def attribute_new():
return dict()
# get the count of attribute in attribute_bag
# if attribute is None or the empty string, return 0
@staticmethod
def attribute_count(attribute, attribute_bag):
if attribute is None or attribute == '':
return 0
if attribute not in attribute_bag:
return 0
return attribute_bag[attribute]
# does attribute_bag contain more than max_count instances of attribute?
# if so, return False
# if not, return True
# if attribute is None or the empty string, or max_count is invalid,
# always return True
@staticmethod
def attribute_allow(attribute, attribute_bag, max_count=1):
if attribute is None or attribute == '' or max_count <= 0:
return True
elif CandidateList.attribute_count(attribute, attribute_bag) >= max_count:
return False
else:
return True
# add attribute to attribute_bag, incrementing the count if it is already
# present
# if attribute is None or the empty string, or count is invalid,
# do nothing
@staticmethod
def attribute_add(attribute, attribute_bag, count=1):
if attribute is None or attribute == '' or count <= 0:
return
attribute_bag.setdefault(attribute, 0)
attribute_bag[attribute] += count
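# Illustrative usage of the attribute bag helpers (hypothetical values):
#   bag = CandidateList.attribute_new()
#   CandidateList.attribute_allow('192.0.2.1', bag, 1)  # True, count is 0
#   CandidateList.attribute_add('192.0.2.1', bag)
#   CandidateList.attribute_allow('192.0.2.1', bag, 1)  # False, count is 1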
# make sure there are only MAX_FALLBACKS_PER_IP fallbacks per IPv4 address,
# and per IPv6 address
# there is only one IPv4 address on each fallback: the IPv4 DirPort address
# (we choose the IPv4 ORPort which is on the same IPv4 as the DirPort)
# there is at most one IPv6 address on each fallback: the IPv6 ORPort address
# we try to match the IPv4 ORPort, but will use any IPv6 address if needed
# (clients only use the IPv6 ORPort)
# if there is no IPv6 address, only the IPv4 address is checked
# return the number of candidates we excluded
def limit_fallbacks_same_ip(self):
ip_limit_fallbacks = []
ip_list = CandidateList.attribute_new()
for f in self.fallbacks:
if (CandidateList.attribute_allow(f.dirip, ip_list,
MAX_FALLBACKS_PER_IPV4)
and CandidateList.attribute_allow(f.ipv6addr, ip_list,
MAX_FALLBACKS_PER_IPV6)):
ip_limit_fallbacks.append(f)
CandidateList.attribute_add(f.dirip, ip_list)
if f.has_ipv6():
CandidateList.attribute_add(f.ipv6addr, ip_list)
elif not CandidateList.attribute_allow(f.dirip, ip_list,
MAX_FALLBACKS_PER_IPV4):
log_excluded('Eliminated %s: already have %d fallback(s) on IPv4 %s'
%(f._fpr, CandidateList.attribute_count(f.dirip, ip_list),
f.dirip))
elif (f.has_ipv6() and
not CandidateList.attribute_allow(f.ipv6addr, ip_list,
MAX_FALLBACKS_PER_IPV6)):
log_excluded('Eliminated %s: already have %d fallback(s) on IPv6 %s'
%(f._fpr, CandidateList.attribute_count(f.ipv6addr,
ip_list),
f.ipv6addr))
original_count = len(self.fallbacks)
self.fallbacks = ip_limit_fallbacks
return original_count - len(self.fallbacks)
# make sure there are only MAX_FALLBACKS_PER_CONTACT fallbacks for each
# ContactInfo
# if there is no ContactInfo, allow the fallback
# this check can be gamed by providing no ContactInfo, or by setting the
# ContactInfo to match another fallback
# However, given the likelihood that relays with the same ContactInfo will
# go down at similar times, its usefulness outweighs the risk
def limit_fallbacks_same_contact(self):
contact_limit_fallbacks = []
contact_list = CandidateList.attribute_new()
for f in self.fallbacks:
if CandidateList.attribute_allow(f._data['contact'], contact_list,
MAX_FALLBACKS_PER_CONTACT):
contact_limit_fallbacks.append(f)
CandidateList.attribute_add(f._data['contact'], contact_list)
else:
log_excluded(
'Eliminated %s: already have %d fallback(s) on ContactInfo %s'
%(f._fpr, CandidateList.attribute_count(f._data['contact'],
contact_list),
f._data['contact']))
original_count = len(self.fallbacks)
self.fallbacks = contact_limit_fallbacks
return original_count - len(self.fallbacks)
# make sure there are only MAX_FALLBACKS_PER_FAMILY fallbacks per effective
# family
# if there is no family, allow the fallback
# we use effective family, which ensures mutual family declarations
# but the check can be gamed by not declaring a family at all
# if any indirect families exist, the result depends on the order in which
# fallbacks are sorted in the list
def limit_fallbacks_same_family(self):
family_limit_fallbacks = []
fingerprint_list = CandidateList.attribute_new()
for f in self.fallbacks:
if CandidateList.attribute_allow(f._fpr, fingerprint_list,
MAX_FALLBACKS_PER_FAMILY):
family_limit_fallbacks.append(f)
CandidateList.attribute_add(f._fpr, fingerprint_list)
for family_fingerprint in f._data['effective_family']:
CandidateList.attribute_add(family_fingerprint, fingerprint_list)
else:
# we already have a fallback with this fallback in its effective
# family
log_excluded(
'Eliminated %s: already have %d fallback(s) in effective family'
%(f._fpr, CandidateList.attribute_count(f._fpr, fingerprint_list)))
original_count = len(self.fallbacks)
self.fallbacks = family_limit_fallbacks
return original_count - len(self.fallbacks)
# try once to get the descriptors for fingerprint_list using stem
# returns an empty list on exception
@staticmethod
def get_fallback_descriptors_once(fingerprint_list):
desc_list = get_server_descriptors(fingerprints=fingerprint_list).run(suppress=True)
return desc_list
# try up to max_retries times to get the descriptors for fingerprint_list
# using stem. Stops retrying when all descriptors have been retrieved.
# returns a list containing the descriptors that were retrieved
@staticmethod
def get_fallback_descriptors(fingerprint_list, max_retries=5):
# we can't use stem's retries=, because we want to support more than 96
# descriptors
#
# add an attempt for every MAX_FINGERPRINTS (or part thereof) in the list
max_retries += (len(fingerprint_list) + MAX_FINGERPRINTS - 1) / MAX_FINGERPRINTS
remaining_list = fingerprint_list
desc_list = []
for _ in xrange(max_retries):
if len(remaining_list) == 0:
break
new_desc_list = CandidateList.get_fallback_descriptors_once(remaining_list[0:MAX_FINGERPRINTS])
for d in new_desc_list:
try:
remaining_list.remove(d.fingerprint)
except ValueError:
# warn and ignore if a directory mirror returned a bad descriptor
logging.warning("Directory mirror returned unwanted descriptor %s, ignoring",
d.fingerprint)
continue
desc_list.append(d)
return desc_list
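# Illustrative arithmetic (assuming, say, MAX_FINGERPRINTS = 100): requesting
# 250 descriptors adds (250 + 100 - 1) / 100 = 3 extra attempts to
# max_retries, one per batch of up to MAX_FINGERPRINTS fingerprints.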
# find the fallbacks that cache extra-info documents
# Onionoo doesn't know this, so we have to use stem
def mark_extra_info_caches(self):
fingerprint_list = [ f._fpr for f in self.fallbacks ]
logging.info("Downloading fallback descriptors to find extra-info caches")
desc_list = CandidateList.get_fallback_descriptors(fingerprint_list)
for d in desc_list:
self[d.fingerprint]._extra_info_cache = d.extra_info_cache
missing_descriptor_list = [ f._fpr for f in self.fallbacks
if f._extra_info_cache is None ]
for f in missing_descriptor_list:
logging.warning("No descriptor for {}. Assuming extrainfo=0.".format(f))
# try a download check on each fallback candidate in order
# stop after max_count successful downloads
# but don't remove any candidates from the array
def try_download_consensus_checks(self, max_count):
dl_ok_count = 0
for f in self.fallbacks:
f.try_fallback_download_consensus()
if f.get_fallback_download_consensus():
# this fallback downloaded a consensus ok
dl_ok_count += 1
if dl_ok_count >= max_count:
# we have enough fallbacks
return
# put max_count successful candidates in the fallbacks array:
# - perform download checks on each fallback candidate
# - retry failed candidates if CONSENSUS_DOWNLOAD_RETRY is set
# - eliminate failed candidates
# - if there are more than max_count candidates, eliminate lowest bandwidth
# - if there are fewer than max_count candidates, leave only successful
# Return the number of fallbacks that failed the consensus check
def perform_download_consensus_checks(self, max_count):
self.sort_fallbacks_by_measured_bandwidth()
self.try_download_consensus_checks(max_count)
if CONSENSUS_DOWNLOAD_RETRY:
# try unsuccessful candidates again
# we could end up with more than max_count successful candidates here
self.try_download_consensus_checks(max_count)
# now we have at least max_count successful candidates,
# or we've tried them all
original_count = len(self.fallbacks)
self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(),
self.fallbacks)
# some of these failed the check, others skipped the check,
# if we already had enough successful downloads
failed_count = original_count - len(self.fallbacks)
self.fallbacks = self.fallbacks[:max_count]
return failed_count
# return a string that describes a/b as a percentage
@staticmethod
def describe_percentage(a, b):
if b != 0:
return '%d/%d = %.0f%%'%(a, b, (a*100.0)/b)
else:
# technically, 0/0 is undefined, but 0.0% is a sensible result
return '%d/%d = %.0f%%'%(a, b, 0.0)
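# e.g. (illustrative) describe_percentage(3, 4) returns '3/4 = 75%'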
# return a dictionary of lists of fallbacks by IPv4 netblock
# the dictionary is keyed by the fingerprint of an arbitrary fallback
# in each netblock
# mask_bits is the size of the netblock
def fallbacks_by_ipv4_netblock(self, mask_bits):
netblocks = {}
for f in self.fallbacks:
found_netblock = False
for b in netblocks.keys():
# we found an existing netblock containing this fallback
if f.ipv4_netblocks_equal(self[b], mask_bits):
# add it to the list
netblocks[b].append(f)
found_netblock = True
break
# make a new netblock based on this fallback's fingerprint
if not found_netblock:
netblocks[f._fpr] = [f]
return netblocks
# return a dictionary of lists of fallbacks by IPv6 netblock
# where mask_bits is the size of the netblock
def fallbacks_by_ipv6_netblock(self, mask_bits):
netblocks = {}
for f in self.fallbacks:
# skip fallbacks without IPv6 addresses
if not f.has_ipv6():
continue
found_netblock = False
for b in netblocks.keys():
# we found an existing netblock containing this fallback
if f.ipv6_netblocks_equal(self[b], mask_bits):
# add it to the list
netblocks[b].append(f)
found_netblock = True
break
# make a new netblock based on this fallback's fingerprint
if not found_netblock:
netblocks[f._fpr] = [f]
return netblocks
# log a message about the proportion of fallbacks in each IPv4 netblock,
# where mask_bits is the size of the netblock
def describe_fallback_ipv4_netblock_mask(self, mask_bits):
fallback_count = len(self.fallbacks)
shared_netblock_fallback_count = 0
most_frequent_netblock = None
netblocks = self.fallbacks_by_ipv4_netblock(mask_bits)
for b in netblocks.keys():
if len(netblocks[b]) > 1:
# how many fallbacks are in a netblock with other fallbacks?
shared_netblock_fallback_count += len(netblocks[b])
# what's the netblock with the most fallbacks?
if (most_frequent_netblock is None
or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
most_frequent_netblock = b
logging.debug('Fallback IPv4 addresses in the same /%d:'%(mask_bits))
for f in netblocks[b]:
logging.debug('%s - %s', f.dirip, f._fpr)
if most_frequent_netblock is not None:
logging.warning('There are %s fallbacks in the IPv4 /%d containing %s'%(
CandidateList.describe_percentage(
len(netblocks[most_frequent_netblock]),
fallback_count),
mask_bits,
self[most_frequent_netblock].dirip))
if shared_netblock_fallback_count > 0:
logging.warning(('%s of fallbacks are in an IPv4 /%d with other ' +
'fallbacks')%(CandidateList.describe_percentage(
shared_netblock_fallback_count,
fallback_count),
mask_bits))
# log a message about the proportion of fallbacks in each IPv6 netblock,
# where mask_bits is the size of the netblock
def describe_fallback_ipv6_netblock_mask(self, mask_bits):
fallback_count = len(self.fallbacks_with_ipv6())
shared_netblock_fallback_count = 0
most_frequent_netblock = None
netblocks = self.fallbacks_by_ipv6_netblock(mask_bits)
for b in netblocks.keys():
if len(netblocks[b]) > 1:
# how many fallbacks are in a netblock with other fallbacks?
shared_netblock_fallback_count += len(netblocks[b])
# what's the netblock with the most fallbacks?
if (most_frequent_netblock is None
or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
most_frequent_netblock = b
logging.debug('Fallback IPv6 addresses in the same /%d:'%(mask_bits))
for f in netblocks[b]:
logging.debug('%s - %s', f.ipv6addr, f._fpr)
if most_frequent_netblock is not None:
logging.warning('There are %s fallbacks in the IPv6 /%d containing %s'%(
CandidateList.describe_percentage(
len(netblocks[most_frequent_netblock]),
fallback_count),
mask_bits,
self[most_frequent_netblock].ipv6addr))
if shared_netblock_fallback_count > 0:
logging.warning(('%s of fallbacks are in an IPv6 /%d with other ' +
'fallbacks')%(CandidateList.describe_percentage(
shared_netblock_fallback_count,
fallback_count),
mask_bits))
# log a message about the proportion of fallbacks in each IPv4 /8, /16,
# and /24
def describe_fallback_ipv4_netblocks(self):
# this doesn't actually tell us anything useful
#self.describe_fallback_ipv4_netblock_mask(8)
self.describe_fallback_ipv4_netblock_mask(16)
#self.describe_fallback_ipv4_netblock_mask(24)
# log a message about the proportion of fallbacks in each IPv6 /12 (RIR),
# /23 (smaller RIR blocks), /32 (LIR), /48 (Customer), and /64 (Host)
# https://www.iana.org/assignments/ipv6-unicast-address-assignments/
def describe_fallback_ipv6_netblocks(self):
# these don't actually tell us anything useful
#self.describe_fallback_ipv6_netblock_mask(12)
#self.describe_fallback_ipv6_netblock_mask(23)
self.describe_fallback_ipv6_netblock_mask(32)
#self.describe_fallback_ipv6_netblock_mask(48)
self.describe_fallback_ipv6_netblock_mask(64)
# log a message about the proportion of fallbacks in each IPv4 and IPv6
# netblock
def describe_fallback_netblocks(self):
self.describe_fallback_ipv4_netblocks()
self.describe_fallback_ipv6_netblocks()
# return a list of fallbacks which are on the IPv4 ORPort port
def fallbacks_on_ipv4_orport(self, port):
return filter(lambda x: x.orport == port, self.fallbacks)
# return a list of fallbacks which are on the IPv6 ORPort port
def fallbacks_on_ipv6_orport(self, port):
return filter(lambda x: x.ipv6orport == port, self.fallbacks_with_ipv6())
# return a list of fallbacks which are on the DirPort port
def fallbacks_on_dirport(self, port):
return filter(lambda x: x.dirport == port, self.fallbacks)
# log a message about the proportion of fallbacks on IPv4 ORPort port
# and return that count
def describe_fallback_ipv4_orport(self, port):
port_count = len(self.fallbacks_on_ipv4_orport(port))
fallback_count = len(self.fallbacks)
logging.warning('%s of fallbacks are on IPv4 ORPort %d'%(
CandidateList.describe_percentage(port_count,
fallback_count),
port))
return port_count
# log a message about the proportion of IPv6 fallbacks on IPv6 ORPort port
# and return that count
def describe_fallback_ipv6_orport(self, port):
port_count = len(self.fallbacks_on_ipv6_orport(port))
fallback_count = len(self.fallbacks_with_ipv6())
logging.warning('%s of IPv6 fallbacks are on IPv6 ORPort %d'%(
CandidateList.describe_percentage(port_count,
fallback_count),
port))
return port_count
# log a message about the proportion of fallbacks on DirPort port
# and return that count
def describe_fallback_dirport(self, port):
port_count = len(self.fallbacks_on_dirport(port))
fallback_count = len(self.fallbacks)
logging.warning('%s of fallbacks are on DirPort %d'%(
CandidateList.describe_percentage(port_count,
fallback_count),
port))
return port_count
# log a message about the proportion of fallbacks on each dirport,
# each IPv4 orport, and each IPv6 orport
def describe_fallback_ports(self):
fallback_count = len(self.fallbacks)
ipv4_or_count = fallback_count
ipv4_or_count -= self.describe_fallback_ipv4_orport(443)
ipv4_or_count -= self.describe_fallback_ipv4_orport(9001)
logging.warning('%s of fallbacks are on other IPv4 ORPorts'%(
CandidateList.describe_percentage(ipv4_or_count,
fallback_count)))
ipv6_fallback_count = len(self.fallbacks_with_ipv6())
ipv6_or_count = ipv6_fallback_count
ipv6_or_count -= self.describe_fallback_ipv6_orport(443)
ipv6_or_count -= self.describe_fallback_ipv6_orport(9001)
logging.warning('%s of IPv6 fallbacks are on other IPv6 ORPorts'%(
CandidateList.describe_percentage(ipv6_or_count,
ipv6_fallback_count)))
dir_count = fallback_count
dir_count -= self.describe_fallback_dirport(80)
dir_count -= self.describe_fallback_dirport(9030)
logging.warning('%s of fallbacks are on other DirPorts'%(
CandidateList.describe_percentage(dir_count,
fallback_count)))
# return a list of fallbacks which cache extra-info documents
def fallbacks_with_extra_info_cache(self):
return filter(lambda x: x._extra_info_cache, self.fallbacks)
# log a message about the proportion of fallbacks that cache extra-info docs
def describe_fallback_extra_info_caches(self):
extra_info_fallback_count = len(self.fallbacks_with_extra_info_cache())
fallback_count = len(self.fallbacks)
logging.warning('%s of fallbacks cache extra-info documents'%(
CandidateList.describe_percentage(extra_info_fallback_count,
fallback_count)))
# return a list of fallbacks which have the Exit flag
def fallbacks_with_exit(self):
return filter(lambda x: x.is_exit(), self.fallbacks)
# log a message about the proportion of fallbacks with an Exit flag
def describe_fallback_exit_flag(self):
exit_fallback_count = len(self.fallbacks_with_exit())
fallback_count = len(self.fallbacks)
logging.warning('%s of fallbacks have the Exit flag'%(
CandidateList.describe_percentage(exit_fallback_count,
fallback_count)))
# return a list of fallbacks which have an IPv6 address
def fallbacks_with_ipv6(self):
return filter(lambda x: x.has_ipv6(), self.fallbacks)
# log a message about the proportion of fallbacks on IPv6
def describe_fallback_ip_family(self):
ipv6_fallback_count = len(self.fallbacks_with_ipv6())
fallback_count = len(self.fallbacks)
logging.warning('%s of fallbacks are on IPv6'%(
CandidateList.describe_percentage(ipv6_fallback_count,
fallback_count)))
def summarise_fallbacks(self, eligible_count, operator_count, failed_count,
guard_count, target_count):
s = ''
# Report:
# whether we checked consensus download times
# the number of fallback directories (and limits/exclusions, if relevant)
# min & max fallback bandwidths
# #error if below minimum count
if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
s += '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%(
'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '',
' and ' if (PERFORM_IPV4_DIRPORT_CHECKS
and PERFORM_IPV6_DIRPORT_CHECKS) else '',
'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '',
CONSENSUS_DOWNLOAD_SPEED_MAX)
else:
s += '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */'
s += '\n'
# Multiline C comment with #error if things go bad
s += '/*'
s += '\n'
# Integers don't need escaping in C comments
fallback_count = len(self.fallbacks)
if FALLBACK_PROPORTION_OF_GUARDS is None:
fallback_proportion = ''
else:
fallback_proportion = ', Target %d (%d * %.2f)'%(target_count,
guard_count,
FALLBACK_PROPORTION_OF_GUARDS)
s += 'Final Count: %d (Eligible %d%s'%(fallback_count, eligible_count,
fallback_proportion)
if MAX_FALLBACK_COUNT is not None:
s += ', Max %d'%(MAX_FALLBACK_COUNT)
s += ')\n'
if eligible_count != fallback_count:
removed_count = eligible_count - fallback_count
excess_to_target_or_max = (eligible_count - operator_count - failed_count
- fallback_count)
# some 'Failed' failed the check, others 'Skipped' the check,
# if we already had enough successful downloads
s += ('Excluded: %d (Same Operator %d, Failed/Skipped Download %d, ' +
'Excess %d)')%(removed_count, operator_count, failed_count,
excess_to_target_or_max)
s += '\n'
min_fb = self.fallback_min()
min_bw = min_fb._data['measured_bandwidth']
max_fb = self.fallback_max()
max_bw = max_fb._data['measured_bandwidth']
s += 'Bandwidth Range: %.1f - %.1f MByte/s'%(min_bw/(1024.0*1024.0),
max_bw/(1024.0*1024.0))
s += '\n'
s += '*/'
if fallback_count < MIN_FALLBACK_COUNT:
# We must have a minimum number of fallbacks so they are always
# reachable, and are in diverse locations
s += '\n'
s += '#error Fallback Count %d is too low. '%(fallback_count)
s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
s += 'Try adding entries to the whitelist, '
s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
return s
def process_existing():
logging.basicConfig(level=logging.INFO)
logging.getLogger('stem').setLevel(logging.INFO)
whitelist = {'data': parse_fallback_file(FALLBACK_FILE_NAME),
'name': FALLBACK_FILE_NAME}
blacklist = {'data': read_from_file(BLACKLIST_FILE_NAME, MAX_LIST_FILE_SIZE),
'name': BLACKLIST_FILE_NAME}
list_fallbacks(whitelist, blacklist)
def process_default():
logging.basicConfig(level=logging.WARNING)
logging.getLogger('stem').setLevel(logging.WARNING)
whitelist = {'data': read_from_file(WHITELIST_FILE_NAME, MAX_LIST_FILE_SIZE),
'name': WHITELIST_FILE_NAME}
blacklist = {'data': read_from_file(BLACKLIST_FILE_NAME, MAX_LIST_FILE_SIZE),
'name': BLACKLIST_FILE_NAME}
list_fallbacks(whitelist, blacklist)
## Main Function
def main():
if get_command() == 'check_existing':
process_existing()
else:
process_default()
def get_command():
if len(sys.argv) == 2:
return sys.argv[1]
else:
return None
def log_excluded(msg, *args):
if get_command() == 'check_existing':
logging.warning(msg, *args)
else:
logging.info(msg, *args)
def list_fallbacks(whitelist, blacklist):
""" Fetches required onionoo documents and evaluates the
fallback directory criteria for each of the relays """
print "/* type=fallback */"
print ("/* version={} */"
.format(cleanse_c_multiline_comment(FALLBACK_FORMAT_VERSION)))
now = datetime.datetime.utcnow()
timestamp = now.strftime('%Y%m%d%H%M%S')
print ("/* timestamp={} */"
.format(cleanse_c_multiline_comment(timestamp)))
# end the header with a separator, to make it easier for parsers
print SECTION_SEPARATOR_COMMENT
logging.warning('Downloading and parsing Onionoo data. ' +
'This may take some time.')
# find relays that could be fallbacks
candidates = CandidateList()
candidates.add_relays()
# work out how many fallbacks we want
guard_count = candidates.count_guards()
if FALLBACK_PROPORTION_OF_GUARDS is None:
target_count = guard_count
else:
target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
# the maximum number of fallbacks is the least of:
# - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count)
# - the maximum fallback count (MAX_FALLBACK_COUNT)
if MAX_FALLBACK_COUNT is None:
max_count = target_count
else:
max_count = min(target_count, MAX_FALLBACK_COUNT)
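# illustrative arithmetic (made-up numbers): guard_count = 2000 and
# FALLBACK_PROPORTION_OF_GUARDS = 0.1 give target_count = 200; with
# MAX_FALLBACK_COUNT = 200, max_count = min(200, 200) = 200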
candidates.compute_fallbacks()
prefilter_fallbacks = copy.copy(candidates.fallbacks)
# filter with the whitelist and blacklist
# if a relay has changed IPv4 address or ports recently, it will be excluded
# as ineligible before we call apply_filter_lists, and so there will be no
# warning that the details have changed from those in the whitelist.
# instead, there will be an info-level log during the eligibility check.
initial_count = len(candidates.fallbacks)
excluded_count = candidates.apply_filter_lists(whitelist, blacklist)
print candidates.summarise_filters(initial_count, excluded_count)
eligible_count = len(candidates.fallbacks)
# calculate the measured bandwidth of each relay,
# then remove low-bandwidth relays
candidates.calculate_measured_bandwidth()
candidates.remove_low_bandwidth_relays()
# print the raw fallback list
#for x in candidates.fallbacks:
# print x.fallbackdir_line(True)
# print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
# separators=(',', ': '), default=json_util.default)
# impose mandatory conditions here, like one per contact, family, IP
# in measured bandwidth order
candidates.sort_fallbacks_by_measured_bandwidth()
operator_count = 0
# only impose these limits on the final list - operators can nominate
# multiple candidate fallbacks, and then we choose the best set
if not OUTPUT_CANDIDATES:
operator_count += candidates.limit_fallbacks_same_ip()
operator_count += candidates.limit_fallbacks_same_contact()
operator_count += candidates.limit_fallbacks_same_family()
# check if each candidate can serve a consensus
# there's a small risk we've eliminated relays from the same operator that
# can serve a consensus, in favour of one that can't
# but given it takes up to 15 seconds to check each consensus download,
# the risk is worth it
if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
logging.warning('Checking consensus download speeds. ' +
'This may take some time.')
failed_count = candidates.perform_download_consensus_checks(max_count)
# work out which fallbacks cache extra-infos
candidates.mark_extra_info_caches()
# analyse and log interesting diversity metrics
# like netblock, ports, exit, IPv4-only
# (we can't easily analyse AS, and it's hard to accurately analyse country)
candidates.describe_fallback_ip_family()
# if we can't import the ipaddress module, we can't do netblock analysis
if HAVE_IPADDRESS:
candidates.describe_fallback_netblocks()
candidates.describe_fallback_ports()
candidates.describe_fallback_extra_info_caches()
candidates.describe_fallback_exit_flag()
# output C comments summarising the fallback selection process
if len(candidates.fallbacks) > 0:
print candidates.summarise_fallbacks(eligible_count, operator_count,
failed_count, guard_count,
target_count)
else:
print '/* No Fallbacks met criteria */'
# output C comments specifying the OnionOO data used to create the list
for s in fetch_source_list():
print describe_fetch_source(s)
# start the list with a separator, to make it easy for parsers
print SECTION_SEPARATOR_COMMENT
# sort the list differently depending on why we've created it:
# if we're outputting the final fallback list, sort by fingerprint
# this makes diffs much more stable
# otherwise, if we're trying to find a bandwidth cutoff, or we want to
# contact operators in priority order, sort by bandwidth (not yet
# implemented)
# otherwise, if we're contacting operators, sort by contact
candidates.sort_fallbacks_by(OUTPUT_SORT_FIELD)
for x in candidates.fallbacks:
print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks)
if __name__ == "__main__":
main()
``` |
{
"source": "jkklapp/falcon",
"score": 2
} |
#### File: falcon/tests/test_deprecations.py
```python
from falcon import request_helpers, stream
def test_bounded_stream():
assert request_helpers.Body is stream.Body
assert request_helpers.BoundedStream is stream.BoundedStream
``` |
{
"source": "jkklapp/paintshop",
"score": 4
} |
#### File: jkklapp/paintshop/generator.py
```python
from random import randint
from random import sample
class Generator:
'''
A case set generator.
Generates test cases with maximum N and M
Attrs:
cases: The list of generated test cases
max_n: The maximum number of colors in the case
max_m: The maximum number of customers in the case.
'''
def __init__(self, max_n, max_m):
self.max_n = max_n
self.max_m = max_m
self.cases = []
'''
Helper that prints a case
'''
def print_case(self, case, n, m):
print n
print m
for t in case:
print t
'''
Generates a test case.
For N colors, generates a test case for M
customers, provided that the total number of types
for the M customers does not exceed 3000 and,
for each customer, variety == 1 appears at most once.
Args:
n: An integer for the number of colors.
m: An integer for the number of customers.
Returns:
A list of strings like
["number_of_types0 color1 variety0 color2 variety1 ...",
"number_of_types1 color3 variety1 colorN variety0 ...",
...,
"number_of_typesn-1 colorN variety0 colorN-2 variety1 ..."]
'''
def generate_test_case(self, n, m):
pool_of_total_requests = 3000
case = []
for i in range(m):
n_customer_types = randint(1, n)
pool_of_total_requests -= n_customer_types
if pool_of_total_requests <= 0:
break
type_of_color = ""
already_chosen_matte = False
customer_choices = sample(range(1, n + 1), n_customer_types)
for j in customer_choices:
if already_chosen_matte:
variety = '0'
else:
variety = ['0', '1'][randint(0, 1)]
if variety == '1':
already_chosen_matte = True
type_of_color += " " + str(j) + " " + variety
case.append(str(n_customer_types) + type_of_color)
return [n, m, case]
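# Illustrative output (random, so actual values differ): for n=2, m=2 this
# might return [2, 2, ['1 1 1', '2 1 0 2 1']]; each string starts with the
# customer's number of requested types, followed by (color, variety) pairs.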
'''
Generate c test cases.
'''
def generate_test_cases(self, c):
for i in range(c):
n = randint(1, self.max_n)
m = randint(1, self.max_m)
test_case = self.generate_test_case(n, m)
self.cases.append(test_case)
'''
Prints c test cases.
'''
def print_test_cases(self, c):
print c
for i in range(len(self.cases)):
n = self.cases[i][0]
m = self.cases[i][1]
case = self.cases[i][2]
self.print_case(case, n, m)
'''
Generates and prints c test cases.
'''
def generate_and_print_test_cases(self, c):
self.generate_test_cases(c)
self.print_test_cases(c)
'''
Prints output to a file
'''
def print_test_cases_to_file(self, c, filename):
f = open(filename, 'w')
f.write(str(c) + '\n')
for i in range(len(self.cases)):
n = self.cases[i][0]
m = self.cases[i][1]
case = self.cases[i][2]
f.write(str(n) + '\n')
f.write(str(m) + '\n')
for t in case:
f.write(t + '\n')
f.close()
```
#### File: jkklapp/paintshop/optimizer.py
```python
from tester import Tester
from random import randint
from math import pow
class Optimizer:
"""
Object that implements different
optimization strategies for a solution.
Attrs:
solution: A solution being optimized.
case: A list of customers with their requirements.
"""
def __init__(self, solution, case):
self.solution = solution
self.current_mattes = sum([int(x) for x in solution])
self.case = case
self.tester = Tester()
self.valid_solution = False
self.steps = 0
self.METHODS = {
'random_optimizer': self.random_optimizer,
'matte_minimizer': self.matte_minimizer
}
'''
Generates the optimal naive solution.
The optimal solution is to produce a batch
of each color glossy.
[0, 0, ..., 0]
Args:
n: The number of colors.
Returns:
A list of n 0s.
'''
def generate_naive_solution(self, n):
return ['0' for i in range(n)]
'''
Modifies a solution.
Args:
solution: An array with 0s and 1s.
i: Position to switch value.
Returns
The same solution with the i-th
position switched.
'''
def change_solution(self, solution, i):
solution[i] = '1' if solution[i] == '0' else '0'
return solution
'''
Checks if the solution has improved
Args:
solution: An array with 0s and 1s.
Returns:
True if the solution is still valid
and the number of 1s has decreased.
False otherwise.
'''
def solution_improved(self, s):
candidate_mattes = sum([int(x) for x in s])
if self.valid_solution and candidate_mattes < self.current_mattes:
self.current_mattes = candidate_mattes
return True
return False
'''
Randomly perturbs the solution.
Switches the value of random positions in the solution array and
keeps a switch only when the solution stays valid and the number
of matte batches decreases. The result is stored in self.solution.
Args:
i: The number of random switches to try.
'''
def random_optimizer(self, i=1):
tester = self.tester
case = self.case
s = self.solution
for k in range(i):
self.valid_solution = self.tester.is_valid_solution(self.solution, case)
pos = randint(0, len(s) - 1)
s = self.change_solution(s, pos)
if self.solution_improved(s):
self.solution = s
else:
s = self.change_solution(s, pos)
self.steps += i
'''
Tries to improve the solution incrementally.
Flips each position in turn and keeps the flip only when the
solution remains valid with fewer matte batches. The best
solution found is stored in self.solution.
'''
def matte_minimizer(self):
solution = self.solution
tester = self.tester
case = self.case
i = 0
while i < len(solution):
solution = self.change_solution(solution, i)
self.valid_solution = tester.is_valid_solution(solution, case)
if self.solution_improved(solution):
self.solution = solution
else:
solution = self.change_solution(solution, i)
i += 1
self.steps = i
'''
Executes the optimization.
Args:
method: The name of your optimization method.
'''
def optimize(self, method=None):
if not method:
method = 'random_optimizer'
self.valid_solution = self.tester.is_valid_solution(self.solution, self.case)
self.steps = 0
if not self.tester.impossible:
self.METHODS[method]()
'''
Returns the solution as a string, or "IMPOSSIBLE" when no valid solution exists.
'''
def get_solution(self):
tester = self.tester
if (self.tester.impossible):
return "IMPOSSIBLE"
else:
return " ".join([str(s) for s in self.solution])
```
#### File: jkklapp/paintshop/optimizer_test.py
```python
import unittest
from optimizer import Optimizer
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.opt = Optimizer(['1', '1', '1'],
[['1 1', '2 0', '3 1'], ['1 0', '2 1', '3 1'], ['1 0', '3 0', '2 1']])
def test_matte_minimizer(self):
self.opt.matte_minimizer()
self.assertEqual(self.opt.solution, ['0', '0', '0'])
self.assertEqual(self.opt.steps, 3)
if __name__ == '__main__':
unittest.main()
```
#### File: jkklapp/paintshop/parser.py
```python
import sys
from random import randint
class Parser:
'''
Reads test cases from a
provided case file.
'''
def __init__(self, filename):
self.f = open(filename, 'r')
self.c = int(self.f.readline())
self.n = 0
'''
Close the file.
'''
def finish(self):
self.f.close()
'''
Reads the next case from a case file.
One line containing the N number of colors in the case.
One line containing the M number of customers in the case.
M lines each one containing the types for each customer.
Uses the file object opened in the constructor and
returns None once all cases have been consumed.
Returns:
A list containing the customer types, e.g.:
[['1 0', '2 1'], ['1 1'], ['3 0']]
'''
def read_next_case(self):
if self.c == 0:
self.finish()
return None
n = int(self.f.readline())
self.current_n = n
m = int(self.f.readline())
customers = []
for i in range(m):
customers.append(self.read_customer_types(self.f.readline()))
self.c -= 1
return customers
'''
Parses a customer's type from the file.
Args:
* t: A type String.
Returns:
A list containing the customer type, e.g.:
['1 0', '2 1']
'''
def read_customer_types(self, t):
raw_list = t.split()
l = [raw_list[i] + " " + raw_list[i + 1] for i in range(1, len(raw_list), 2)]
return l
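# Example of the case-file layout this parser expects, matching what
# generator.py emits (the concrete numbers below are my own illustration):
#   1              <- number of cases in the file
#   3              <- N, colors in case 1
#   2              <- M, customers in case 1
#   1 1 0          <- customer 1: one type, color 1 glossy
#   2 1 0 2 1      <- customer 2: two types, color 1 glossy and color 2 matte
# read_next_case() would return [['1 0'], ['1 0', '2 1']] for this case.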
``` |
{
"source": "jkklapp/sheila2",
"score": 3
} |
#### File: jkklapp/sheila2/cst_test.py
```python
import unittest
import os
from cst import CodeTable
class TestBasicCSTFunctionality(unittest.TestCase):
def setUp(self):
self.cst = CodeTable("test.cst")
def tearDown(self):
try:
os.remove("test.cst")
except OSError:
pass
def testGettingSetWithMostCommonTags(self):
cst = self.cst
cst["a"] = ["k1", "k2", "k3"]
cst["b"] = ["k3", "k4"]
cst["b"] = ["k1", "k3", "k5"]
cst["d"] = ["k1", "k2", "k4", "k5"]
cst_key = cst.get_set_with_most_common_tags(["k1", "k2", "k4"])
self.assertEqual(cst_key, "d")
if __name__ == '__main__':
unittest.main()
```
#### File: jkklapp/sheila2/sheila2_test.py
```python
import unittest
import os
from sheila2 import Sheila
class BasicTestCase(unittest.TestCase):
def setUp(self):
self.sheila = Sheila("testdb", "test.cst")
def tearDown(self):
try:
os.remove("test.cst")
except OSError:
pass
self.sheila.destroy()
class TestBasicInsertion(BasicTestCase):
def testTableEntryExpansion(self):
sheila = self.sheila
sheila.insert({"a": 1, "b": 2})
self.assertEqual(len(sheila.cst.tables()), 1)
sheila.insert({"a": 12})
self.assertEqual(len(sheila.cst.tables()), 1)
sheila.insert({"a": 1, "b": 2, "c": 3})
self.assertEqual(len(sheila.cst.tables()), 1)
def testTableExpansion(self):
sheila = self.sheila
sheila.insert({"a": 1, "b": 2})
self.assertEqual(len(sheila.cst.tables()), 1)
sheila.insert({"c": 12})
self.assertEqual(len(sheila.cst.tables()), 2)
sheila.insert({"b": 2, "c": 3})
self.assertEqual(len(sheila.cst.tables()), 2)
class TestBasicQuery(BasicTestCase):
def testGetData(self):
sheila = self.sheila
test_data = {"a": 1, "b": 2}
sheila.insert(test_data)
query_data = sheila.query({"a": 1})
self.assertIn(test_data, query_data)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jkklapp/tinizer",
"score": 3
} |
#### File: jkklapp/tinizer/tinizer.py
```python
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
import pickledb as db
from floo import Floo
from urlparse import urlparse
app = Flask(__name__)
@app.route("/")
def index():
"""Shows user the landing page, with the form to submit a new
URL.
"""
return render_template('index.html', url="")
@app.route("/about")
def about():
"""Shows user the about page.
"""
return render_template('about.html', url="")
@app.route("/", methods=["POST"])
def tinize():
"""Reads from the request de original URL and generates the
tiny url for it. Stores the data in the DB.
"""
original_url = request.form["original_url"]
parsed_url = urlparse(original_url)
is_valid_url = bool(parsed_url.scheme)
if not is_valid_url:
return render_template('400.html'), 400
tiny_url = db.get("next_url")
db.set(tiny_url, original_url)
db.set("next_url", get_next_url(tiny_url))
return render_template('index.html', tinized_url=urlize(tiny_url))
@app.route("/<tiny_url>")
def untinize(tiny_url):
original_url = db.get(tiny_url)
if not original_url:
return render_template('404.html'), 404
else:
return redirect(original_url, code=302)
def get_first_url():
return counter.initial()
def get_next_url(current_url):
return counter.inc(current_url)
def urlize(url):
return request.url_root + url
if __name__ == "__main__":
# Characters valid in a short URL
counter = Floo("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~:/?#[]@!$&'()*+,;=")
db = db.load('tinize.db', True)
n_urls = db.get("next_url")
if not n_urls:
db.set("next_url", get_first_url())
app.debug = True
app.run()
``` |
{
"source": "jkkl/Bert-Chinese-Text-Classification-Pytorch",
"score": 2
} |
#### File: jkkl/Bert-Chinese-Text-Classification-Pytorch/predict_for_tag.py
```python
import os
import time
import torch
import torch.nn.functional as F
import numpy as np
from train_eval import train, init_network, test
from importlib import import_module
import argparse
import pandas as pd
from sklearn import metrics
from utils import build_dataset, build_iterator, get_time_dif
parser = argparse.ArgumentParser(description='Chinese Text Classification')
parser.add_argument('--model', type=str, default='bert', help='choose a model: Bert, ERNIE')
args = parser.parse_args()
def predict_tag(config_list, model_list, data_iter, out_file):
result_pd = pd.DataFrame()
with torch.no_grad():
for texts, labels, queries in data_iter:
outputs = []
for config, model in zip(config_list, model_list):
batch_output = model(texts)
batch_labels = torch.max(batch_output.data, 1)[1].cpu().numpy()
batch_labels_name = [config.class_list[i] for i in batch_labels]
batch_scores = torch.max(torch.softmax(batch_output.data, 1),1)[0].cpu().numpy()
outputs.append((batch_labels, batch_scores, batch_labels_name))
is_diff = [(label == pred).cpu().numpy() for label, pred in zip(labels, outputs[0][0])]
batch_out_data = {'query': queries, 'origin_bi_label': labels.cpu().numpy(), 'predict_bi_label': outputs[0][0],'predict_bi_score': outputs[0][1],
'is_diff': is_diff, "predict_mu_label": outputs[1][2], "predict_mu_score": outputs[1][1]}
batch_result = pd.DataFrame(batch_out_data)
result_pd = result_pd.append(batch_result)
result_pd.to_csv(out_file, sep='\t')
def load_model_dict(dataset_list, model_type):
model_list = []
config_list = []
for dataset in dataset_list:
x = import_module('models.' + model_type)
config = x.Config(dataset)
model = x.Model(config).to(config.device)
model.load_state_dict(torch.load(config.save_path))
model.eval()
model_list.append(model)
config_list.append(config)
return model_list, config_list
def load_tag_data(config, data_type):
train_data, dev_data, test_data = build_dataset(config)
if data_type == 'train':
tag_data = build_iterator(train_data, config)
elif data_type == 'dev':
tag_data = build_iterator(dev_data, config)
elif data_type == 'test':
tag_data = build_iterator(test_data, config)
return tag_data
if __name__ == '__main__':
dataset2 = 'data/Intention2_V2' # dataset
dataset135 = 'data/Intention135' # dataset
dataset_list = [dataset2, dataset135]
model_type = args.model # bert
model_list, config_list = load_model_dict(dataset_list, model_type)
x = import_module('models.' + model_type)
tag_data_config = x.Config(dataset2)
data_type = 'dev'
tag_data = load_tag_data(tag_data_config, data_type)
out_file = dataset2 + f'/iter/{data_type}_iter.csv'
predict_tag(config_list, model_list, tag_data, out_file)
``` |
{
"source": "jkkl/KvPI",
"score": 2
} |
#### File: jkkl/KvPI/train.py
```python
import sys
import os
import torch
import argparse
from tqdm.std import trange
sys.path.append('./lib')
from bert import BERTLM
from treelstm import TreeLSTM
from kvbert import myModel
from kvbert import TreeArgs
from treelstm import treeVocab
import numpy as np
from google_bert import BasicTokenizer
from treelstm import Tree
from treelstm import Constants
from data_loader import DataLoader
from tqdm import tqdm
from torch.nn import CrossEntropyLoss
# from pytorch_pretrained_bert.optimization import BertAdam
import torch.optim as optim
from sklearn import metrics
def extract_parameters(ckpt_path):
model_ckpt = torch.load(ckpt_path)
bert_args = model_ckpt['bert_args']
model_args = model_ckpt['args']
bert_vocab = model_ckpt['bert_vocab']
model_parameters = model_ckpt['model']
tree_args = model_ckpt['tree_args']
tree_vocab = model_ckpt['tree_vocab']
return bert_args, model_args, bert_vocab, model_parameters, tree_args, tree_vocab
def init_empty_bert_model(bert_args, bert_vocab, gpu_id, approx = 'none'):
bert_model = BERTLM(gpu_id, bert_vocab, bert_args.embed_dim, bert_args.ff_embed_dim, bert_args.num_heads, \
bert_args.dropout, bert_args.layers, approx)
return bert_model
def init_empty_tree_model(t_args, tree_vocab, gpuid):
tree_model = TreeLSTM(tree_vocab.size(), t_args.input_dim, t_args.mem_dim, t_args.hidden_dim, t_args.num_classes, t_args.freeze_embed)
tree_model = tree_model.cuda(gpuid)
return tree_model
def init_sequence_classification_model(empty_bert_model, args, bert_args, gpu_id, bert_vocab, model_parameters, empty_tree_model, tree_args):
number_class = args.number_class
number_category = 3
embedding_size = bert_args.embed_dim
batch_size = args.batch_size
dropout = args.dropout
tree_hidden_dim = tree_args.hidden_dim
device = gpu_id
vocab = bert_vocab
seq_tagging_model = myModel(empty_bert_model, number_class, number_category, embedding_size, batch_size, dropout, device, vocab, empty_tree_model, tree_hidden_dim)
return seq_tagging_model
def init_sequence_classification_model_with_dict(empty_bert_model, args, bert_args, gpu_id, bert_vocab, model_parameters, empty_tree_model, tree_args):
number_class = args.number_class
number_category = 3
embedding_size = bert_args.embed_dim
batch_size = args.batch_size
dropout = args.dropout
tree_hidden_dim = tree_args.hidden_dim
device = gpu_id
vocab = bert_vocab
seq_tagging_model = myModel(empty_bert_model, number_class, number_category, embedding_size, batch_size, dropout, device, vocab, empty_tree_model, tree_hidden_dim)
seq_tagging_model.load_state_dict(model_parameters)
return seq_tagging_model
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('--max_len', type=int, default=128)
parser.add_argument('--ckpt_path', type=str)
parser.add_argument('--test_data',type=str)
parser.add_argument('--out_path',type=str)
parser.add_argument('--gpu_id',type=int, default=0)
parser.add_argument('--train_epoch',type=int, default=3)
parser.add_argument('--train_batch_size',type=int, default=32)
parser.add_argument('--dev_batch_size',type=int, default=32)
parser.add_argument('--learning_rate',type=float, default=2e-5)
parser.add_argument('--do_train',action='store_true', help='Whether to run training.')
parser.add_argument('--do_eval',action='store_true', help='Whether to run eval.')
parser.add_argument('--do_test',action='store_true', help='Whether to run test.')
parser.add_argument('--no_cuda',action='store_true', default=False, help='Whether to disable the GPU.')
parser.add_argument('--output_dir',type=str, default='saved_model')
return parser.parse_args()
def segment(text):
seg = [1 for _ in range(len(text))]
idx = text.index("sep")
seg[:idx] = [0 for _ in range(idx)]
return seg
def profile(text):
seg = [3 for _ in range(len(text))]
loc_idx = text.index("loc") - 1
gender_idx = text.index("gender") - 1
sep_idx = text.index("sep")
seg[:loc_idx] = [0 for _ in range(loc_idx)]
seg[loc_idx:gender_idx] = [1 for _ in range(gender_idx-loc_idx)]
seg[gender_idx:sep_idx] = [2 for _ in range(sep_idx-gender_idx)]
return seg
def read_tree(line):
parents = list(map(int, line.split()))
trees = dict()
root = None
for i in range(1, len(parents) + 1):
if i - 1 not in trees.keys() and parents[i - 1] != -1:
idx = i
prev = None
while True:
parent = parents[idx - 1]
if parent == -1:
break
tree = Tree()
if prev is not None:
tree.add_child(prev)
trees[idx - 1] = tree
tree.idx = idx - 1
if parent - 1 in trees.keys():
trees[parent - 1].add_child(tree)
break
elif parent == 0:
root = tree
break
else:
prev = tree
idx = parent
return root
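# Note on the parent-pointer format read_tree consumes (my own illustration,
# not from the repository): the i-th number is the 1-based index of token i's
# head, with 0 marking the root. For example, the line "2 0 2" yields a tree
# rooted at token 2 with tokens 1 and 3 as its children.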
def seq_cut(seq, max_len):
if len(seq) > max_len:
seq = seq[:max_len]
return seq
def read_sentence(line, vocab):
indices = vocab.convertToIdx(line, Constants.UNK_WORD)
return torch.LongTensor(indices)
def init_model(ckpt_path):
bert_args, model_args, bert_vocab, model_parameters, tree_args, tree_vocab = extract_parameters(ckpt_path)
empty_bert_model = init_empty_bert_model(bert_args, bert_vocab, gpu_id, approx='none')
empty_tree_model = init_empty_tree_model(tree_args, tree_vocab, gpu_id)
seq_classification_model = init_sequence_classification_model(empty_bert_model, model_args, bert_args, gpu_id, bert_vocab, model_parameters, empty_tree_model, tree_args)
return seq_classification_model, tree_vocab
def eval(model, data_loader, device):
batch_num = int(data_loader.dev_num/args.dev_batch_size)
batch_num = batch_num if data_loader.dev_num % args.dev_batch_size == 0 else batch_num + 1
predict_all = np.array([], dtype=int)
label_all = np.array([], dtype=int)
loss_mean = 0
with torch.no_grad():
for step in trange(batch_num, desc=f'valid ({batch_num} batches)'):
batch_data = data_loader.get_next_batch(args.dev_batch_size, 'dev')
batch_text_list, batch_label_list, batch_seg_list, batch_type_list, batch_category_list, \
batch_a_seg_list, batch_a_tree_list, batch_b_seg_list, batch_b_tree_list = batch_data
batch_label_ids = torch.tensor(batch_label_list, dtype=torch.long).to(device)
pred_output = model(batch_text_list, batch_seg_list, batch_type_list, batch_a_seg_list, batch_a_tree_list, batch_b_seg_list, batch_b_tree_list, fine_tune=True)
logits = pred_output[0]
loss = criterion(logits.view(-1, label_nums), batch_label_ids.view(-1))
loss_mean += torch.sum(loss)
predict = torch.max(logits.data, 1)[1].cpu().numpy()
label_all = np.append(label_all, batch_label_ids.data.cpu().numpy())
predict_all = np.append(predict_all, predict)
acc = metrics.accuracy_score(label_all, predict_all)
loss_mean /= data_loader.dev_num
return acc, loss_mean
def predict(model, data_loader, device, is_train=False):
model.eval()
if is_train:
model.train()
batch_data = data_loader.get_next_batch(args.train_batch_size, 'train' if is_train else 'dev')
batch_text_list, batch_label_list, batch_seg_list, batch_type_list, batch_category_list, \
batch_a_seg_list, batch_a_tree_list, batch_b_seg_list, batch_b_tree_list = batch_data
batch_label_ids = torch.tensor(batch_label_list, dtype=torch.long).to(device)
pred_output = model(batch_text_list, batch_seg_list, batch_type_list, batch_a_seg_list, batch_a_tree_list, batch_b_seg_list, batch_b_tree_list, fine_tune=True)
logits = pred_output[0]
loss = criterion(logits.view(-1, label_nums), batch_label_ids.view(-1))
return logits, loss
if __name__ == '__main__':
args = parse_config()
ckpt_path = args.ckpt_path
test_data = args.test_data
out_path = args.out_path
gpu_id = args.gpu_id
model, tree_vocab = init_model(ckpt_path)
model.cuda(gpu_id)
tokenizer = BasicTokenizer()
if args.do_train:
train_path = 'data/KvPI_train.txt'
dev_path = 'data/KvPI_valid.txt'
data_loader = DataLoader(train_path, dev_path, tree_vocab, args.max_len)
criterion = CrossEntropyLoss()
label_nums = model.num_class
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
optimizer = optim.AdamW(model.parameters(), lr=3e-5)
# param_optimizer = list(model.named_parameters())
# no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
# ]
# optimizer = BertAdam(optimizer_grouped_parameters,
# lr=args.learning_rate,
# warmup=args.warmup_proportion,
# t_total=num_train_optimization_steps)
global_step = 0
best_dev_acc = 0.0
for epoch in trange(int(args.train_epoch), desc='Epoch'):
model.train()
batch_num = int(data_loader.train_num/args.train_batch_size)
batch_num = batch_num if data_loader.train_num % args.train_batch_size == 0 else batch_num + 1
train_loss = 0
for step in trange(batch_num, desc=f'Training ({batch_num} batches)'):
logits, loss = predict(model, data_loader, device, is_train=True)
loss.backward()
train_loss += loss.item()
optimizer.step()
optimizer.zero_grad()
global_step += 1
if global_step % 100 == 0:
dev_acc, dev_loss = eval(model, data_loader, device)
if dev_acc > best_dev_acc:
best_dev_acc = dev_acc
if best_dev_acc > 0.8:
output_model_file = os.path.join(args.output_dir, str(global_step)+"_pytorch_model.bin")
torch.save(model.state_dict(), output_model_file)
print(f'global step:{global_step}, train loss:{loss.item()}, best dev acc {best_dev_acc}, current dev acc {dev_acc}, dev loss {dev_loss}')
print(f'epoch {epoch}: training done, train mean loss: {train_loss/data_loader.train_num}')
with torch.no_grad():
with open(out_path, 'w', encoding='utf8') as o:
with open(test_data, 'r', encoding='utf8') as i:
lines = i.readlines()
for l in tqdm(lines[1:], desc='Predicting'):
content_list = l.strip().split('\t')
text = content_list[0]
text_tokenized_list = tokenizer.tokenize(text)
if len(text_tokenized_list) > args.max_len:
text_tokenized_list = text_tokenized_list[:args.max_len]
seg_list = segment(text_tokenized_list)
typ_list = profile(text_tokenized_list)
a_seg = read_sentence(seq_cut(content_list[3].split(' '), args.max_len), tree_vocab)
a_tree = read_tree(content_list[4])
b_seg = read_sentence(seq_cut(content_list[5].split(' '), args.max_len), tree_vocab)
b_tree = read_tree(content_list[6])
pred_output = model([text_tokenized_list], [seg_list], [typ_list], [a_seg], [a_tree], [b_seg], [b_tree], fine_tune=False)[0].cpu().numpy()
pred_probability = pred_output[0]
pred_label = np.argmax(pred_probability)
out_line = text + '\t' + str(pred_label)
o.writelines(out_line + '\n')
print("done.")
``` |
{
"source": "jkkl/sccl",
"score": 3
} |
#### File: sccl/utils/metric.py
```python
from __future__ import print_function
import time
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment as hungarian
from sklearn.metrics.cluster import normalized_mutual_info_score, adjusted_rand_score, adjusted_mutual_info_score
cluster_nmi = normalized_mutual_info_score
def cluster_acc(y_true, y_pred):
y_true = y_true.astype(np.int64)
assert y_pred.size == y_true.size
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
# ind = sklearn.utils.linear_assignment_.linear_assignment(w.max() - w)
# row_ind, col_ind = linear_assignment(w.max() - w)
row_ind, col_ind = hungarian(w.max() - w)
return sum([w[i, j] for i, j in zip(row_ind, col_ind)]) * 1.0 / y_pred.size
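# Quick sanity check for cluster_acc (hypothetical labels, not part of the
# repository): predictions that are a pure relabelling of the ground truth
# should reach 1.0 after the Hungarian matching above, e.g.
#   cluster_acc(np.array([0, 0, 1, 1]), np.array([1, 1, 0, 0])) -> 1.0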
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = float(self.sum) / self.count
class Timer(object):
"""
"""
def __init__(self):
self.reset()
def reset(self):
self.interval = 0
self.time = time.time()
def value(self):
return time.time() - self.time
def tic(self):
self.time = time.time()
def toc(self):
self.interval = time.time() - self.time
self.time = time.time()
return self.interval
class Confusion(object):
"""
column of confusion matrix: predicted index
row of confusion matrix: target index
"""
def __init__(self, k, normalized = False):
super(Confusion, self).__init__()
self.k = k
self.conf = torch.LongTensor(k,k)
self.normalized = normalized
self.reset()
def reset(self):
self.conf.fill_(0)
self.gt_n_cluster = None
def cuda(self):
self.conf = self.conf.cuda()
def add(self, output, target):
output = output.squeeze()
target = target.squeeze()
assert output.size(0) == target.size(0), \
'number of targets and outputs do not match'
if output.ndimension()>1: #it is the raw probabilities over classes
assert output.size(1) == self.conf.size(0), \
'number of outputs does not match size of confusion matrix'
_,pred = output.max(1) #find the predicted class
else: #it is already the predicted class
pred = output
indices = (target*self.conf.stride(0) + pred.squeeze_().type_as(target)).type_as(self.conf)
ones = torch.ones(1).type_as(self.conf).expand(indices.size(0))
self._conf_flat = self.conf.view(-1)
self._conf_flat.index_add_(0, indices, ones)
def classIoU(self,ignore_last=False):
confusion_tensor = self.conf
if ignore_last:
confusion_tensor = self.conf.narrow(0,0,self.k-1).narrow(1,0,self.k-1)
union = confusion_tensor.sum(0).view(-1) + confusion_tensor.sum(1).view(-1) - confusion_tensor.diag().view(-1)
acc = confusion_tensor.diag().float().view(-1).div(union.float()+1)
return acc
def recall(self,clsId):
i = clsId
TP = self.conf[i,i].sum().item()
TPuFN = self.conf[i,:].sum().item()
if TPuFN==0:
return 0
return float(TP)/TPuFN
def precision(self,clsId):
i = clsId
TP = self.conf[i,i].sum().item()
TPuFP = self.conf[:,i].sum().item()
if TPuFP==0:
return 0
return float(TP)/TPuFP
def f1score(self,clsId):
r = self.recall(clsId)
p = self.precision(clsId)
print("classID:{}, precision:{:.4f}, recall:{:.4f}".format(clsId, p, r))
if (p+r)==0:
return 0
return 2*float(p*r)/(p+r)
def acc(self):
TP = self.conf.diag().sum().item()
total = self.conf.sum().item()
if total==0:
return 0
return float(TP)/total
def optimal_assignment(self,gt_n_cluster=None,assign=None):
if assign is None:
mat = -self.conf.cpu().numpy() #hungaian finds the minimum cost
r,assign = hungarian(mat)
self.conf = self.conf[:,assign]
self.gt_n_cluster = gt_n_cluster
return assign
def show(self,width=6,row_labels=None,column_labels=None):
print("Confusion Matrix:")
conf = self.conf
rows = self.gt_n_cluster or conf.size(0)
cols = conf.size(1)
if column_labels is not None:
print(("%" + str(width) + "s") % '', end='')
for c in column_labels:
print(("%" + str(width) + "s") % c, end='')
print('')
for i in range(0,rows):
if row_labels is not None:
print(("%" + str(width) + "s|") % row_labels[i], end='')
for j in range(0,cols):
print(("%"+str(width)+".d")%conf[i,j],end='')
print('')
def conf2label(self):
conf=self.conf
gt_classes_count=conf.sum(1).squeeze()
n_sample = gt_classes_count.sum().item()
gt_label = torch.zeros(n_sample)
pred_label = torch.zeros(n_sample)
cur_idx = 0
for c in range(conf.size(0)):
if gt_classes_count[c]>0:
gt_label[cur_idx:cur_idx+gt_classes_count[c]].fill_(c)
for p in range(conf.size(1)):
if conf[c][p]>0:
pred_label[cur_idx:cur_idx+conf[c][p]].fill_(p)
cur_idx = cur_idx + conf[c][p]
return gt_label,pred_label
def clusterscores(self):
target,pred = self.conf2label()
NMI = normalized_mutual_info_score(target,pred)
ARI = adjusted_rand_score(target,pred)
AMI = adjusted_mutual_info_score(target,pred)
return {'NMI':NMI,'ARI':ARI,'AMI':AMI}
``` |
{
"source": "j-kk/PlantStation",
"score": 2
} |
#### File: PlantStation/core/config.py
```python
import configparser
import datetime
import logging
import shutil
from pathlib import Path
from threading import RLock
from . import parse_time
from .ext import PinManager
DEFAULT_ACTIVE_LIMIT = 1
class Config(object):
"""
Thrad safe config structure with logging
"""
_path: Path = None
_cfg_parser : configparser.RawConfigParser
_cfg_lock : RLock
_logger: logging.Logger
def __init__(self, logger: logging.Logger, path: Path, dry_run=False):
"""
Default constructor. Uses program's logger
Parameters:
-----------
logger : logging.Logger
program's logger
path : pathlib.Path
path to the config
dry_run : boolean = False
should all IO operations be mocked?
"""
self._cfg_lock = RLock()
self._cfg_parser = configparser.RawConfigParser()
self._cfg_parser.optionxform = str
self._logger = logger
self._dry_run = dry_run
if path:
self.path = path
def __getitem__(self, item):
with self._cfg_lock:
return self._cfg_parser[item]
def __setitem__(self, key, value):
with self._cfg_lock:
self._cfg_parser[key] = value
@property
def cfg_parser(self):
with self._cfg_lock:
return self._cfg_parser
@property
def logger(self):
"""
Returns global logger
"""
return self._logger
@property
def path(self):
"""
Config location's path
"""
with self._cfg_lock:
if not self._path:
self.logger.critical(f'Config path was not set')
raise ValueError(f'Config path is not set')
else:
return self._path
@path.setter
def path(self, value: Path):
with self._cfg_lock:
if value.suffix != '.cfg':
raise ValueError(f'Specified path is not a .cfg')
if value.is_dir():
raise IsADirectoryError()
if not self._dry_run:
if not value.parent.is_dir():
value.parent.mkdir(parents=True)
if self._path:
shutil.move(self._path, value)
self._path = value
def read(self) -> None:
"""
Reads content from config file. Thread safe
"""
with self._cfg_lock:
if not self._cfg_parser.read(self.path):
self.logger.critical(f'Config file {self.path} not found')
raise FileNotFoundError(f'Error: environment config file not found. Quitting!')
else:
self.logger.info(f'Config file {self._path} read succesfully!')
def write(self) -> None:
"""
Writes config to file. Thread safe
"""
with self._cfg_lock:
try:
cfg_file = open(self.path, 'w')
self._cfg_parser.write(cfg_file)
self.logger.info(f'Created config file in {self._path}')
except FileNotFoundError or IsADirectoryError as exc:
self.logger.warning(f'Couldn\'t create file in given directory.')
raise exc
except PermissionError as exc:
self.logger.error(
f'Couldn\'t create file in given directory. No permissions to create file in {self._path}')
raise exc
class EnvironmentConfig(Config):
"""
Configuration intended for general use. Stores information about GPIO,
creates global logger
"""
_dry_run = False
_env_name: str
debug: bool
pin_manager: PinManager
def __init__(self, env_name: str, path=None, debug=False, dry_run: bool = False):
"""
Default constructor. Uses program's logger
Parameters:
-----------
path : pathlib.Path
path to config
debug : bool = False
print extra debug information
dry_run : bool = False
should pins be mocked?
"""
# set env vars
self.env_name = env_name
self.debug = debug
self.dry_run = dry_run
# create global logger
logger = logging.getLogger('PlantStation').getChild(self.env_name)
Formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
channel = logging.StreamHandler()
channel.setFormatter(Formatter)
logger.addHandler(channel)
logger.setLevel(logging.DEBUG if debug else logging.INFO)
# initialize config
super().__init__(logger, path)
if path is None:
self.cfg_parser['GLOBAL'] = {
'env_name': self.env_name
}
# initialize pins
self.pin_manager = PinManager(dry_run=dry_run)
@property
def silent_hours(self):
try:
if self.cfg_parser['GLOBAL']['workingHours'] == 'True':
begin = datetime.time.fromisoformat(self.cfg_parser['GLOBAL']['workingHoursBegin'])
end = datetime.time.fromisoformat(self.cfg_parser['GLOBAL']['workingHoursEnd'])
return [end, begin]
else:
return None
except KeyError as exc:
self.logger.error(f'Silent hours not given!')
raise exc
except ValueError as exc:
self.logger.fatal(f'Silent hours in wrong format {exc}!')
raise exc
def disable_silent_hours(self):
with self._cfg_lock:
self.logger.info(f'Disabled silent hours')
self.cfg_parser['GLOBAL']['workingHours'] = str(False)
@silent_hours.setter
def silent_hours(self, value: (datetime.time, datetime.time)):
value = list(map(lambda t: t.strftime('%H:%M'), value))
with self._cfg_lock:
if 'GLOBAL' not in self.cfg_parser:
self.cfg_parser['GLOBAL'] = {}
self.cfg_parser['GLOBAL']['workingHours'] = str(True)
self.cfg_parser['GLOBAL']['workingHoursBegin'] = value[1]
self.cfg_parser['GLOBAL']['workingHoursEnd'] = value[0]
@property
def active_limit(self):
try:
return int(self._cfg_parser['GLOBAL']['ActiveLimit'])
except KeyError:
return DEFAULT_ACTIVE_LIMIT
@active_limit.setter
def active_limit(self, value: int):
self.pin_manager.active_limit = value
self.cfg_parser['GLOBAL']['ActiveLimit'] = str(value)
self.logger.debug(f'Active limit set to {value}')
def list_plants(self) -> [str]:
"""
Returns list of all plants' names specified in config
"""
sections = self.cfg_parser.sections()
if 'GLOBAL' in sections:
sections.remove('GLOBAL')
return sections
def update_plant_section(self, plant):
section = dir(plant)
with self._cfg_lock:
self.cfg_parser[plant.plantName] = {}
for key in section:
self.cfg_parser[plant.plantName][key] = str(getattr(plant, key))
def remove_plant_section(self, plant):
with self._cfg_lock:
self.cfg_parser.remove_section(plant.plantName)
def parse_plants(self):
"""Reads environment config file - plant section
Reads config file from location defined by self._cfg_paths
and if provided data are correct, returns Plants with provided data
"""
# read global section
plant_params = []
# read_plants
for section in self._cfg_parser:
if section == 'DEFAULT':
continue
if section != 'GLOBAL':
self.logger.debug('Found new section: %s', section)
try: # TODO How about replacing params with Dataclass?
params = {
'plantName': str(section),
'wateringDuration': datetime.timedelta(
seconds=float(self._cfg_parser[section]['wateringDuration'])),
'wateringInterval': parse_time(self._cfg_parser[section]['wateringInterval']),
'gpioPinNumber': str(self._cfg_parser[section]['gpioPinNumber']),
'isActive': self._cfg_parser[section]['isActive'] == 'True'}
if self._cfg_parser[section]['lastTimeWatered'] != '':
time_str = self._cfg_parser[section]['lastTimeWatered']
params['lastTimeWatered'] = datetime.datetime.strptime(time_str, '%Y-%m-%d %X')
else:
params['lastTimeWatered'] = datetime.datetime.min
plant_params.append(params)
self.logger.info(
f'Found new plant: {params["plantName"]}, pin: {params["gpioPinNumber"]}')
except KeyError as err:
self.logger.error(
f'{self._cfg_parser}: Failed to read {section} section - '
f'option not found {str(err)}')
except Exception as err:
self.logger.error(
f'{self._path} Failed to read {section} section {err}')
return plant_params
@staticmethod
def create_from_file(path: Path, debug: bool = False, dry_run: bool = False):
# check path
if not path.exists() or not path.is_file():
raise FileNotFoundError()
if not path.name.endswith('.cfg'):
raise FileExistsError('File has wrong suffix')
env_name = path.name[:-4]
env = EnvironmentConfig(env_name, path, debug, dry_run)
env.read()
env.pin_manager.active_limit = env.active_limit #TODO in future
return env
```
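A minimal sketch of an environment config that `EnvironmentConfig.parse_plants` can read (the section name, pin, and timings below are my own example, not shipped with the repository):

```python
# Writes a 'balcony.cfg' usable with EnvironmentConfig.create_from_file(Path('balcony.cfg')).
import configparser

cfg = configparser.RawConfigParser()
cfg.optionxform = str
cfg['GLOBAL'] = {'env_name': 'balcony', 'ActiveLimit': '1'}
cfg['Basil'] = {
    'wateringDuration': '30',           # seconds, read back as a timedelta
    'wateringInterval': '2D 00:00:00',  # format accepted by parse_time
    'gpioPinNumber': 'GPIO17',
    'isActive': 'True',
    'lastTimeWatered': '',              # empty means never watered
}
with open('balcony.cfg', 'w') as handle:
    cfg.write(handle)
```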
#### File: PlantStation/gardener/gardener.py
```python
import logging
from PlantStation.core import Environment, EnvironmentConfig
from .tasks import TaskPool, ShouldWaterTask
class Gardener(object):
"""Maintains plant and schedules watering
Holds information about environment and uses task pool to schedule watering
Parameters
----------
environment : Environment
Reference to monitored environment
pool : TaskPool
Related Task Pool
"""
environment: Environment
pool: TaskPool
_logger: logging.Logger
def __init__(self, env_config: EnvironmentConfig):
self._logger = env_config.logger
self._logger.setLevel(logging.DEBUG if env_config.debug else logging.INFO)
self._logger.debug(f'Creating environment')
self.environment = Environment(env_config)
self._logger.debug(f'Creating task pool')
self.pool = TaskPool(env_config)
def schedule_monitoring(self) -> None:
"""Sets up event scheduler - Obligatory before starting event scheduler
Schedules to check all plants
"""
self._logger.debug('Scheduling monitoring')
for plant in self.environment.plants:
self.pool.add_task(ShouldWaterTask(plant=plant, env_config=self.environment.config))
self._logger.debug(f'Scheduled monitoring - OK')
def start(self) -> None:
"""Starts to look after plants
Starts pool tasks
"""
self._logger.info('Starting scheduler')
self.pool.start()
```
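A minimal usage sketch tying the pieces together (the config file name and the exact import path for Gardener are assumptions on my part):

```python
from pathlib import Path
from PlantStation.core import EnvironmentConfig
from PlantStation.gardener.gardener import Gardener

# dry_run=True mocks the GPIO pins, so this can run off the Raspberry Pi.
config = EnvironmentConfig.create_from_file(Path('balcony.cfg'), debug=True, dry_run=True)
gardener = Gardener(config)
gardener.schedule_monitoring()  # queue a ShouldWaterTask per configured plant
gardener.start()                # start the task pool
```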
#### File: tests/core/test_helpers.py
```python
import pytest
from core.helpers.format_validators import *
from random import Random
__author__ = "Jakub"
__copyright__ = "Jakub"
__license__ = "mit"
rand_seed = 1023
@pytest.mark.basic
def test_basic():
assert parse_time('10D 09:09:09').total_seconds() == datetime.timedelta(days=10, hours=9, minutes=9,
seconds=9).total_seconds()
assert parse_time('0D 00:00:00').total_seconds() == datetime.timedelta(seconds=0).total_seconds()
with pytest.raises(ValueError):
parse_time('10D 9:09:09')
@pytest.mark.slow
def test_random():
random = Random(rand_seed)
for it in range(0, 10000):
days = random.randint(0, 99)
hour = random.randint(0, 23)
minute = random.randint(0, 59)
sec = random.randint(0, 59)
parsed = parse_time('{}D {}:{}:{}'.format(days, str(hour).zfill(2), str(minute).zfill(2), str(sec).zfill(2)))
td = datetime.timedelta(days=days, hours=hour, minutes=minute, seconds=sec)
assert parsed == td
@pytest.mark.extended
def test_value_error():
with pytest.raises(ValueError):
parse_time('0D 09:9:09')
with pytest.raises(ValueError):
parse_time('0D 09:911:11')
with pytest.raises(ValueError):
parse_time('0D -1:91:11')
with pytest.raises(ValueError):
parse_time('-1D 09:11:111')
with pytest.raises(ValueError):
parse_time('0D :09:11:81')
with pytest.raises(ValueError):
parse_time('0 D 09:11:81')
with pytest.raises(ValueError):
parse_time('0D 0:3:11:81')
with pytest.raises(ValueError):
parse_time('0D fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')
with pytest.raises(ValueError):
parse_time('0D 09:11 : 81')
with pytest.raises(ValueError):
parse_time('0D 03: 11:81')
with pytest.raises(ValueError):
parse_time('0D 23 :11:81')
with pytest.raises(ValueError):
parse_time('0D ::')
@pytest.mark.extended
def test_board():
for i in range(1, 40):
assert is_gpio(f'BOARD{i}')
assert is_gpio(f'GPIO{i}')
with pytest.raises(ValueError):
assert is_gpio('BOARD 30')
with pytest.raises(ValueError):
assert is_gpio('BOARD 30 ')
with pytest.raises(ValueError):
assert is_gpio('BOARD 30')
with pytest.raises(ValueError):
assert is_gpio('GPIO30 ')
with pytest.raises(ValueError):
assert is_gpio('GPIO3 0')
``` |
{
"source": "jkkummerfeld/1ec-graph-parser",
"score": 3
} |
#### File: evaluation/nlp_util/parse_errors.py
```python
from collections import defaultdict
import pstree
class Parse_Error_Set:
def __init__(self, gold=None, test=None, include_terminals=False):
self.missing = []
self.crossing = []
self.extra = []
self.POS = []
self.spans = {}
if gold is not None and test is not None:
errors = get_errors(test, gold, include_terminals)
for error in errors:
if len(error) > 4:
self.add_error(error[0], error[1], error[2], error[3], error[4])
else:
self.add_error(error[0], error[1], error[2], error[3])
def add_error(self, etype, span, label, node, gold_label=None):
error = (etype, span, label, node)
if gold_label is not None:
error = (etype, span, label, node, gold_label)
if span not in self.spans:
self.spans[span] = {}
if label not in self.spans[span]:
self.spans[span][label] = []
self.spans[span][label].append(error)
if etype == 'missing':
self.missing.append(error)
elif etype == 'crossing':
self.crossing.append(error)
elif etype == 'extra':
self.extra.append(error)
elif etype == 'diff POS':
self.POS.append(error)
def is_extra(self, node):
if node.span in self.spans:
if node.label in self.spans[node.span]:
for error in self.spans[node.span][node.label]:
if error[0] == 'extra':
return True
return False
def __len__(self):
return len(self.missing) + len(self.extra) + len(self.crossing) + (2*len(self.POS))
def get_errors(test, gold, include_terminals=False):
ans = []
gold_spans = []
gold_POS = {}
gold_span_set = defaultdict(lambda: 0)
for span in gold:
if span.is_terminal():
if include_terminals:
gold_POS[span.span] = span.label
continue
key = (span.span[0], span.span[1], span.label)
gold_span_set[key] += 1
gold_spans.append((key, span))
test_spans = []
test_span_set = defaultdict(lambda: 0)
for span in test:
if span.is_terminal():
if include_terminals:
tnode = span
gold_label = gold_POS[tnode.span]
if gold_label != tnode.label:
ans.append(('diff POS', tnode.span, tnode.label, tnode, gold_label))
continue
key = (span.span[0], span.span[1], span.label)
test_span_set[key] += 1
test_spans.append((key, span))
# Extra
for key, span in test_spans:
count = gold_span_set.get(key)
if count is None or count == 0:
ans.append(('extra', span.span, span.label, span))
else:
gold_span_set[key] -= 1
# Missing and crossing
for key, span in gold_spans:
count = test_span_set.get(key)
if count is None or count == 0:
name = 'missing'
for tkey, tspan in test_spans:
if tkey[0] < key[0] < tkey[1] < key[1]:
name = 'crossing'
break
elif key[0] < tkey[0] < key[1] < tkey[1]:
name = 'crossing'
break
ans.append((name, span.span, span.label, span))
else:
test_span_set[key] -= 1
return ans
def counts_for_prf(test, gold, include_root=False, include_terminals=False):
# Note - currently assumes the roots match
tcount = 0
for node in test:
if node.is_terminal() and not include_terminals:
continue
if node.parent is None and not include_root:
continue
tcount += 1
gcount = 0
for node in gold:
if node.is_terminal() and not include_terminals:
continue
if node.parent is None and not include_root:
continue
gcount += 1
errors = Parse_Error_Set(gold, test, True)
match = tcount - len(errors.extra)
if include_terminals:
match -= len(errors.POS)
return match, gcount, tcount, len(errors.crossing), len(errors.POS)
if __name__ == '__main__':
print "No unit testing implemented for Error_Set"
```
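A small sketch of how `counts_for_prf` feeds a labelled precision/recall/F1 computation (the trees and flat import layout are my own example; the package targets Python 2):

```python
import pstree, parse_errors

gold = pstree.tree_from_text("(ROOT (S (NP (NNP Alice)) (VP (VBZ sleeps))))")
test = pstree.tree_from_text("(ROOT (S (NP (NNP Alice)) (VP (VBZ sleeps))))")

match, gcount, tcount, crossing, pos_errors = parse_errors.counts_for_prf(test, gold)
precision = match / float(tcount)   # 1.0 for identical trees
recall = match / float(gcount)      # 1.0
f1 = 2 * precision * recall / (precision + recall)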
#### File: evaluation/nlp_util/tree_transform.py
```python
import pstree
def change_label_by_node(node, new_label, in_place):
if not in_place:
node = pstree.clone_and_find(node)
node.label = new_label
return (True, (node.root(), node))
def change_label_by_span(tree, new_label, span, cur_label, in_place=True):
tree = tree.root()
for node in tree:
if node.span == span and node.label == cur_label:
return change_label_by_node(node, new_label, in_place)
return (False, "Failed to find node with ({}, {} - {})".format(cur_label, *span))
def change_label(tree, new_label, span=None, cur_label=None, in_place=True):
if span is None and cur_label is None:
return change_label_by_node(tree, new_label, in_place)
elif span is not None and cur_label is not None:
return change_label_by_span(tree, new_label, span, cur_label, in_place)
else:
raise Exception("Invalid combination of arguments for change label request")
def add_node(tree, span, label, position=0, in_place=True):
'''Introduce a new node in the tree. Position indicates what to do when a
node already exists with the same span. Zero indicates above any current
nodes, one indicates beneath the first, and so on.'''
tree = tree.root()
if not in_place:
tree = tree.clone()
# Find the node(s) that should be within the new span
nodes = tree.get_spanning_nodes(*span)
# Do not operate on the root node
if nodes[0].parent is None:
nodes = nodes[0].subtrees[:]
for i in xrange(position):
if len(nodes) > 1:
return (False, "Position {} is too deep".format(position))
nodes[0] = nodes[0].subtrees[0]
nodes.sort(key=lambda x: x.span)
# Check that all of the nodes are at the same level
parent = None
for node in nodes:
if parent is None:
parent = node.parent
if parent != node.parent:
return (False, "The span ({} - {}) would cross brackets".format(*span))
# Create the node
nnode = pstree.PSTree(None, label, span, parent)
position = parent.subtrees.index(nodes[0])
parent.subtrees.insert(position, nnode)
# Move the subtrees
for node in nodes:
node.parent.subtrees.remove(node)
nnode.subtrees.append(node)
node.parent = nnode
return (True, (tree, nnode))
def remove_node_by_node(node, in_place):
if not in_place:
node = pstree.clone_and_find(node)
parent = node.parent
position = parent.subtrees.index(node)
init_position = position
parent.subtrees.pop(position)
for subtree in node.subtrees:
subtree.parent = parent
parent.subtrees.insert(position, subtree)
position += 1
return (True, (parent, node, init_position, position))
def remove_node_by_span(tree, span, label, position, in_place):
'''Delete a node from the tree. Position indicates what to do when multiple
nodes of the requested type exist. Zero indicates to remove the top node,
one indicates to remove the second, and so on.'''
nodes = tree.get_nodes('all', span[0], span[1])
nodes = filter(lambda node: node.label == label, nodes)
if len(nodes) <= position:
return (False, "No node matching {} ({}, {} - {}) found".format(position, label, *span))
return remove_node_by_node(nodes[position], in_place)
def remove_node(tree, span=None, label=None, position=None, in_place=True):
if span is None and label is None:
return remove_node_by_node(tree, in_place)
elif span is not None and label is not None:
if position is None:
position = 0
return remove_node_by_span(tree, span, label, position, in_place)
else:
raise Exception("Invalid combination of arguments for remove node request")
# TODO: Span-centric version?
def move_nodes(nodes, new_parent, in_place=True, remove_empty=True, remove_trivial_unary=True):
if not in_place:
nodes = pstree.clone_and_find(nodes + [new_parent])
new_parent = nodes[-1]
nodes = nodes[:-1]
# Find the insertion point in the new parent's subtrees
old_parent = nodes[0].parent
nodes.sort(key=lambda x: x.span)
node_span = (nodes[0].span[0], nodes[-1].span[1])
insertion_point = 0
if new_parent.subtrees[0].span[0] == node_span[1]:
# Inserting before all that are there currently
pass
elif new_parent.subtrees[0].span[0] == node_span[0]:
# Inserting before all that are there currently
pass
else:
for subtree in new_parent.subtrees:
if subtree.span[0] == node_span[1]:
break
insertion_point += 1
if subtree.span[1] == node_span[0]:
break
if insertion_point > len(new_parent.subtrees):
return (False, "new_parent did not have suitable insertion point")
# Move the nodes across
for node in nodes:
node.parent.subtrees.remove(node)
new_parent.subtrees.insert(insertion_point, node)
node.parent = new_parent
insertion_point += 1
# If the nodes left behind are empty, remove them
to_check_for_unary = old_parent
if remove_empty and len(old_parent.subtrees) == 0:
to_remove = old_parent
while len(to_remove.parent.subtrees) == 1:
to_remove = to_remove.parent
to_remove.parent.remove(to_remove)
# If the removal applies, then we will need to check at that level for
# unaries, rather than down at the old_parent
to_check_for_unary = to_remove.parent
# Remove trivial unaries
if remove_trivial_unary:
to_check = to_check_for_unary
if len(to_check.subtrees) == 1 and to_check.label == to_check.subtrees[0].label:
to_check.subtrees = to_check.subtrees[0].subtrees
for subtree in to_check.subtrees:
subtree.parent = to_check
new_parent.root().calculate_spans()
return (True, (new_parent.root(), nodes, new_parent))
```
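A brief sketch of `change_label` on a freshly parsed tree (the sentence and flat import layout are my own example; the package targets Python 2):

```python
import pstree, tree_transform

tree = pstree.tree_from_text("(ROOT (S (NP (DT the) (NN cat)) (VP (VBD sat))))")

# Relabel the NP covering words 0-2 in place.
ok, (root, node) = tree_transform.change_label(tree, 'NP-SBJ', span=(0, 2), cur_label='NP')
print root
# (ROOT (S (NP-SBJ (DT the) (NN cat)) (VP (VBD sat))))
```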
#### File: format-conversion/nlp_util/pstree.py
```python
import re, string
from collections import defaultdict
DEFAULT_LABEL = 'label_not_set'
TRACE_LABEL = '-NONE-'
class TreeIterator:
'''Iterator for traversal of a tree.
PSTree uses pre-order traversal by default, but this supports post-order too, e.g.:
>>> tree = tree_from_text("(ROOT (S (NP-SBJ (NNP Ms.) (NNP Haag) ) (VP (VBZ plays) (NP (NNP Elianti) )) (. .) ))")
>>> for node in TreeIterator(tree, 'post'):
... print node
(NNP Ms.)
(NNP Haag)
(NP-SBJ (NNP Ms.) (NNP Haag))
(VBZ plays)
(NNP Elianti)
(NP (NNP Elianti))
(VP (VBZ plays) (NP (NNP Elianti)))
(. .)
(S (NP-SBJ (NNP Ms.) (NNP Haag)) (VP (VBZ plays) (NP (NNP Elianti))) (. .))
(ROOT (S (NP-SBJ (NNP Ms.) (NNP Haag)) (VP (VBZ plays) (NP (NNP Elianti))) (. .)))
'''
def __init__(self, tree, order='pre'):
self.tree = tree
self.pos = [0]
self.order = order
def __iter__(self):
return self
def next(self):
while True:
if len(self.pos) == 0:
raise StopIteration
# For pre-order traversal, return nodes when first reached
ans = None
if self.order == 'pre' and self.pos[-1] == 0:
ans = self.tree
# Update internal state to point at the next node in the tree
if self.pos[-1] < len(self.tree.subtrees):
self.tree = self.tree.subtrees[self.pos[-1]]
self.pos[-1] += 1
self.pos.append(0)
else:
if self.order == 'post':
ans = self.tree
self.tree = self.tree.parent
self.pos.pop()
if ans is not None:
return ans
class PSTree:
'''Phrase Structure Tree
>>> tree = tree_from_text("(ROOT (NP (NNP Newspaper)))")
>>> print tree
(ROOT (NP (NNP Newspaper)))
>>> tree = tree_from_text("(ROOT (S (NP-SBJ (NNP Ms.) (NNP Haag) ) (VP (VBZ plays) (NP (NNP Elianti) )) (. .) ))")
>>> print tree
(ROOT (S (NP-SBJ (NNP Ms.) (NNP Haag)) (VP (VBZ plays) (NP (NNP Elianti))) (. .)))
>>> print tree.word_yield()
Ms. Haag plays Elianti .
>>> tree = tree_from_text("(ROOT (NFP ...))")
>>> print tree
(ROOT (NFP ...))
>>> tree.word_yield()
'...'
>>> tree = tree_from_text("(VP (VBD was) (VP (VBN named) (S (NP-SBJ (-NONE- *-1) ) (NP-PRD (NP (DT a) (JJ nonexecutive) (NN director) ) (PP (IN of) (NP (DT this) (JJ British) (JJ industrial) (NN conglomerate) ))))))")
>>> print tree
(VP (VBD was) (VP (VBN named) (S (NP-SBJ (-NONE- *-1)) (NP-PRD (NP (DT a) (JJ nonexecutive) (NN director)) (PP (IN of) (NP (DT this) (JJ British) (JJ industrial) (NN conglomerate)))))))
>>> tree.word_yield()
'was named *-1 a nonexecutive director of this British industrial conglomerate'
'''
def __init__(self, word=None, label=DEFAULT_LABEL, span=(0, 0), parent=None, subtrees=None):
self.word = word
self.label = label
self.span = span
self.wordspan = span
self.parent = parent
self.unique_id = None
self.subtrees = []
if subtrees is not None:
self.subtrees = subtrees
for subtree in subtrees:
subtree.parent = self
def __iter__(self):
return TreeIterator(self, 'pre')
def clone(self):
ans = PSTree(self.word, self.label, self.span)
for subtree in self.subtrees:
subclone = subtree.clone()
subclone.parent = ans
ans.subtrees.append(subclone)
return ans
def is_terminal(self):
'''Check if the tree has no children.'''
return len(self.subtrees) == 0
def is_trace(self):
'''Check if this tree is the end of a trace.'''
return self.label == TRACE_LABEL
def is_punct(self):
if self.label in {"IN", "TO", "RB", "AUX", "DT"} or self.word is None:
return False
if self.word in {"!", "#", "'", "''", "*", ",", "-", "--", ".", "...", ":", ";", "=", "?", "@", "\*", "\*\*", "`", "``"}:
return True
return False
def is_conjunction(self):
return self.label in {'CC', 'CONJP'}
def root(self):
'''Follow parents until a node is reached that has no parent.'''
if self.parent is not None:
return self.parent.root()
else:
return self
def __repr__(self):
'''Return a bracket notation style representation of the tree.'''
# TODO: Shift this to str and add more field info
ans = '('
if self.is_trace():
ans += TRACE_LABEL + ' ' + self.word
elif self.is_terminal():
ans += self.label + ' ' + self.word
else:
ans += self.label
for subtree in self.subtrees:
ans += ' ' + subtree.__repr__()
ans += ')'
return ans
def set_unique_id(self, cur=0):
'''Set a unique numerical ID for each node.'''
self.unique_id = cur
cur += 1
for subtree in self.subtrees:
cur = subtree.set_unique_id(cur)
return cur
def calculate_spans(self, left=0, wordleft=0):
'''Update the spans for every node in this tree.'''
right = left
wordright = wordleft
if self.is_terminal():
if not self.is_trace():
wordright += 1
right += 1
for subtree in self.subtrees:
right, wordright = subtree.calculate_spans(right, wordright)
self.span = (left, right)
self.wordspan = (wordleft, wordright)
return right, wordright
def check_consistency(self):
'''Check that the parents and spans are consistent with the tree
structure.'''
ans = True
if self.is_terminal():
if self.is_trace() and self.span[0] != self.span[1]:
print "non-zero span at a terminal trace node"
ans = False
elif self.span[0] + 1 != self.span[1]:
print "span changes by value other than 1 at non-trace terminal node"
ans = False
else:
for i in xrange(len(self.subtrees)):
subtree = self.subtrees[i]
if subtree.parent != self:
print "bad parent link"
ans = False
if i > 0 and self.subtrees[i - 1].span[1] != subtree.span[0]:
print "Subtree spans don't match"
ans = False
ans = ans and subtree.check_consistency()
if self.span != (self.subtrees[0].span[0], self.subtrees[-1].span[1]):
print "Span doesn't match subtree spans"
ans = False
return ans
def production_list(self, ans=None):
'''Get a list of productions as:
(node label, node span, ((subtree1, end1), (subtree2, end2)...))'''
if ans is None:
ans = []
if len(self.subtrees) > 0:
cur = (self.label, self.span, tuple([(sub.label, sub.span[1]) for sub in self.subtrees]))
ans.append(cur)
for sub in self.subtrees:
sub.production_list(ans)
return ans
def word_yield(self, span=None, as_list=False):
'''Return the set of words at terminal nodes, either as a space separated
string, or as a list.'''
if self.is_terminal():
if span is None or span[0] <= self.span[0] < span[1]:
if self.word is None:
return None
if as_list:
return [self.word]
else:
return self.word
else:
return None
else:
ans = []
for subtree in self.subtrees:
words = subtree.word_yield(span, as_list)
if words is not None:
if as_list:
ans += words
else:
ans.append(words)
if not as_list:
ans = ' '.join(ans)
return ans
def node_dict(self, depth=0, node_dict=None):
'''Get a dictionary of labelled nodes. Note that we use a dictionary to
take into consideration unaries like (NP (NP ...))'''
if node_dict is None:
node_dict = defaultdict(lambda: [])
for subtree in self.subtrees:
subtree.node_dict(depth + 1, node_dict)
node_dict[(self.label, self.span[0], self.span[1])].append(depth)
return node_dict
def get_nodes(self, request='all', start=-1, end=-1, node_list=None):
'''Get the node(s) that have a given span. Unspecified endpoints are
treated as wildcards. The request can be 'lowest', 'highest', or 'all'.
For 'all', the list of nodes is in order from the highest first.'''
if request not in ['highest', 'lowest', 'all']:
raise Exception("%s is not a valid request" % str(request))
if request == 'lowest' and start < 0 and end < 0:
raise Exception("Lowest is not well defined when both ends are wildcards")
if request == 'all' and node_list is None:
node_list = []
if request == 'highest':
if self.span[0] == start or start < 0:
if self.span[1] == end or end < 0:
return self
for subtree in self.subtrees:
# Skip subtrees with no overlapping range
if 0 < end <= subtree.span[0] or subtree.span[1] < start:
continue
ans = subtree.get_nodes(request, start, end, node_list)
if ans is not None and request != 'all':
return ans
if self.span[0] == start or start < 0:
if self.span[1] == end or end < 0:
if request == 'lowest':
return self
elif request == 'all':
node_list.insert(0, self)
return node_list
if request == 'all':
return node_list
else:
return None
def get_spanning_nodes(self, start, end, node_list=None):
return_ans = False
if node_list is None:
return_ans = True
node_list = []
if self.span[0] == start and self.span[1] <= end:
node_list.append(self)
start = self.span[1]
else:
for subtree in self.subtrees:
if subtree.span[1] < start:
continue
start = subtree.get_spanning_nodes(start, end, node_list)
if start == end:
break
if return_ans:
if start == end:
return node_list
else:
return None
else:
return start
def tree_from_text(text, allow_empty_labels=False, allow_empty_words=False):
'''Construct a PSTree from the provided string, which is assumed to represent
a tree with nested round brackets. Nodes are labeled by the text between the
open bracket and the next space (possibly an empty string). Words are the
text after that space and before the close bracket.'''
root = None
cur = None
pos = 0
word = ''
for char in text:
# Consume random text up to the first '('
if cur is None:
if char == '(':
root = PSTree()
cur = root
continue
if char == '(':
word = word.strip()
if cur.label is DEFAULT_LABEL:
if len(word) == 0 and not allow_empty_labels:
raise Exception("Empty label found\n%s" % text)
cur.label = word
word = ''
if word != '':
raise Exception("Stray '%s' while processing\n%s" % (word, text))
sub = PSTree()
cur.subtrees.append(sub)
sub.parent = cur
cur = sub
elif char == ')':
word = word.strip()
if word != '':
if len(word) == 0 and not allow_empty_words:
raise Exception("Empty word found\n%s" % text)
cur.word = word
word = ''
cur.span = (pos, pos + 1)
pos += 1
else:
cur.span = (cur.subtrees[0].span[0], cur.subtrees[-1].span[1])
cur = cur.parent
elif char == ' ':
if cur.label is DEFAULT_LABEL:
if len(word) == 0 and not allow_empty_labels:
raise Exception("Empty label found\n%s" % text)
cur.label = word
word = ''
else:
word += char
else:
word += char
if cur is not None:
raise Exception("Text did not include complete tree\n%s" % text)
root.calculate_spans()
return root
def get_reference(text, sep='-'):
# Work backwards through it
cur = []
for char in text[::-1]:
if char in string.digits:
cur.append(char)
elif char == sep:
if len(cur) == 0:
return None
else:
return ''.join(cur)
elif char in {'-', '='}:
cur = []
return None
def tree_from_shp(text, allow_empty_labels=False, allow_empty_words=False):
'''Construct a PSTree from the provided split head grammar parse.'''
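# Each input line is whitespace-separated as:
#   index word POS spine head-index head-label [t-num t-label T/F s-label T/F trace]*
# (column meanings inferred from the parsing below).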
# Create spine defined non-terminals
nodes = {}
sent_len = 0
nulls_to_add = []
trace_edges = {}
for line in text:
# Extract data
parts = line.strip().split()
num = int(parts[0])
sent_len = max(sent_len, num)
word = parts[1]
POS = parts[2]
spine = parts[3]
tnum = int(parts[4])
tlabel = parts[5]
structural_edge = (tnum, tlabel)
for i in range(6, len(parts), 6):
tnum = int(parts[i])
tlabel = parts[i + 1]
tlabel += "_T" if parts[i + 2] == 'T' else "_F"
slabel = parts[i + 3]
slabel += "_T" if parts[i + 4] == 'T' else "_F"
trace = parts[i + 5]
if trace.endswith("_chain"):
trace = trace[:-6]
edge = (tnum, tlabel, num, slabel, trace)
if num not in trace_edges:
trace_edges[num] = []
trace_edges[num].append(edge)
# Make spine
prev_node = PSTree(word, POS, (num - 1, num))
symbol_count = defaultdict(lambda: 0)
node_map = {
POS +"_0_F": prev_node
}
nodes[num] = (word, POS, spine, structural_edge, node_map)
if spine != '_':
cur = []
depth = 0
null_node = []
for char in spine +"_":
if char == ')':
cur.append(")")
depth -= 1
elif char == '(':
cur.append("(")
depth += 1
elif char == '_':
if depth != 0:
cur.append(" ")
else:
# Generate new node
if '(' not in cur:
symbol = ''.join(cur)
node = PSTree(None, symbol, (num - 1, num), None, [prev_node])
prev_node.parent = node
count = symbol_count[symbol +"_F"]
symbol_count[symbol +"_F"] += 1
node_map["{}_{}_F".format(symbol, count)] = node
prev_node = node
if len(null_node) > 0:
for null in null_node:
nulls_to_add.append((null, node))
null_node = []
else:
modified = []
to_add = []
for char2 in cur:
to_add.append(char2)
if char2 == ')':
if not ' ' in to_add and len(to_add) > 1:
to_add[0] += '-NONE- '
modified.append(''.join(to_add))
to_add = []
elif char2 == '(':
modified.append(''.join(to_add[:-1]))
to_add = ['(']
modified = ''.join(modified)
null = tree_from_text(modified)
for node in null:
symbol = node.label
count = symbol_count[symbol +"_T"]
symbol_count[symbol +"_T"] += 1
node_map["{}_{}_T".format(symbol, count)] = node
null_node.append(null)
cur = []
else:
cur.append(char)
# Link together with structural edges, ensuring proper nesting
root = None
decisions = {}
for length in xrange(1, len(nodes) + 1):
for pos in nodes:
word, POS, spine, structural_edge, node_map = nodes[pos]
tnum, tlabel = structural_edge
if abs(pos - tnum) == length:
# Identify the top of the spine at this position
top = None
for name in node_map:
if node_map[name].parent is None and name.endswith("_F"):
top = node_map[name]
# Attach it to the target
if tnum == 0:
root = PSTree(None, "ROOT", (0, sent_len), None, [top])
top.parent = root
else:
left = tnum > pos
target_info = nodes[tnum]
tlabel += "_F"
tnode = target_info[4][tlabel]
# Check that linking to this node won't cause a crossing
for opos in xrange(min(pos, tnum) + 1, max(pos, tnum)):
if opos in decisions:
onode = decisions[opos]
# See if this is above our node
pnode = tnode
while pnode is not None:
if pnode == onode:
tnode = onode
break
pnode = pnode.parent
top.parent = tnode
if left:
tnode.subtrees.insert(0, top)
else:
tnode.subtrees.append(top)
decisions[pos] = tnode
# TODO: gather stats on the original trees
for null, node in nulls_to_add:
pos = len(node.subtrees)
if null.label.startswith("ADVP"):
pass
elif node.label.startswith("SBAR") or null.label.startswith("NP-SBJ"):
pos = 0
else:
npos = 0
for subnode in node.subtrees:
npos += 1
if subnode.label.startswith("VB"):
pos = npos
if subnode.label.startswith("LST"):
pos = npos + 1
break
if subnode.label.startswith("VP"):
pos = max(0, npos - 1)
break
if len(node.subtrees) > pos:
sub_label = node.subtrees[pos].label
if sub_label.startswith("-L") or sub_label in {",", "LST", "``"}:
pos += 1
node.subtrees.insert(pos, null)
null.parent = node
root.calculate_spans()
# Add traces
max_index = 0
for num in trace_edges:
for tnum, tlabel, snum, slabel, trace in trace_edges[num]:
snode = nodes[snum][4][slabel]
tnode = nodes[tnum][4][tlabel]
if tlabel.endswith("_T") and slabel.endswith("_T"):
tmp = snode
snode = tnode
tnode = tmp
if trace == '=':
identity = get_reference(tnode.label)
if identity is None:
max_index += 1
identity = max_index
tnode.label += "-{}".format(identity)
snode.label += "={}".format(identity)
else:
identity = get_reference(snode.label)
if identity is None:
max_index += 1
identity = max_index
snode.label += "-{}".format(identity)
tnode.subtrees[0].word += "-{}".format(identity)
return root
def clone_and_find(nodes):
'''Clone the tree these nodes are in and finds the equivalent nodes in the
new tree.'''
return_list = True
if type(nodes) != type([]):
return_list = False
nodes = [nodes]
# Note the paths to the nodes
paths = []
for node in nodes:
paths.append([])
tree = node
while tree.parent is not None:
prev = tree
tree = tree.parent
paths[-1].append(tree.subtrees.index(prev))
# Duplicate and follow the path back to the equivalent node
ntree = nodes[0].root().clone()
ans = []
for path in paths:
tree = ntree
for index in path[::-1]:
tree = tree.subtrees[index]
ans.append(tree)
if return_list:
return ans
else:
return ans[0]
if __name__ == '__main__':
print "Running doctest"
import doctest
doctest.testmod()
```
#### File: parser/nn-tagger/pre-process.py
```python
import sys
import string
def map_token(token, pos):
# Lowercase
token = token.lower()
# Alternatives:
# - Just change the case of the first letter of the first word
# - Also, leave it as is if we've seen this capitalised elsewhere
# Replace numbers
letters = []
for letter in token:
if letter in string.digits:
if len(letters) > 0 and letters[-1] == '0':
continue
else:
letters.append("0")
else:
letters.append(letter)
token = ''.join(letters)
# Do the suffix trick?
return token
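# Hand-worked examples of map_token (illustrative, not from the original script):
#   map_token("Costs", 0) -> "costs"   (lowercased)
#   map_token("1,234", 1) -> "0,0"     (each run of digits collapses to a single 0)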
for line in sys.stdin:
if line.strip().startswith("# SentID"):
print(line.strip())
continue
mapping = True
tokens = []
token_count = 0
for pos, token in enumerate(line.strip().split()):
if token == "|||" or (not mapping):
tokens.append(token)
mapping = False
else:
token_count += 1
tokens.append(map_token(token, pos))
# Add "_" tags if needed
assert len(tokens) <= token_count * 2 + 1
if len(tokens) < token_count * 2 + 1:
if mapping:
tokens.append("|||")
while len(tokens) < token_count * 2 + 1:
tokens.append("_;")
print(" ".join(tokens))
```
#### File: 1ec-graph-parser/properties/graph_classifier.py
```python
from __future__ import print_function
import argparse
import sys
def read_arcs(src):
arcs = []
info = {}
while True:
line = src.readline()
if len(line) == 0:
if len(arcs) > 0:
yield (arcs, info)
return
elif len(line.strip()) == 0:
yield (arcs, info)
arcs = []
info = {}
elif line[0] != '#':
fields = line.strip().split()
child = int(fields[0])
for part in fields[6::6]:
parent = int(part)
arcs.append((child, parent))
else:
content = line
name = line.strip().split()[1]
if name in info:
info[name] += "\n"+ content
else:
info[name] = content
def is_projective(arcs):
for arc0 in arcs:
for arc1 in arcs:
if arc0[0] < arc1[0] < arc0[1] < arc1[1]:
return False
if arc1[0] < arc0[0] < arc1[1] < arc0[1]:
return False
return True
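# Quick hand-checked examples for is_projective (arcs are (child, parent) pairs):
#   is_projective([(1, 3), (3, 4)]) -> True    (nested/adjacent arcs)
#   is_projective([(1, 3), (2, 4)]) -> False   (the two arcs cross)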
def has_self_arc(arcs):
for arc in arcs:
if arc[0] == arc[1]:
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Find out information for graphs: (1) projective? (2) are self-arcs present?')
args = parser.parse_args()
for arcs, info in read_arcs(sys.stdin):
if is_projective(arcs):
print("Projective", info['Sentence'])
if has_self_arc(arcs):
print("Has self arc", info['Sentence'])
``` |
{
"source": "jkkummerfeld/dstc7-noesis",
"score": 3
} |
#### File: noesis/dataset/dataset.py
```python
import random
import numpy as np
from noesis.dataset.vocabulary import Vocabulary
from noesis.dataset import utils
class Dataset(object):
"""
A class that encapsulates a dataset of (context, candidate responses) pairs with their target labels.
Warning:
Do not use this constructor directly; use one of the class methods (e.g. ``from_file``) to initialize.
"""
def __init__(self):
# Declare vocabulary objects
self.vocab = None
self.data = None
@classmethod
def from_file(cls, path, vocab=None, max_vocab=50000):
"""
Initialize a dataset from the file at the given path. The file
must contain a list of TAB-separated pairs of sequences.
Note:
As specified by the maximum vocabulary size, the vocabulary
will be sorted in descending token frequency and cut off.
Tokens that are in the dataset but not retained in the vocabulary
will be dropped in the sequences.
Args:
path (str): path to the dataset file
vocab (Vocabulary): pre-populated Vocabulary object or a path to a file containing vocabulary words, default `None`. If a pre-populated Vocabulary object is given, `max_vocab` is not used.
max_vocab (int): maximum vocabulary size
"""
obj = cls()
pairs = utils.prepare_data(path)
return cls._encode(obj, pairs, vocab, max_vocab)
def _encode(self, pairs, vocab=None, max_vocab=500000):
"""
Encodes the context and candidate sequences using the vocabulary.
Note:
As specified by the maximum vocabulary size, the vocabulary
will be sorted in descending token frequency and cut off.
Tokens that are in the dataset but not retained in the vocabulary
will be dropped in the sequences.
Args:
pairs (list): list of tuples (source sequences, target sequence)
vocab (Vocabulary): pre-populated Vocabulary object or a path to a file containing vocabulary words,
default `None`. If a pre-populated Vocabulary object is given, `max_vocab` is not used.
max_vocab (int): maximum vocabulary size
"""
# Read in vocabularies
self.vocab = self._init_vocab(pairs, max_vocab, vocab)
# Translate input sequences to token ids
self.data = []
for (context, candidates), target in pairs:
c = self.vocab.indices_from_sequence(context)
r = []
for candidate in candidates:
r.append(self.vocab.indices_from_sequence(candidate))
self.data.append(((c, r), target))
return self
def _init_vocab(self, data, max_num_vocab, vocab):
resp_vocab = Vocabulary(max_num_vocab)
if vocab is None:
for (context, candidates), target in data:
resp_vocab.add_sequence(context)
for candidate in candidates:
resp_vocab.add_sequence(candidate)
resp_vocab.trim()
elif isinstance(vocab, Vocabulary):
resp_vocab = vocab
elif isinstance(vocab, str):
for tok in utils.read_vocabulary(vocab, max_num_vocab):
resp_vocab.add_token(tok)
else:
raise AttributeError('{} is not a valid vocabulary. None, an instance of the Vocabulary class, '
'and str are the only supported formats for the vocabulary'.format(vocab))
return resp_vocab
def _pad(self, data):
c = [pair[0][0] for pair in data]
r = [pair[0][1] for pair in data]
context = np.zeros([len(c), max([len(entry) for entry in c])], dtype=int)
context.fill(self.vocab.PAD_token_id)
context_lengths = np.zeros(len(c), dtype=int)
for i, entry in enumerate(c):
context[i, :len(entry)] = entry
context_lengths[i] = len(entry)
responses = np.zeros([len(r), max([len(entry) for entry in r]), max([len(cand) for entry in r for cand in entry])], dtype=int)
responses.fill(self.vocab.PAD_token_id)
responses_lengths = np.zeros([len(r), max([len(entry) for entry in r])], dtype=int)
for i, entry in enumerate(r):
for j, cand in enumerate(entry):
responses[i, j, :len(cand)] = cand
responses_lengths[i, j] = len(cand)
return context, responses, context_lengths, responses_lengths
def __len__(self):
return len(self.data)
def num_batches(self, batch_size):
"""
Get the number of batches given batch size.
Args:
batch_size (int): number of examples in a batch
Returns:
(int) : number of batches
"""
return len(range(0, len(self.data), batch_size))
def make_batches(self, batch_size):
"""
Create a generator that generates batches in batch_size over data.
Args:
batch_size (int): number of pairs in a mini-batch
Yields:
(list (str), list (str)): next pair of source and target variable in a batch
"""
if len(self.data) < batch_size:
raise OverflowError("batch size = {} cannot be larger than data size = {}".
format(batch_size, len(self.data)))
for i in range(0, len(self.data), batch_size):
cur_batch = self.data[i:i + batch_size]
context, responses, context_lengths, responses_lengths = self._pad(cur_batch)
target = np.asarray([pair[1] for pair in cur_batch])
yield (context, responses, target, context_lengths, responses_lengths)
def shuffle(self, seed=None):
"""
Shuffle the data.
Args:
seed (int): provide a value for the random seed; default seed=None is truly random
"""
if seed is not None:
random.seed(seed)
random.shuffle(self.data)
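# Rough usage sketch; the method names come from this class, but the file name, batch
# size, and exact input format (handled by utils.prepare_data) are assumptions:
#   dataset = Dataset.from_file('train.txt')
#   dataset.shuffle(seed=1)
#   for context, responses, target, context_lengths, responses_lengths in dataset.make_batches(32):
#       ...  # feed the padded numpy arrays to a model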
```
#### File: noesis-tf/models/helpers.py
```python
import array
import numpy as np
import tensorflow as tf
from collections import defaultdict
def load_vocab(filename):
vocab = None
with open(filename) as f:
vocab = f.read().splitlines()
dct = defaultdict(int)
vocab = set(vocab)
for idx, word in enumerate(vocab):
dct[word] = idx
return [vocab, dct]
def load_glove_vectors(filename, vocab):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
tf.logging.info("Found {} out of {} vectors in Glove".format(num_vectors, len(vocab)))
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, embedding_dim):
initial_embeddings = np.random.uniform(-0.25, 0.25, (len(vocab_dict), embedding_dim)).astype("float32")
for word, glove_word_idx in glove_dict.items():
word_idx = vocab_dict.get(word)
initial_embeddings[word_idx, :] = glove_vectors[glove_word_idx]
return initial_embeddings
```
#### File: noesis-tf/util/blocks.py
```python
import tensorflow as tf
def length(sequence):
"""
Get true length of sequences (without padding), and mask for true-length in max-length.
Input of shape: (batch_size, max_seq_length, hidden_dim)
Output shapes,
length: (batch_size)
mask: (batch_size, max_seq_length, 1)
"""
populated = tf.sign(tf.abs(sequence))
length = tf.cast(tf.reduce_sum(populated, axis=1), tf.int32)
mask = tf.cast(tf.expand_dims(populated, -1), tf.float32)
return length, mask
def biLSTM(inputs, dim, seq_len, name):
"""
A Bi-Directional LSTM layer. Returns forward and backward hidden states as a tuple, and cell states as a tuple.
Output of hidden states: [(batch_size, max_seq_length, hidden_dim), (batch_size, max_seq_length, hidden_dim)]
Same shape for cell states.
"""
with tf.name_scope(name):
with tf.variable_scope('forward' + name):
lstm_fwd = tf.contrib.rnn.LSTMCell(num_units=dim)
with tf.variable_scope('backward' + name):
lstm_bwd = tf.contrib.rnn.LSTMCell(num_units=dim)
hidden_states, cell_states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fwd, cell_bw=lstm_bwd,
inputs=inputs, sequence_length=seq_len,
dtype=tf.float32, scope=name)
return hidden_states, cell_states
def LSTM(inputs, dim, seq_len, name):
"""
An LSTM layer. Returns hidden states and cell states as a tuple.
Output shape of hidden states: (batch_size, max_seq_length, hidden_dim)
Same shape for cell states.
"""
with tf.name_scope(name):
cell = tf.contrib.rnn.LSTMCell(num_units=dim)
hidden_states, cell_states = tf.nn.dynamic_rnn(cell, inputs=inputs, sequence_length=seq_len,
dtype=tf.float32, scope=name)
return hidden_states, cell_states
def last_output(output, true_length):
"""
To get the last hidden layer form a dynamically unrolled RNN.
Input of shape (batch_size, max_seq_length, hidden_dim).
true_length: Tensor of shape (batch_size). Such a tensor is given by the length() function.
Output of shape (batch_size, hidden_dim).
"""
max_length = int(output.get_shape()[1])
length_mask = tf.expand_dims(tf.one_hot(true_length-1, max_length, on_value=1., off_value=0.), -1)
last_output = tf.reduce_sum(tf.multiply(output, length_mask), 1)
return last_output
def masked_softmax(scores, mask):
"""
Used to calculate a softmax score with true sequence length (without padding), rather than max-sequence length.
Input shape: (batch_size, max_seq_length, hidden_dim).
mask parameter: Tensor of shape (batch_size, max_seq_length). Such a mask is given by the length() function.
"""
numerator = tf.exp(tf.subtract(scores, tf.reduce_max(scores, 1, keep_dims=True))) * mask
denominator = tf.reduce_sum(numerator, 1, keep_dims=True)
weights = tf.div(numerator, denominator)
return weights
``` |
{
"source": "jkkummerfeld/emnlp20lm",
"score": 3
} |
#### File: emnlp20lm/data-preprocessing/make-non-unk-ptb.py
```python
import tarfile
import string
import sys
import argparse
parser = argparse.ArgumentParser(description='Get text for language modeling evaluation based on the original Penn Treebank Wall Street Journal data, following the Mikolov style preprocessing. Run with just the treebank file to get a very close match to the Mikolov files (all but 282 words match out of more than a million).')
parser.add_argument('treebank',
help='treebank_3_LDC99T42.tgz file from the LDC')
parser.add_argument('--prefix', default="penn_text.",
help='Prefix for output files')
parser.add_argument('--no-unks', action='store_true',
help='Do not introduce unks')
parser.add_argument('--keep-case', action='store_true',
help='Do not lowercase all text')
parser.add_argument('--keep-nums', action='store_true',
help='Do not convert numbers to N')
parser.add_argument('--keep-punc', action='store_true',
help='Do not remove punctuation')
parser.add_argument('--keep-percent', action='store_true',
help='Do not remove percentage signs')
args = parser.parse_args()
# Apply edits based on command line arguments
def modify_token(token):
if not args.keep_case:
token = token.lower()
if not args.keep_punc:
if token in ["''", "``", ',', '.', ':', ';', '--', '(', ')', '...', '-lrb-', '-rrb-', '-lcb-', '-rcb-', '?', '!', '`', '-']:
return None
if token.lower() == 'u.s':
token = 'u.s.'
if not args.keep_nums:
has_char = False
has_num = False
for char in token:
if char in string.ascii_letters + "'":
has_char = True
elif char in string.digits:
has_num = True
if has_num and (not has_char):
token = "N"
if not args.keep_percent:
if token == '%':
token = 'N'
if len(token) == 0:
return None
return token
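# Hand-worked examples of modify_token with the default flags (not from the original script):
#   "The"   -> "the"   (lowercased)
#   "1,234" -> "N"     (number-only tokens collapse to N)
#   "%"     -> "N"     (percent signs become N)
#   ","     -> None    (punctuation is dropped)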
# Read data
data = tarfile.open(args.treebank, "r:gz")
text = {}
for member in data.getmembers():
if member.name.endswith(".mrg") and 'wsj' in member.name:
name = member.name.split('_')[-1][:2]
text.setdefault(name, [[]])
lines = data.extractfile(member).readlines()
# We are reading the syntactic parses, so track depth to know when we
# finish a sentence.
# Note, why the parse files?
# - Raw, require tokenisation
# - Tagged, have breaks that make sentence boundaries unclear
# - Prd, have some formatting issues
depth = 0
for line in lines:
line = line.decode("ascii").strip().split()
for prev, token in zip([''] + line, line):
if '(' in token:
for char in token:
if char == '(':
depth += 1
else:
for char in token:
if char == ')':
depth -= 1
if prev == '(-NONE-':
continue
token = token.rstrip(")")
token = modify_token(token)
if token is not None:
text[name][-1].append(token)
if depth == 0 and len(text[name][-1]) > 0:
text[name].append([])
# Prepare output files
train = open(args.prefix +'train.txt', 'w')
valid = open(args.prefix +'valid.txt', 'w')
test = open(args.prefix +'test.txt', 'w')
name2file = {
"00": train, "01": train, "02": train, "03": train, "04": train,
"05": train, "06": train, "07": train, "08": train, "09": train,
"10": train, "11": train, "12": train, "13": train, "14": train,
"15": train, "16": train, "17": train, "18": train, "19": train,
"20": train,
"21": valid, "22": valid,
"23": test, "24": test,
}
# Insert <unk> tokens
if not args.no_unks:
# Count words in training and validation data
counts = {}
for name in text:
if name2file[name] == train or name2file[name] == valid:
for sentence in text[name]:
for token in sentence:
counts[token] = counts.get(token, 0) + 1
# Keep 10,000 words, keeping the most frequent ones. This cuts off part way
# through the words with frequency 5.
#
# This is where the remaining difference is with Mikolov's data.
pairs = [(-c, t) for t, c in counts.items()]
pairs.sort()
top10k = {t for _, t in pairs[:10000]}
# Replace rare words with <unk>
for name in text:
for sentence in text[name]:
for j, token in enumerate(sentence):
if token not in top10k:
sentence[j] = '<unk>'
# Print data
names = list(text.keys())
names.sort()
for name in names:
for sentence in text[name]:
if len(sentence) > 0:
print(' '.join(sentence), file=name2file[name])
# Close files
train.close()
valid.close()
test.close()
``` |
{
"source": "jkkummerfeld/game-ai",
"score": 3
} |
#### File: game-ai/camel-up/run_sim.py
```python
import random
import sys
import traceback
from collections import defaultdict
COLOURS = ['green', 'blue', 'orange', 'yellow', 'white']
COLOUR_MAP = {
'g': 'green',
'b': 'blue',
'o': 'orange',
'y': 'yellow',
'w': 'white',
}
MAX_SQUARE_DIST = 4
MAX_POS = 0
MAX_SQUARES = 4
ROLLOUTS = 50000
SQUARE_ROLLOUTS = 5000
ALL_MOVABLE = True
INTERACTIVE = True
class Board(object):
def __init__(self):
self.camels = {}
self.squares = {}
def add_square(self, position, direction):
self.squares[position] = [direction, 0]
def add_camel(self, colour, position, movable):
height = 0
for camel in self.camels:
pos = self.camels[camel]
if pos[0] == position:
height = max(height, pos[1] + 1)
self.set_camel(colour, position, height, movable)
def set_camel(self, colour, position, height, movable):
self.camels[colour] = (position, height, movable)
def square_options(self):
# Get positions of camels
unavailable = set()
lowest_pos = 16
highest_pos = 0
for camel in self.camels:
position = self.camels[camel][0]
lowest_pos = min(lowest_pos, position + 1)
highest_pos = max(highest_pos, position + MAX_SQUARE_DIST)
unavailable.add(position)
for square in self.squares:
unavailable.add(square)
unavailable.add(square + 1)
unavailable.add(square - 1)
# Work out where squares can go
square_options = []
for i in range(lowest_pos, highest_pos):
if i not in unavailable:
square_options.append(i)
return square_options
def apply_roll(self, colour, number):
position, height, movable = self.camels[colour]
target = [position + number, 0]
# Take into consideration +/- squares
# Rules guarantee only one in a row
if target[0] in self.squares:
self.squares[target[0]][1] += 1
target[0] += self.squares[target[0]][0]
# Find all the camels that are moving
moving = []
for camel in self.camels:
opos = self.camels[camel]
if opos[0] == position and height <= opos[1]:
moving.append((opos, camel))
if opos[0] == target[0]:
target[1] = max(target[1], opos[1] + 1)
moving.sort()
# Move the camels
for opos, camel in moving:
self.camels[camel] = (target[0], target[1], False)
target[1] += 1
def copy(self):
ans = Board()
for camel in self.camels:
ans.camels[camel] = self.camels[camel]
for square in self.squares:
ans.squares[square] = self.squares[square][:]
return ans
def stack_size(self, pos):
count = 0
for camel in self.camels:
if self.camels[camel][0] == pos:
count += 1
return count
def leader(self, ignore=None):
best = None
best_colour = None
for camel in self.camels:
if camel == ignore:
continue
pos = self.camels[camel]
if best is None or pos[0] > best[0] or (pos[0] == best[0] and pos[1] > best[1]):
best = pos
best_colour = camel
return best_colour
def second(self):
best = self.leader()
return self.leader(best)
def __str__(self):
ans = []
camel_list = [(self.camels[camel], camel) for camel in self.camels]
camel_list.sort()
for pos, camel in camel_list:
while pos[0] >= len(ans):
ans.append('')
ans[pos[0]] += camel[0]
if not pos[2]:
ans[pos[0]] += "!"
for pos in self.squares:
while pos >= len(ans):
ans.append('')
direction = self.squares[pos][0]
if direction > 0:
ans[pos] += '+'
else:
ans[pos] += '-'
return '.'.join(ans)
def get_roll(available):
colour = available[random.randint(0, len(available) - 1)]
number = random.randint(1, 3)
return (number, colour)
def read_state():
# Format, a single line:
# Letters: g b o y w
# Squares: + -
# Dot to indicate next square
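# Illustrative input (made up, not from a real game): "gb.o!.+..yw" puts green and blue
# stacked on square 0, an unmovable orange on square 1, a +1 square on square 2, and
# yellow with white on top on square 4.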
try:
data = input("\nEnter current state:\n")
init = Board()
board_position = 0
for pos in range(len(data)):
char = data[pos]
if char == '.':
board_position += 1
elif char == '+':
init.add_square(board_position, 1)
elif char == '-':
init.add_square(board_position, -1)
elif char in 'gboyw':
movable = True
if pos + 1 < len(data) and data[pos + 1] == '!':
movable = False
init.add_camel(COLOUR_MAP[char], board_position, movable)
### print("Insert", COLOUR_MAP[char], board_position, movable)
return init
except EOFError:
return None
def add_random_camels(state):
tops = {}
for colour in COLOURS:
position = random.randint(0, MAX_POS)
movable = random.randint(0, 1) == 0
if ALL_MOVABLE:
movable = True
state.add_camel(colour, position, movable)
if position not in tops:
tops[position] = 0
tops[position] += 1
### state.add_camel('green', 0)
### state.add_camel('blue', 0)
### state.add_camel('white', 0)
### state.add_camel('orange', 19)
### state.add_camel('yellow', 19)
### tops = {0:3, 19:2}
def add_random_squares(state):
# Get positions of camels
unavailable = set()
for camel in state.camels:
position = state.camels[camel]
unavailable.add(position[0])
# Work out where squares can go
square_options = []
for i in range(min(unavailable) + 1, max(unavailable) + MAX_SQUARE_DIST):
if i not in unavailable:
square_options.append(i)
for i in range(random.randint(0, MAX_SQUARES)):
if len(square_options) == 0:
break
to_add = square_options[random.randint(0, len(square_options) - 1)]
direction = [-1, 1][random.randint(0, 1)]
state.add_square(to_add, direction)
pos = 0
while pos < len(square_options):
if abs(square_options[pos] - to_add) < 2:
square_options.pop(pos)
else:
pos += 1
def do_rollout(init_state):
cur = init_state.copy()
cur_available = COLOURS[:]
for camel in cur.camels:
if not cur.camels[camel][2]:
cur_available.remove(camel)
while len(cur_available) > 0:
square_options = cur.square_options()
action = random.randint(0, 1)
if action == 1 and len(square_options) > 0:
addition = square_options[random.randint(0, len(square_options) - 1)]
direction = (random.randint(0, 1) * 2) - 1
cur.add_square(addition, direction)
else:
number, colour = get_roll(cur_available)
cur_available.remove(colour)
cur.apply_roll(colour, number)
return cur
if INTERACTIVE:
while True:
try:
init = read_state()
if init is None:
break
# Do rollout
counts = {camel: 0 for camel in COLOURS}
counts_second = {camel: 0 for camel in COLOURS}
for i in range(ROLLOUTS):
cur = do_rollout(init.copy())
counts[cur.leader()] += 1
counts_second[cur.second()] += 1
if i % 10000 == 0 and i > 0:
print("Done", i)
counts_squares = defaultdict(lambda: 0)
square_options = init.square_options()
if len(square_options) > 0:
for option in square_options:
for direction in [1, -1]:
for i in range(SQUARE_ROLLOUTS):
cur = init.copy()
cur.add_square(option, direction)
cur = do_rollout(cur)
score = cur.squares[option][1]
desc = '{}{}'.format(('+'+ str(direction))[-2], option)
counts_squares[desc] += score
# Create summary
ordering = []
for camel in counts:
ordering.append((counts[camel], counts_second[camel], camel))
ordering.sort(reverse=True)
for first_count, second_count, camel in ordering:
name = "{:6}".format(camel)
first = first_count / ROLLOUTS
second = second_count / ROLLOUTS
expected = []
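# Leg-bet payouts: a ticket pays 5/3/2 if the camel finishes the leg first, 1 if it
# finishes second, and loses 1 otherwise, so each score below is the expected value
# num * P(first) + 1 * P(second) - 1 * P(neither).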
for num in [5, 3, 2]:
score = num * first + second - (1 - first - second)
summary = "{: 6.2f}".format(score)
expected.append(summary)
print("{} {:>6.2f} {:>6.2f} {}".format(name, first * 100, second * 100, ' '.join(expected)))
ordering = []
for square in counts_squares:
score = counts_squares[square] / SQUARE_ROLLOUTS
ordering.append((score, square))
ordering.sort(reverse=True)
for score, square in ordering:
print(square, score)
except Exception as inst:
traceback.print_exc()
else:
win_based_on_start = defaultdict(lambda: 0)
for i in range(ROLLOUTS):
init = Board()
# Initialise the board
# 1 - Add camels
add_random_camels(init)
# 2 - Add squares
add_random_squares(init)
# Note properties of the start state
starting = {}
for camel in init.camels:
position = init.camels[camel]
distance_from_top = init.stack_size(position[0]) - position[1] - 1
# Add in whether there is a +/- square 1,2,3 in front
square_info = ''
for pos in init.squares:
direction = init.squares[pos][0]
distance = pos - init.camels[camel][0]
if 0 < distance < 4:
square_info += "."+ str(distance)
if direction > 0:
square_info += "+"
else:
square_info += "-"
while len(square_info) <= 6:
square_info += " "
starting[camel] = (init.stack_size(position[0]), distance_from_top, square_info)
# Do rollout
best_camel = do_rollout(init).leader()
# Create summary
best_info = "Stack height: {} Camel depth: {} Squares: {}".format(*starting[best_camel])
win_based_on_start[best_info] += 1
for key in win_based_on_start:
print(key, " wins:", win_based_on_start[key])
``` |
{
"source": "jkkummerfeld/lamb",
"score": 2
} |
#### File: lamb/lamb/cell.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from absl import logging
import six
import tensorflow.compat.v1 as tf
# pylint: disable=g-bad-import-order
from lamb import utils
from lamb import tiled_linear
from lamb.nascell import NASCell
from lamb.tiled_lstm import TiledLSTMCell
from lamb.tiled_rhn import TiledRHNCell
from lamb.res_multi_rnn_cell import ResMultiRNNCell
from lamb.skip_multi_rnn_cell import SkipMultiRNNCell
from lamb.dropout import DirichletDropout
from lamb.dropout import DriftingDropout
from lamb.dropout import Dropout
from lamb.dropout import GaussianDropout
from tensorflow.contrib import framework as contrib_framework
def build_cell(model, num_layers, hidden_size,
layer_norm, cell_init_factor,
shared_mask_dropout,
input_dropout, inter_layer_dropout, state_dropout,
update_dropout, state_dropout_flip_rate,
tie_forget_and_input_gates, cap_input_gate, forget_bias,
feature_mask_rounds, feature_mask_rank,
overlay_rank, sparsity_ratio,
cell_clip, activation_fn,
lstm_skip_connection, residual_connections):
cell_initializer = utils.variance_scaling_initializer(
scale=cell_init_factor, mode='fan_in', distribution='truncated_normal')
def hidden_size_for_layer(layer_index):
if isinstance(hidden_size, int):
return hidden_size
elif layer_index < len(hidden_size):
return hidden_size[layer_index]
else:
return hidden_size[-1]
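# For example (illustrative): with hidden_size=[512, 256] and num_layers=3, the layers
# get sizes 512, 256 and 256; layers past the end of the list reuse the last entry.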
def dropout(dropout_rate, share=shared_mask_dropout,
flip_prob=None, kind='bernoulli', scaler=1.0):
if dropout_rate is not None:
# The same graph is used for training and evaluation with different
# dropout rates. Passing the constant configured dropout rate here would
# be a subtle error.
assert contrib_framework.is_tensor(dropout_rate)
if flip_prob is not None:
assert kind == 'bernoulli'
return DriftingDropout(1-dropout_rate, flip_prob=flip_prob,
scaler=scaler)
elif kind == 'bernoulli':
return Dropout(1-dropout_rate, share_mask=share, scaler=scaler)
elif kind == 'dirichlet':
return DirichletDropout(1-dropout_rate, share_mask=share, scaler=scaler)
elif kind == 'gaussian':
return GaussianDropout(1-dropout_rate, share_mask=share, scaler=scaler)
else:
assert False
# We don't use DriftingDropout currently. Ignore it.
state_dropout_flip_rate = state_dropout_flip_rate
# Set up input_transforms for the layers based on
# {input,inter_layer}_dropout.
input_transforms = []
for layer_index in six.moves.range(num_layers):
if model in ['lstm', 'nas']:
if layer_index == 0:
transform = dropout(input_dropout)
elif layer_index > 0:
transform = dropout(inter_layer_dropout)
else:
transform = None
elif model == 'rhn':
if layer_index == 0:
transform = dropout(input_dropout)
else:
# The input is not fed to higher layers.
transform = None
else:
assert False
input_transforms.append(transform)
# Populate state_transforms to handle state_dropout. This is currently the
# same for LSTM and RHN: all layers have the same dropout mask, possibly
# with further sharing over time steps.
state_transforms = []
for layer_index in six.moves.range(num_layers):
transform = dropout(state_dropout, share=True)
state_transforms.append(transform)
# Populate update_transforms to handle update_dropout. This is currently the
# same for LSTM and RHN: all layers have their own dropout mask which may be
# shared between time steps.
update_transforms = []
if model == 'lstm' and (tie_forget_and_input_gates or cap_input_gate):
# The 1.5 is to reach a more non-linear part of the output tanh.
base_scale = 1.5
else:
base_scale = 1.0
for layer_index in six.moves.range(num_layers):
if update_dropout is None:
scaler = 1.0
else:
scaler = base_scale*(1-update_dropout)
update_transforms.append(dropout(
update_dropout,
# Dropout mask for the recurrent state needs to be the
# same for all time steps.
share=True,
# This makes update dropout do mask*x at training time and
# x*(1-r) at test time instead of usual mask*x/(1-r) and
# x, respectively.
scaler=scaler))
def make_lstm_column():
init_params = collections.OrderedDict([
('B_f', {'initializer': utils.variance_scaling_initializer(
scale=cell_init_factor, distribution='truncated_normal',
mean=forget_bias)})
])
if overlay_rank > 0:
assert sparsity_ratio < 0
# TODO(melisgl): Specify initializers for the shared matrices.
tiled_linear_class = tiled_linear.OverlayedTiledLinear
init_params.update(collections.OrderedDict([
('W_x_i', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_x_j', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_x_f', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_x_o', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_h_i', {'overlay_sharing_key': 'W_h_any',
'overlay_rank': overlay_rank}),
('W_h_j', {'overlay_sharing_key': 'W_h_any',
'overlay_rank': overlay_rank}),
('W_h_f', {'overlay_sharing_key': 'W_h_any',
'overlay_rank': overlay_rank}),
('W_h_o', {'overlay_sharing_key': 'W_h_any',
'overlay_rank': overlay_rank}),
]))
elif sparsity_ratio >= 0.0:
assert overlay_rank == -1
tiled_linear_class = tiled_linear.SparseTiledLinear
# This is equivalent to using cell_initializer scaled by
# 1/sparsity_ratio.
sparse_initializer = tf.truncated_normal_initializer(
stddev=math.sqrt(cell_init_factor /
sparsity_ratio /
# TODO(melisgl): This is off if the input
# embedding size is different from the hidden
# size.
hidden_size))
init_params.update(collections.OrderedDict([
('W_x_.*', {'sparse_indices_sharing_key': 'W_x'}),
('W_h_.*', {'sparse_indices_sharing_key': 'W_h'}),
('W_x', {'sparsity_ratio': sparsity_ratio,
'initializer': sparse_initializer}),
('W_h', {'sparsity_ratio': sparsity_ratio,
'initializer': sparse_initializer}),
]))
else:
if layer_norm:
tiled_linear_class = tiled_linear.LayerNormedTiledLinear
else:
tiled_linear_class = tiled_linear.TiledLinear
init_params.update(collections.OrderedDict([
('W_.*', {'initializer': cell_initializer}),
('B_.*', {'initializer': cell_initializer})
]))
def make_layer(layer_index):
cell = TiledLSTMCell(
hidden_size_for_layer(layer_index),
tie_gates=tie_forget_and_input_gates,
cap_input_gate=cap_input_gate,
feature_mask_rounds=feature_mask_rounds,
feature_mask_rank=feature_mask_rank,
input_transform=input_transforms[layer_index],
state_transform=state_transforms[layer_index],
update_transform=update_transforms[layer_index],
tiled_linear_class=tiled_linear_class,
tiled_linear_var_init_params=init_params,
initializer=cell_initializer,
cell_clip=cell_clip if cell_clip > 0 else None,
layer_norm=layer_norm,
activation=eval(activation_fn)) # pylint: disable=eval-used
return cell
layers = [make_layer(i) for i in six.moves.range(num_layers)]
if lstm_skip_connection:
assert not residual_connections
return SkipMultiRNNCell(layers)
elif residual_connections:
return ResMultiRNNCell(layers)
else:
return tf.nn.rnn_cell.MultiRNNCell(layers)
def make_rhn_column():
init_params = collections.OrderedDict([
('B_c', {'initializer': tf.constant_initializer(forget_bias)}),
])
if overlay_rank > 0:
assert sparsity_ratio < 0
# TODO(melisgl): Specify initializers for the shared matrices.
tiled_linear_class = tiled_linear.OverlayedTiledLinear
init_params.update(collections.OrderedDict([
('W_x_h', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_x_c', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_x_t', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': overlay_rank}),
('W_s_h', {'overlay_sharing_key': 'W_s_any',
'overlay_rank': overlay_rank}),
('W_s_c', {'overlay_sharing_key': 'W_s_any',
'overlay_rank': overlay_rank}),
('W_s_t', {'overlay_sharing_key': 'W_s_any',
'overlay_rank': overlay_rank}),
]))
elif sparsity_ratio >= 0.0:
assert overlay_rank == -1
tiled_linear_class = tiled_linear.SparseTiledLinear
sparse_initializer = tf.truncated_normal_initializer(
stddev=math.sqrt(cell_init_factor /
sparsity_ratio /
# TODO(melisgl): This is off if the input
# embedding size is different from the hidden
# size.
hidden_size))
init_params.update(collections.OrderedDict([
('W_x_.*', {'sparse_indices_sharing_key': 'W_x'}),
('W_s_.*', {'sparse_indices_sharing_key': 'W_s'}),
('W_x', {'sparsity_ratio': sparsity_ratio,
'initializer': sparse_initializer}),
('W_s', {'sparsity_ratio': sparsity_ratio,
'initializer': sparse_initializer}),
]))
else:
tiled_linear_class = tiled_linear.TiledLinear
init_params.update(collections.OrderedDict([
('W_.*', {'initializer': cell_initializer}),
]))
logging.info('Creating RHN of depth %s', num_layers)
if layer_norm:
logging.warn('RHN does not support layer normalization.')
cell = TiledRHNCell(
hidden_size,
depth=num_layers,
tie_gates=tie_forget_and_input_gates,
input_transform=input_transforms[layer_index],
state_transform=state_transforms[layer_index],
update_transform=update_transforms[layer_index],
tiled_linear_class=tiled_linear_class,
tiled_linear_var_init_params=init_params,
cell_clip=cell_clip if cell_clip > 0 else None,
activation=eval(activation_fn)) # pylint: disable=eval-used
return cell
def make_nas_column():
assert not layer_norm
def make_layer(layer_index):
logging.info('Creating layer %s', layer_index)
cell = NASCell(
hidden_size,
input_transform=input_transforms[layer_index],
state_transform=state_transforms[layer_index],
update_transform=update_transforms[layer_index],
initializer=cell_initializer)
return cell
layers = [make_layer(i) for i in six.moves.range(num_layers)]
if lstm_skip_connection:
assert not residual_connections
return SkipMultiRNNCell(layers)
elif residual_connections:
return ResMultiRNNCell(layers)
else:
return tf.nn.rnn_cell.MultiRNNCell(layers)
assert len(hidden_size) <= num_layers
if model == 'lstm':
return make_lstm_column()
elif model == 'rhn':
assert len(set(hidden_size)) == 1
hidden_size = hidden_size[0]
return make_rhn_column()
elif model == 'nas':
assert len(set(hidden_size)) == 1
hidden_size = hidden_size[0]
return make_nas_column()
else:
assert False
```
#### File: lamb/lamb/dyneval.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class Dyneval(object):
def __init__(self, grads_and_vars, learning_rate, decay_rate, epsilon):
with tf.variable_scope('dyneval'):
# convert_to_tensor densifies IndexedSlices
self._grads = [tf.convert_to_tensor(grad) for grad, _ in grads_and_vars]
self._vars = [var for _, var in grads_and_vars]
self._learning_rate = learning_rate
self._decay_rate = decay_rate
def shadow_vars():
return [
tf.get_variable(
var.name.replace('/', '-').replace(':', '-'),
var.get_shape(), initializer=tf.zeros_initializer(),
trainable=False)
for var in self._vars]
with tf.variable_scope('save'):
self._saves = shadow_vars()
with tf.variable_scope('sum_squared_grads'):
self._sum_squared_grads = shadow_vars()
self._save = self._make_save()
self._restore = self._make_restore()
# These are for computing an RMSProplike estimate of the variance of
# minibatch gradients. Here, this quantity is estimated on the training
# set once, while gradient descent happens on validation/test.
self._num_squared_grads = tf.get_variable(
'num_squared_grads', [], initializer=tf.zeros_initializer(),
trainable=False)
self._zero_sum_squared_grads = self._make_zero_sum_squared_grads()
self._add_squared_grads = self._make_add_squared_grads()
self._epsilon = epsilon
self._update = self._make_update()
def _make_save(self):
assignments = []
for save, var in zip(self._saves, self._vars):
assignments.append(save.assign(var))
return tf.group(assignments)
def _make_restore(self):
assignments = []
for save, var in zip(self._saves, self._vars):
assignments.append(var.assign(save))
return tf.group(assignments)
def _make_update(self):
mss = []
gsum = 0.0
count = 0
for sum_squared_grads in self._sum_squared_grads:
ms = tf.sqrt(sum_squared_grads / self._num_squared_grads)
gsum += tf.reduce_sum(ms)
count += tf.reduce_sum(tf.ones_like(ms))
mss.append(ms)
gsum = gsum / count
assignments = []
for grad, var, save, sum_squared_grads, ms in zip(
self._grads, self._vars, self._saves, self._sum_squared_grads, mss):
decay_rate = tf.minimum(1.0, self._decay_rate*(ms/gsum))
delta = (-self._learning_rate*grad / (ms + self._epsilon) +
decay_rate*(save-var))
assignments.append(var.assign_add(delta))
return tf.group(assignments)
def _make_add_squared_grads(self):
assignments = []
for sum_squared_grads, grads in zip(self._sum_squared_grads, self._grads):
assignments.append(sum_squared_grads.assign_add(tf.square(grads)))
return tf.group(assignments + [self._num_squared_grads.assign_add(1)])
def _make_zero_sum_squared_grads(self):
assignments = []
for sum_squared_grads in self._sum_squared_grads:
assignments.append(sum_squared_grads.assign(
tf.zeros_like(sum_squared_grads)))
return tf.group(assignments + [self._num_squared_grads.assign(0)])
def save(self):
tf.get_default_session().run(self._save)
def restore(self):
tf.get_default_session().run(self._restore)
def update_op(self):
return self._update
def zero_sum_squared_grads(self):
tf.get_default_session().run(self._zero_sum_squared_grads)
def add_squared_grads_op(self):
return self._add_squared_grads
def __enter__(self):
self.save()
def __exit__(self, type_, value, traceback):
self.restore()
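# Rough usage sketch; only the Dyneval method names come from this class, the session
# and feed handling around them are assumptions:
#   dyneval = Dyneval(grads_and_vars, learning_rate=0.001, decay_rate=0.02, epsilon=1e-5)
#   dyneval.zero_sum_squared_grads()
#   for batch in training_batches:                 # estimate gradient RMS on training data
#       sess.run(dyneval.add_squared_grads_op(), feed_dict=batch)
#   with dyneval:                                  # saves weights, restores them on exit
#       for batch in eval_batches:                 # adapt weights while evaluating
#           sess.run([loss, dyneval.update_op()], feed_dict=batch)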
```
#### File: lamb/lamb/lamb_flags.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import copy
import math
from absl import flags
from absl import logging
import six
import tensorflow.compat.v1 as tf
from google.protobuf import text_format
from tensorflow.contrib import training as contrib_training
from tensorflow.contrib.training.python.training import hparam_pb2
# Bump this on incompatible changes such as renaming an option and update
# maybe_upgrade_args_line below.
_config_version = 5
# The format of options is `(name, type, default_value, visibility)`.
# `visibility` is optional and can be `deprecated`, `external` or `internal`.
def option_visibility(option):
if len(option) == 4:
return option[3]
else:
return None
# There will be a command-line flag for every option except those with
# `internal` visibility. Conversely, in the Config object, `external` and
# `deprecated` are not going to be present. String like 'data' are turned into
# python comments when options are saved/printed.
_config_options = [
('config_version', 'integer', _config_version),
'data',
('training_file', 'string', ''),
('validation_file', 'string', ''),
('test_file', 'string', ''),
('conditioning_separator', 'string', ''),
('file_encoding', 'string', 'utf-8'),
('word_based', 'boolean', False),
('episodic', 'boolean', False),
'model',
('num_params', 'integer', -1),
('share_input_and_output_embeddings', 'boolean', False),
('input_embedding_size', 'integer', -1),
('output_embedding_size', 'integer', -1),
('input_embedding_ratio', 'float', 1.0),
('output_embedding_ratio', 'float', -1.0),
('mos_num_components', 'integer', 0),
('token_dropout', 'float', 0.0),
('embedding_dropout', 'float', 0.0),
('input_dropout', 'float', 0.0),
('output_dropout', 'float', 0.0),
('downprojected_output_dropout', 'float', -1.0),
('shared_mask_dropout', 'boolean', False),
# Whether to embed 'globally' or per time step. They are
# equivalent, but may differ in performance.
('embed_once', 'boolean', True),
('output_once', 'boolean', True),
'cell',
('model', 'string', 'lstm'),
('num_layers', 'integer', 1),
('residual_connections', 'boolean', False),
('lstm_skip_connection', 'boolean', True),
('feature_mask_rounds', 'integer', 0),
('feature_mask_rank', 'integer', 0),
# Deprecated. This is here to be able to load old configs. True sets
# feature_mask_rounds to 2 and feature_mask_rank to 0.
('feature_mask', 'boolean', False),
# If in [0,1) then within the recurrent cell in every dense
# connectivity matrix of N elements, randomly chosen elements
# are fixed to 0 such that the total number of trainable,
# non-fixed values is N*sparsity_ratio. Values outside [0,1) are
# treated as 1.0 (i.e. no sparsity),.
('sparsity_ratio', 'float', -1.0),
# TODO(melisgl): Document it once it's actually used.
('overlay_rank', 'integer', -1),
('hidden_size', 'list_of_ints', '-1'),
('hidden_size_multiplier', 'float', 1.0),
('layer_norm', 'boolean', False),
('activation_fn', 'string', 'tf.tanh'),
('tie_forget_and_input_gates', 'boolean', False),
('cap_input_gate', 'boolean', True),
('trainable_initial_state', 'boolean', True),
('inter_layer_dropout', 'float', 0.0),
('state_dropout', 'float', 0.0),
# This allows gradual change in the dropout mask. It's kind of in between
# shared and non-shared masks.
('state_dropout_flip_rate', 'float', 0.0),
('update_dropout', 'float', 0.0),
('cell_clip', 'float', -1.0),
'objective',
('model_average', 'string', 'arithmetic'),
('num_training_samples', 'integer', 1),
('l2_penalty', 'float', 0.0),
('l1_penalty', 'float', 0.0),
('activation_norm_penalty', 'float', 0.0),
('drop_state_probability', 'float', 0.0),
'initialization',
('embedding_init_factor', 'float', 1.0),
('scale_input_embeddings', 'boolean', False),
('cell_init_factor', 'float', 1.0),
('forget_bias', 'float', 1.0),
('output_init_factor', 'float', 1.0),
'schedule',
('steps_per_turn', 'integer', 1000),
('print_training_stats_every_num_steps', 'integer', 1000),
('turns', 'integer', -1),
'optimization',
('optimizer_type', 'string', 'rmsprop'),
('rmsprop_beta2', 'float', 0.999),
('rmsprop_epsilon', 'float', 1e-8),
('adam_beta1', 'float', 0.9),
('adam_beta2', 'float', 0.999),
('adam_epsilon', 'float', 1e-8),
('batch_size', 'integer', -1),
('accum_batch_size', 'integer', -1),
('max_grad_norm', 'float', 1.0),
('max_time_steps', 'integer', 100),
('trigger_averaging_turns', 'integer', -1),
('trigger_averaging_at_the_latest', 'integer', -1),
'learning rate',
('learning_rate', 'float', 0.001),
# TODO(melisgl): Learning rate decay is currently unimplemented.
#
# After each optimization step beyond learning_rate_decay_burn_in_steps the
# effective learning rate is multiplied by learning_rate_decay so that it's
# equal to learning_rate * pow(decay, max(0, global_step - burn_in_steps)).
# Also see drop_learning_rate_turns.
('learning_rate_decay', 'float', 1.0),
('learning_rate_decay_burn_in_steps', 'integer', 0),
('drop_learning_rate_turns', 'integer', -1),
('drop_learning_rate_multiplier', 'float', 1.0),
('drop_learning_rate_at_the_latest', 'integer', -1),
'early stopping',
('early_stopping_turns', 'integer', -1),
('early_stopping_rampup_turns', 'integer', 0),
('early_stopping_worst_xe_target', 'string', ''),
('early_stopping_slowest_rate', 'float', 0.0),
'cross-validation',
('crossvalidate', 'boolean', False),
('crossvalidation_folds', 'integer', 10),
('crossvalidation_rounds', 'integer', 1),
'evaluation',
('max_training_eval_batches', 'integer', 100),
('max_eval_eval_batches', 'integer', -1),
('max_test_eval_batches', 'integer', -1),
('min_non_episodic_eval_examples_per_stripe', 'integer', 100),
('eval_on_test', 'boolean', False),
('eval_method', 'string', 'deterministic'),
('num_eval_samples', 'integer', 0),
('eval_softmax_temperature', 'float', 1.0),
('eval_softmax_temperature_estimation_num_tokens', 'integer', 50000),
('eval_power_mean_power', 'float', 1.0),
('eval_dropout_multiplier', 'float', 1.0),
('validation_prediction_file', 'string', ''),
('dyneval', 'boolean', False),
('dyneval_learning_rate', 'float', 0.001),
('dyneval_decay_rate', 'float', 0.02),
('dyneval_epsilon', 'float', 1e-5),
'experiments',
('experiment_dir', 'string', '/tmp/lamb'),
('save_config', 'boolean', True, 'external'),
('config_file', 'string', '', 'external'),
# Some parameters used to be specified like
# `--hps=model=lstm,hidden_size=500`, a comma-separated list of assignments.
('hps', 'string', '', 'deprecated'),
# These used to be saved in a separate file.
('hps_proto_file', 'string', '', 'deprecated'),
# The old name for config_file.
('flags_as_dict', 'string', '', 'deprecated'),
'checkpoints',
('save_checkpoints', 'boolean', True),
('load_checkpoint', 'string', '', 'external'),
('load_optimizer_state', 'boolean', True, 'external'),
('load_averaged', 'boolean', False, 'external'),
('use_old_linear_names', 'boolean', False, 'external'),
'misc',
('seed', 'integer', 1),
('swap_memory', 'boolean', False),
('log_device_placement', 'boolean', False),
# currently unused
('summary_flush_secs', 'integer', 120)
]
FLAGS = flags.FLAGS
def _filter_options(options):
return [option for option in options
if not isinstance(option, six.string_types)]
def _define_flags(options):
for option in _filter_options(options):
name, type_, default_ = option[:3]
if type_ == 'boolean':
flags.DEFINE_boolean(name, default_, '')
elif type_ == 'integer':
flags.DEFINE_integer(name, default_, '')
elif type_ == 'float':
flags.DEFINE_float(name, default_, '')
elif type_ == 'string':
flags.DEFINE_string(name, default_, '')
elif type_ == 'list_of_ints':
flags.DEFINE_string(name, default_, '')
else:
assert 'Unexpected option type %s' % type_
# Define command-line flags for all options (unless `internal`).
_define_flags(_config_options)
_is_initialized = [False]
def initialize():
"""Override flags from FLAGS.config_file and handle old formats.
Unless they were explicitly provided on the command line.
"""
if not _is_initialized[0]:
assert not (FLAGS.config_file and FLAGS.flags_as_dict), (
'Both config_file and flags_as_dict were specified.')
# The deprecated --flags_as_dict used to save some command-line flags as a
# dict.
if FLAGS.flags_as_dict:
logging.info('Handling --flags_as_dict %s', FLAGS.flags_as_dict)
with tf.gfile.GFile(FLAGS.flags_as_dict, 'r') as f:
# This contains a single dict.
args_dict = eval(f.read()) # pylint: disable=eval-used
if FLAGS.config_file:
logging.info('Handling --config_file %s', FLAGS.config_file)
with tf.gfile.GFile(FLAGS.config_file, 'r') as f:
# This contains a list of bindings.
args_dict = dict(eval(f.read())) # pylint: disable=eval-used
if FLAGS.config_file or FLAGS.flags_as_dict:
args_dict = _maybe_upgrade_args(args_dict)
# Update FLAGS with the upgraded values.
for name, value in args_dict.items():
if (name not in ['flags_version', 'config_version'] and
FLAGS[name].using_default_value):
logging.info('override FLAGS.%s = %r', name, value)
FLAGS[name].value = value
_handle_hps()
_handle_hps_proto_file()
# Turn off trainable_initial_state for non-episodic mode.
if not FLAGS.episodic:
FLAGS.trainable_initial_state = False
_is_initialized[0] = True
# args_dict comes from either --flags_as_dict or --config_file, either of which
# may be saved using an old format.
def _maybe_upgrade_args(args_dict):
version = args_dict.get('config_version', 1)
if version < _config_version:
logging.info('config file version was %s. Upgrading to %s',
version, _config_version)
if version < 2:
args_dict['validation_file'] = args_dict.pop('eval_file')
args_dict['max_time_steps'] = args_dict.pop('max_steps')
args_dict['steps_per_turn'] = args_dict.pop('steps')
args_dict['early_stopping_turns'] = args_dict.pop(
'early_stopping_rounds')
args_dict['early_stopping_rampup_turns'] = args_dict.pop(
'early_stopping_rampup_rounds')
args_dict['print_training_stats_every_num_steps'] = args_dict.pop(
'print_every')
if 'averaged_trigger_turns' in args_dict:
args_dict['trigger_averaging_turns'] = args_dict.pop(
'averaged_trigger_turns')
if 'mixture_of_softmaxes_num_components' in args_dict:
mos_num = args_dict.pop('mixture_of_softmaxes_num_components')
if mos_num == 1:
mos_num = 0
args_dict['mos_num_components'] = mos_num
if version < 5 and 'hidden_size' in args_dict:
# FLAGS.hidden_size used to be an int, now it's a string.
args_dict['hidden_size'] = str(args_dict['hidden_size'])
else:
assert version == _config_version, (
'Unexpected config format version {}'.format(version))
return args_dict
# No more version changes, since the corresponding --hps_proto_file is for
# backwards compatibility only.
_hparams_version = 2
_v2_hparam_renames = {
'intra_layer_dropout': 'inter_layer_dropout',
'softmax_test_time_temperature': 'eval_softmax_temperature',
'test_time_power_mean_power': 'eval_power_mean_power',
'test_time_dropout_multiplier': 'eval_dropout_multiplier',
'weight_decay': 'l2_penalty',
'weight_penalty': 'l1_penalty',
'outer_steps': 'turns',
'drop_learning_rate_rounds': 'drop_learning_rate_turns',
'vocab_size': None
}
# Some options used to be specified like `--hps=model=lstm,hidden_size=500`, a
# comma-separated list of assignments. Now, any option can be given via the
# deprecated --hps option.
#
# Error handling is weak, but this is for v1 compatibility only, so that's ok.
def _handle_hps():
assignments = FLAGS.hps.split(',')
for assignment in assignments:
if assignment:
name, value = assignment.split('=')
name = _v2_hparam_renames.get(name, name)
if name and value:
FLAGS[name].parse(value)
logging.info('hps: FLAGS.%s = %r', name, FLAGS[name].value)
# There used to be two files in which options were saved. Now there is only one,
# but we must support old saves.
def _handle_hps_proto_file():
if FLAGS.hps_proto_file:
hparams_proto = hparam_pb2.HParamDef()
with tf.gfile.GFile(FLAGS.hps_proto_file) as f:
text_format.Parse(f.read(), hparams_proto)
hparams = contrib_training.HParams.from_proto(hparams_proto)
hparams = _maybe_upgrade_hparams(hparams)
for name, value in hparams.values().items():
if FLAGS[name].using_default_value:
logging.info('hps_proto FLAGS.%s = %r', name, value)
FLAGS[name].value = value
def _maybe_upgrade_hparams(hparams):
version = hparams.get('hparams_version', 1)
if version < _hparams_version:
logging.info('hps_proto_file version was %s. Upgrading to %s.',
version, _hparams_version)
def rename(old, new):
# No assignment, delete and readd with new value.
old_value = hparams.get(old)
if new and old_value is not None:
hparams.add_hparam(new, old_value)
hparams.del_hparam(old)
if version == 1:
for old_name, new_name in _v2_hparam_renames.items():
rename(old_name, new_name)
if hparams.get('mixture_of_softmaxes_num_components', None):
rename('mixture_of_softmaxes_num_components', 'mos_num_components')
if hparams.mos_num_components == 1:
hparams.mos_num_components = 0
if hparams.get('hidden_size', None):
value = str(hparams.get('hidden_size'))
hparams.del_hparam('hidden_size')
hparams.add_hparam('hidden_size', value)
else:
assert version == _hparams_version, (
'Unknown hps_proto_file format version {}'.format(version))
return hparams
# At startup the command-line flags are packaged into a Config object. Some code
# has been refactored to work with Config objects, some code still uses the
# command line arguments directly (as FLAGS.*). In general, we want to minimize
# dependency on FLAGS, and also on Config. Thus relevant parts of Config should
# be extracted and passed as arguments as early as possible.
class Config(object):
"""Flat, mutable configuration with dot notation."""
def __init__(self, options=()):
self._options = options
self._values = {}
def _find_option(self, name):
for option in _filter_options(self._options):
if option[0] == name:
return option
def __getattr__(self, name):
if name in ['_options', '_values', '_find_option']:
return super(Config, self).__getattribute__(name)
elif name in self._values:
return self._values[name]
else:
# Lookup the default value.
option = self._find_option(name)
if option is None:
return super(Config, self).__getattribute__(name)
# raise AttributeError('No config option named {}.'.format(name))
else:
return option[2]
def __setattr__(self, name, value):
if name in ['_options', '_values', '_find_option']:
super(Config, self).__setattr__(name, value)
elif self._find_option(name):
self._values[name] = value
else:
# Add an internal value that doesn't get saved.
self._options.append((name, 'unknown_type', None, 'internal'))
self._values[name] = value
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __contains__(self, name):
return name in self._values
def get(self, name, default):
if name in self:
return self[name]
else:
return default
def __iter__(self):
for option in _filter_options(self._options):
yield option[0]
def __copy__(self):
config = self.__class__(copy(self._options))
config._values = copy(self._values) # pylint: disable=protected-access
return config
def __str__(self):
s = ''
for option in self._options:
if s:
indent = ' '
else:
indent = ' '
if isinstance(option, six.string_types):
s += indent + '# ' + option + '\n'
elif option_visibility(option) != 'internal':
name = option[0]
value = self.__getattr__(name)
s += indent + str((name, value)) + ',\n'
return '[' + s + ']'
def save(self, filename):
with tf.gfile.GFile(filename, 'w') as f:
f.write(str(self))
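# --- Illustrative usage sketch (not part of the original file) ---
# The option tuples below are hypothetical and simplified; they only
# demonstrate the dot/bracket access and the defaulting behaviour described
# in the comment above the Config class.
def _demo_config():
  options = [('learning_rate', 'float', 0.001, ''),
             ('hidden_size', 'list_of_ints', [-1], '')]
  config = Config(options)
  config.learning_rate = 0.01             # explicitly set, stored in _values
  assert config.learning_rate == 0.01
  assert config['hidden_size'] == [-1]    # falls back to the declared default
  assert config.get('missing', 42) == 42
  return config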
def get_config():
"""Return the config in effect.
Returns:
A Config containing all the config options (except deprecated or external,
see _config_options) with values set from command-line arguments.
"""
options = [option for option in _config_options
if (isinstance(option, six.string_types) or
option_visibility(option) not in ['deprecated', 'external'])]
config = Config(options)
# Update the config with the flag values.
for option in _filter_options(options):
if option_visibility(option) not in ['deprecated', 'external', 'internal']:
name = option[0]
if option[1] == 'list_of_ints':
if isinstance(FLAGS[name].value, list):
value = [int(x) for x in FLAGS[name].value]
else:
value = [int(x) for x in FLAGS[name].value.split(',')]
else:
value = FLAGS[name].value
config[name] = value
return config
def handle_config_defaults(config, num_params_fn):
"""Resolve dependencies within `config`.
In particular, set hidden_size (if -1) according to num_params and make the
embedding sizes default to the hidden size. Also, handle budgeting: if
hidden_size is not provided (it is -1), but num_params is, then compute the
largest possible hidden_size with which the total number of trainable
parameters does not exceed num_params.
Args:
config: The base config. Must have num_params set.
    num_params_fn: A function of one argument, a config object. The config passed
to it is constructed by setting the hidden_size and performing the usual
defaulting.
Returns:
The mutated config.
"""
# TODO(melisgl): Move this to the tuner code.
# For ease of specification, tuning ranges are weird. Let's fix them up here.
if config.sparsity_ratio >= 1.0:
config.sparsity_ratio = -1.0
if config.input_embedding_ratio >= 1.0:
config.input_embedding_ratio = 1.0
if config.output_embedding_ratio >= 1.0:
config.output_embedding_ratio = 1.0
if config.output_embedding_ratio < 0.0:
config.output_embedding_ratio = config.input_embedding_ratio
if config.learning_rate_decay > 1.0:
config.learning_rate_decay = 1.0
if config.feature_mask_rank < 0:
config.feature_mask_rank = 0
if config.inter_layer_dropout < 0.0:
config.inter_layer_dropout = config.input_dropout
if config.downprojected_output_dropout < 0.0:
config.downprojected_output_dropout = config.output_dropout
# Handle deprecated feature_mask flag.
if config.feature_mask:
config.feature_mask_rounds = 2
config.feature_mask_rank = 0
# Handle the num_param budget.
if config.hidden_size in [-1, [-1]]:
assert config.num_params > -1, (
'Neither hidden_size nor num_params is specified.')
config.hidden_size = [_budget_hidden_size(config, num_params_fn)]
config = _handle_hidden_size_defaults(config)
# Perform some sanity checks.
if config.output_embedding_size > config.hidden_size[-1]:
logging.warn('output_embedding_size %s is greater than '
'the hidden size %s', config.output_embedding_size,
config.hidden_size[-1])
if config.share_input_and_output_embeddings:
assert config.input_embedding_size == config.output_embedding_size
return config
def _budget_hidden_size(config, num_params_fn):
"""Finds the largest possible hidden size that respects config.num_params.
Args:
config: A Config. Must have num_params set.
    num_params_fn: A function of one argument, a config object. The config passed
to it is constructed by setting the hidden_size and performing the usual
defaulting.
Returns:
The largest possible hidden size with which the total number of
trainable parameters does not exceed config.num_params. Respects
defaulting rules such as input_embedding_ratio.
"""
logging.info(
'Searching for largest possible hidden_size subject to num_params<=%s',
config.num_params)
assert config.num_params > 0
def config_with_hidden_size(hidden_size):
updated_config = copy(config)
updated_config.hidden_size = [hidden_size]
return _handle_hidden_size_defaults(updated_config)
def is_good(hidden_size):
n = num_params_fn(config_with_hidden_size(hidden_size))
    good = (n is not None and n <= config.num_params)
if n is None:
logging.info('hidden_size=%s, num_params=OOM BAD', hidden_size)
elif good:
logging.info('hidden_size=%s, num_params=%s GOOD', hidden_size, n)
else:
logging.info('hidden_size=%s, num_params=%s BAD', hidden_size, n)
return good, n
# Double the size until it's too large.
previous_hidden_size = 1
hidden_size = 1
good, n = is_good(hidden_size)
while good:
previous_hidden_size = hidden_size
hidden_size = max(hidden_size+1,
int(hidden_size*math.sqrt(1.2*config.num_params / n)))
good, n = is_good(hidden_size)
# Bisect the [previous_hidden_size, hidden_size] range.
def bisect(lower, upper, fn): # pylint: disable=missing-docstring
while lower < upper-1:
# The number of parameters is likely to be at least quadratic in
# hidden_size. Find the middle point in log space.
middle = int(math.exp((math.log(upper) + math.log(lower)) / 2))
middle = min(max(middle, lower+1), upper-1)
if fn(middle)[0]:
lower = middle
else:
upper = middle
return lower
return bisect(previous_hidden_size, hidden_size, is_good)
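# --- Illustrative sketch (not part of the original file) ---
# The search above grows hidden_size geometrically until the parameter budget
# is exceeded and then bisects in log space. The toy below uses plain doubling
# and a hypothetical quadratic parameter count just to make the control flow
# easy to follow in isolation.
def _demo_budget_search(num_params_budget, count_params=lambda h: 12 * h * h):
  def is_good(h):
    return count_params(h) <= num_params_budget
  lower = 1
  upper = 1
  while is_good(upper):
    lower = upper
    upper *= 2
  while lower < upper - 1:
    # Parameter count grows superlinearly, so bisect in log space.
    middle = int(math.exp((math.log(upper) + math.log(lower)) / 2))
    middle = min(max(middle, lower + 1), upper - 1)
    if is_good(middle):
      lower = middle
    else:
      upper = middle
  return lower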
def _handle_hidden_size_defaults(config):
"""Handle default that depend on hidden_size."""
last_hidden_size = config.hidden_size[-1]
for i in six.moves.range(config.num_layers-len(config.hidden_size)):
config.hidden_size.append(
max(1, int(last_hidden_size * pow(config.hidden_size_multiplier, i+1))))
# Now set the actual embedding size if necessary.
last_hidden_size = config.hidden_size[-1]
if config.input_embedding_size == -1:
config.input_embedding_size = max(1, round_to_int(
config.input_embedding_ratio*last_hidden_size))
if config.output_embedding_size == -1:
config.output_embedding_size = max(1, round_to_int(
config.output_embedding_ratio*last_hidden_size))
return config
def round_to_int(x):
return int(round(x))
def flags_as_dict():
"""Return flags that were explicitly provided."""
dict_ = {}
for option in _filter_options(_config_options):
name = option[0]
if not FLAGS[name].using_default_value:
dict_[name] = FLAGS[name].value
return dict_
```
#### File: lamb/lamb/nascell.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
class NASCell(tf.nn.rnn_cell.RNNCell):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
<NAME> and <NAME>.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
"""
def __init__(self, num_units, num_proj=None,
use_biases=False, reuse=None,
initializer=None,
input_transform=None,
state_transform=None,
update_transform=None):
"""Initialize the parameters for a NAS cell.
Args:
num_units: int, The number of units in the NAS cell
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
use_biases: (optional) bool, If True then use biases within the cell. This
is False by default.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
initializer: Initializer for the variables.
input_transform: None, or a function of one argument that
massages the input in some way. For example, variational
        dropout can be implemented by passing a Dropout object here.
state_transform: Similar to input_transform, this is
applied to the recurrent state.
update_transform: Similar to input_transform, this is
applied to the proposed update ('j').
"""
super(NASCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._num_proj = num_proj
self._use_biases = use_biases
self._reuse = reuse
if num_proj is not None:
self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = tf.nn.rnn_cell.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._initializer = initializer
self._input_transform = input_transform
self._state_transform = state_transform
assert update_transform is None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of NAS Cell.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: This must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
NAS Cell after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of NAS Cell after reading `inputs`
when the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = tf.sigmoid
tanh = tf.tanh
relu = tf.nn.relu
num_proj = self._num_units if self._num_proj is None else self._num_proj
def maybe_transform(transform, x):
if transform is None:
return x
else:
return transform(x)
(c_prev, m_prev) = state
m_prev = maybe_transform(self._state_transform, m_prev)
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
inputs = maybe_transform(self._input_transform, inputs)
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# Variables for the NAS cell. W_m is all matrices multiplying the
    # hidden state and W_inputs is all matrices multiplying the inputs.
concat_w_m = tf.get_variable(
"recurrent_kernel", [num_proj, 8 * self._num_units],
initializer=self._initializer, dtype=dtype)
concat_w_inputs = tf.get_variable(
"kernel", [input_size.value, 8 * self._num_units],
initializer=self._initializer, dtype=dtype)
m_matrix = tf.matmul(m_prev, concat_w_m)
inputs_matrix = tf.matmul(inputs, concat_w_inputs)
if self._use_biases:
b = tf.get_variable(
"bias",
shape=[8 * self._num_units],
initializer=tf.zeros_initializer(),
dtype=dtype)
m_matrix = tf.nn.bias_add(m_matrix, b)
    # The NAS cell branches into 8 different splits for both the hidden state
# and the input
m_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
value=m_matrix)
inputs_matrix_splits = tf.split(axis=1, num_or_size_splits=8,
value=inputs_matrix)
# First layer
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
# Second layer
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
# Inject the cell
l2_0 = tanh(l2_0 + c_prev)
# Third layer
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre # create new cell
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
# Final layer
new_m = tanh(l3_0 * l3_1)
# Projection layer if specified
if self._num_proj is not None:
concat_w_proj = tf.get_variable(
"projection_weights", [self._num_units, self._num_proj],
dtype)
new_m = tf.matmul(new_m, concat_w_proj)
new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_m)
return new_m, new_state
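# --- Illustrative usage sketch (not part of the original file) ---
# Unrolls the cell over a toy sequence with tf.nn.dynamic_rnn, assuming
# TF1-style graph mode. All sizes below are arbitrary.
def _demo_nas_cell():
  batch_size, time_steps, input_dim, num_units = 4, 7, 16, 32
  inputs = tf.placeholder(tf.float32, [batch_size, time_steps, input_dim])
  cell = NASCell(num_units, use_biases=True)
  # outputs: [batch_size, time_steps, num_units]
  # final_state: LSTMStateTuple of two [batch_size, num_units] tensors
  outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
  return outputs, final_state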
```
#### File: lamb/lamb/skip_multi_rnn_cell.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
class SkipMultiRNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN cell composed sequentially of multiple simple cells."""
def __init__(self, cells, state_is_tuple=True):
"""Create a RNN cell composed sequentially of a number of RNNCells.
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all
concatenated along the column axis. This latter behavior will soon be
deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`.
"""
if not cells:
raise ValueError("Must specify at least one cell for SkipMultiRNNCell.")
if not nest.is_sequence(cells):
raise TypeError(
"cells must be a list or tuple, but saw: %s." % cells)
self._cells = cells
self._state_is_tuple = state_is_tuple
if not state_is_tuple:
if any(nest.is_sequence(c.state_size) for c in self._cells):
raise ValueError("Some cells return tuples of states, but the flag "
"state_is_tuple is not set. State sizes are: %s"
% str([c.state_size for c in self._cells]))
@property
def state_size(self):
if self._state_is_tuple:
return tuple(cell.state_size for cell in self._cells)
else:
return sum([cell.state_size for cell in self._cells])
@property
def output_size(self):
return self._cells[-1].output_size
def __call__(self, inputs, state, scope=None):
"""Run this multi-layer cell on inputs, starting from state."""
output = None
with tf.variable_scope(scope or "skip_multi_rnn_cell"):
cur_state_pos = 0
cur_inp = inputs
new_states = []
for i, cell in enumerate(self._cells):
with tf.variable_scope("cell_%d" % i):
if self._state_is_tuple:
if not nest.is_sequence(state):
raise ValueError(
"Expected state to be a tuple of length %d, but received: %s"
% (len(self.state_size), state))
cur_state = state[i]
else:
cur_state = tf.slice(
state, [0, cur_state_pos], [-1, cell.state_size])
cur_state_pos += cell.state_size
cur_inp, new_state = cell(cur_inp, cur_state)
new_states.append(new_state)
if output is None:
output = cur_inp
else:
output += cur_inp
new_states = (tuple(new_states) if self._state_is_tuple else
tf.concat(new_states, 1))
return output, new_states
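# --- Illustrative usage sketch (not part of the original file) ---
# Stacks three LSTM cells and unrolls them, assuming TF1 graph mode. Because
# the skip connections sum the per-layer outputs, all cells use the same
# number of units here; the other sizes are arbitrary.
def _demo_skip_multi_rnn():
  cells = [tf.nn.rnn_cell.BasicLSTMCell(64) for _ in range(3)]
  stacked = SkipMultiRNNCell(cells, state_is_tuple=True)
  inputs = tf.placeholder(tf.float32, [8, 20, 64])
  outputs, final_state = tf.nn.dynamic_rnn(stacked, inputs, dtype=tf.float32)
  return outputs, final_state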
```
#### File: lamb/lamb/tiled_linear.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl import logging
from lamb import utils
import six
from sonnet.python.modules import base as snt_base
import tensorflow.compat.v1 as tf
class AbstractTiledLinear(snt_base.AbstractModule):
"""Efficient linear mappings from a number of inputs to outputs."""
def __init__(self, input_name_and_sizes, output_name_and_sizes,
var_init_params=None, name='tiled_linear'):
"""Constructs a AbstractTiledLinear module.
Args:
input_name_and_sizes: A sequence of `(name, size)` tuples
        listing the inputs and their sizes (a positive integer or None
        to rely on shape inference at build() time). As a
convenience, `(name, None)` can be shortened to `(name,)` or
just `name`.
output_name_and_sizes: Similar to `input_name_and_sizes`, it
lists the names and sizes of outputs. Since there is no way of
inferring shapes for outputs, the full `(name, size)` form
must always be used.
var_init_params: A dict for specifying initialization parameters
for variables such as the initializer, partitioner and
regularizer. Subclasses may support more parameters.
name: Name of the module.
Raises:
      ValueError: If `initializers` contains any keys other than 'w' or 'b'.
KeyError: If `partitioners` contains any keys other than 'w' or 'b'.
KeyError: If `regularizers` contains any keys other than 'w' or 'b'.
TypeError: If any of the given initializers are not callable.
TypeError: If any of the given partitioners are not callable.
TypeError: If any of the given regularizers are not callable.
"""
super(AbstractTiledLinear, self).__init__(name=name)
self._input_name_and_sizes = self._canonicalize_input_name_and_sizes(
input_name_and_sizes)
self._output_name_and_sizes_ = self._check_output_name_and_sizes(
output_name_and_sizes)
self._var_init_params = self._check_var_init_params(var_init_params)
self._merged_input_sizes = None
self._name = name
self._dtype = None
def _canonicalize_input_name_and_sizes(self, name_and_sizes):
result = []
for e in name_and_sizes:
if isinstance(e, six.string_types):
result.append((e, None))
else:
assert isinstance(e, tuple)
if len(e) == 1:
result.append((e[0], None))
elif len(e) == 2:
result.append(e)
else:
assert False, 'Malformed name_and_sizes spec {}.'.format(e)
return result
def _check_output_name_and_sizes(self, name_and_sizes):
for e in name_and_sizes:
assert isinstance(e, tuple)
assert len(e) == 2
assert isinstance(e[0], six.string_types)
assert isinstance(e[1], int)
return name_and_sizes
def _check_var_init_params(self, var_init_params):
if var_init_params is None:
return {}
else:
valid_keys = self.valid_var_init_param_keys()
for pattern in var_init_params:
for key in var_init_params[pattern]:
assert key in valid_keys, (
'Unexpected key {} in var_init_params[{}].'.format(key, pattern))
return var_init_params
def _check_dtype(self, inputs, previous_dtype):
dtype = previous_dtype
for input_ in inputs:
if dtype is None:
dtype = input_.dtype
else:
assert input_.dtype == dtype
return dtype
def valid_var_init_param_keys(self):
return ['initializer', 'partitioner', 'regularizer']
def _find_var_init_param(self, var_name, key, default):
for pattern in self._var_init_params:
if re.match(pattern, var_name):
value = self._var_init_params[pattern].get(key, None)
if value is not None:
return value
return default
def _get_variable(self, name, shape,
initializer=None,
default_initializer=None, default_partitioner=None,
default_regularizer=None):
if initializer is None:
initializer = self._find_var_init_param(
name, 'initializer', default_initializer)
partitioner = self._find_var_init_param(
name, 'partitioner', default_partitioner)
regularizer = self._find_var_init_param(
name, 'regularizer', default_regularizer)
return tf.get_variable(name, shape=shape, dtype=self._dtype,
initializer=initializer, partitioner=partitioner,
regularizer=regularizer)
def _declared_input_sizes(self):
sizes = []
for _, input_size in self._input_name_and_sizes:
sizes.append(input_size)
return tf.TensorShape(sizes)
def _inferred_input_sizes(self, inputs):
return tf.TensorShape([input_.get_shape().as_list()[-1]
for input_ in inputs])
def _merge_input_sizes(self, inputs):
inferred_input_sizes = self._inferred_input_sizes(inputs)
if self._merged_input_sizes is None:
declared_input_sizes = self._declared_input_sizes()
# This is the first call to build(). Remember the input sizes
# (only the last dimension matters for matmul).
if not declared_input_sizes.is_compatible_with(inferred_input_sizes):
raise snt_base.IncompatibleShapeError(
'{}: Declared input sizes {} are incompatible '
'with inferred ones {}.'.format(
self.scope_name, declared_input_sizes.as_list(),
inferred_input_sizes.as_list()))
self._merged_input_sizes = declared_input_sizes.merge_with(
inferred_input_sizes)
if not self._merged_input_sizes.is_fully_defined():
raise snt_base.IncompatibleShapeError(
'{}: Last input dimensions must be known at module build time.'
' Got {}.'.format(self.name, self._merged_input_sizes.as_list()))
else:
# At subsequent calls check that input sizes are compatible.
if not self._merged_input_sizes.is_compatible_with(inferred_input_sizes):
raise snt_base.IncompatibleShapeError(
'{}: Current input sizes {} are different '
'from first build {}'.format(
self.name, inferred_input_sizes.as_list(),
self._merged_input_sizes.as_list()))
def _merged_input_name_and_sizes(self):
return zip([input_name for input_name, _ in self._input_name_and_sizes],
self._merged_input_sizes.as_list())
def _output_name_and_sizes(self):
return self._output_name_and_sizes_
def _build(self, inputs):
"""Connects the module into the graph, with `inputs`.
If this is not the first time the module has been connected to the
graph, the Tensors in `inputs` must have the same final dimension,
in order for the existing variables to be the correct size for the
multiplication. The leading dimensions of the input tensors may
differ for each call to `build()`.
Args:
inputs: A sequence of tensors. The last dimension of the tensor
at position I must be compatible with the declared size of the
corresponding input (if not None) and also with the last
dimension of the corresponding input tensor in all previous
calls to build() on the same object.
Returns:
A sequence of output tensors.
Raises:
base.IncompatibleShapeError: If the input sizes are not
        compatible with the declared sizes or with the sizes in previous calls.
"""
self._merge_input_sizes(inputs)
self._dtype = self._check_dtype(inputs, self._dtype)
return self._build_tiled_linear(inputs,
self._merged_input_name_and_sizes(),
self._output_name_and_sizes(),
True)
class TiledLinear(AbstractTiledLinear):
"""Plain linear mapping without any bells or whistles."""
def __init__(self, input_name_and_sizes, output_name_and_sizes,
var_init_params=None, name='tiled_linear'):
"""Plain linear mapping without any bells or whistles."""
super(TiledLinear, self).__init__(
input_name_and_sizes, output_name_and_sizes,
var_init_params=var_init_params, name=name)
self._weights = None
self._biases = None
def _ensure_weights(self):
# pylint: disable=missing-docstring
if self._weights is None:
# Tile an initializer together from the initializers of the individual
# tiles. We used to assemble the weight matrix by tiling the individual
# matrices, but with that tensorflow wasted gobs of memory for the
# gradients.
default_initializer = utils.variance_scaling_initializer(scale=1.0)
columns = []
for output_name, output_size in self._output_name_and_sizes_:
# Collect the initializers for the tiles for weight matrices mapping
# _to_ the output being considered. These will be stacked in a column of
# the final tiled weight matrix.
initializers_to_output = []
for input_name, input_size in self._input_name_and_sizes:
name = 'W_{}_{}'.format(input_name, output_name)
initializer = self._find_var_init_param(
name, 'initializer', default_initializer)
shape = [int(input_size), int(output_size)]
# logging.info('Tile initializer for %r %r: %r',
# name, shape, initializer)
initializers_to_output.append((initializer, shape))
columns.append(initializers_to_output)
def tiled_initializer(shape, dtype=self._dtype, partition_info=None):
column_values = []
for column in columns:
values = [initializer(shape, dtype=dtype,
partition_info=partition_info)
for initializer, shape in column]
column_values.append(tf.concat(values, axis=0))
return tf.concat(column_values, axis=1)
# Finally, instantiate the weights.
total_input_size = sum([input_size for _, input_size
in self._input_name_and_sizes])
total_output_size = sum([output_size for _, output_size
in self._output_name_and_sizes_])
self._weights = self._get_variable(
'W', shape=[total_input_size, total_output_size],
initializer=tiled_initializer)
return self._weights
def _ensure_biases(self):
# pylint: disable=missing-docstring
if self._biases is None:
# Biases are much smaller than weights, so wasting memory with gradients
# is not an issue.
biases = []
for output_name, output_size in self._output_name_and_sizes_:
bias = self._get_variable(
'B_{}'.format(output_name), shape=[output_size],
default_initializer=tf.zeros_initializer())
biases.append(bias)
self._biases = tf.concat(biases, 0)
return self._biases
def _build_tiled_linear(self, inputs, input_name_and_sizes,
output_name_and_sizes, add_bias):
# pylint: disable=missing-docstring
def split_output(output):
if len(output_name_and_sizes) == 1:
return output
elif len(set([size for _, size in output_name_and_sizes])) == 1:
# This is a bit faster than several tf.slice calls.
return tf.split(output, len(output_name_and_sizes), axis=1)
else:
outputs = []
offset = 0
for _, output_size in output_name_and_sizes:
outputs.append(tf.slice(output, [0, offset], [-1, output_size]))
offset += output_size
return outputs
weights = self._ensure_weights()
if len(inputs) > 1:
inputs = tf.concat(inputs, 1)
if add_bias:
biases = self._ensure_biases()
return split_output(tf.nn.xw_plus_b(inputs, weights, biases))
else:
return split_output(tf.matmul(inputs, weights))
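# --- Illustrative usage sketch (not part of the original file) ---
# Shows the calling convention for an LSTM-style mapping from inputs
# ('x', 'h') to the four gate pre-activations. The sizes are arbitrary and
# the example assumes Sonnet's usual module-call semantics (calling the
# module invokes _build).
def _demo_tiled_linear(x, h):
  # x: [batch, 128], h: [batch, 256]
  tiled = TiledLinear(
      input_name_and_sizes=[('x', 128), ('h', 256)],
      output_name_and_sizes=[('i', 256), ('j', 256), ('f', 256), ('o', 256)])
  i, j, f, o = tiled([x, h])
  return i, j, f, o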
class LayerNormedTiledLinear(AbstractTiledLinear):
# pylint: disable=missing-docstring
def _build_tiled_linear(self, inputs, input_name_and_sizes,
output_name_and_sizes, add_bias):
# pylint: disable=missing-docstring
# Return a list of weight matrices that parallels
# input_name_and_sizes and maps one input tensor to the
# concatenation of all outputs.
def make_weights_for_inputs():
rows = []
for input_name, input_size in input_name_and_sizes:
# Collect the weight matrices mapping from the input being
# considered. These will be stacked in a row.
weights_from_input = []
for output_name, output_size in output_name_and_sizes:
name = 'W_{}_{}'.format(input_name, output_name)
weight = self._get_variable(name, shape=[input_size, output_size])
weights_from_input.append(weight)
rows.append(tf.concat(weights_from_input, 1))
return rows
def make_biases():
biases = []
for name, size in output_name_and_sizes:
bias = self._get_variable('B_{}'.format(name), shape=[size],
default_initializer=tf.zeros_initializer())
biases.append(bias)
return tf.concat(biases, 0)
def split_output(output):
outputs = []
offset = 0
for _, output_size in output_name_and_sizes:
outputs.append(tf.slice(output, [0, offset], [-1, output_size]))
offset += output_size
return outputs
weights_for_inputs = make_weights_for_inputs()
s = make_biases() if add_bias else 0.0
for input_, weights, (name, _) in zip(inputs, weights_for_inputs,
input_name_and_sizes):
s += utils.layer_norm(tf.matmul(input_, weights), [1], bias=0.0,
scope='ln_{}'.format(name))
return split_output(s)
class SparseTiledLinear(AbstractTiledLinear):
"""Tiled mapping with sparse but fixed connectivity.
There are two additional variable initialization parameters:
`sparse_indices_sharing_key` and `sparsity_ratio`.
`sparse_indices_sharing_key` controls which tiles have the same
connectivity pattern (in the sense of having the same
tf.SparseTensor.indices). Generally, tiles with the same sharing key
and `sparsity_ratio` share these indices. There are two special key
values: `':name:'` and `':shape:'` that get substituted with the
name and shape of the actual tile, respectively.
For example, if an LSTM cell maps inputs ('x', 'h') to ('f, 'i, 'j',
'o'), then the following makes all weight matrices from the input
'x' to any of the gates or the candidate update share connectivity
structure. Similarly, there is connectivity pattern sharing between
weight matrices mapping from the recurrent state 'h'.
var_init_params=OrderedDict([
('W_x_.*', {'sparse_indices_sharing_key': 'x'}),
('W_h_.*', {'sparse_indices_sharing_key': 'h'}),
('.*', {'sparsity_ratio': 0.5,
'initializer': tf.random_uniform_initializer(-1, 1)})
])
Note that only the sparse indices are shared, the values are
different (unless playing tricks with the 'initializer' param).
If `sparsity_ratio` is set (to a float number in [0,1]), then this
  represents the proportion of non-missing entries in the
tile. The actual connectivity pattern is determined randomly.
In the future, there may be support for band and block diagonal
matrices.
"""
def __init__(self, input_name_and_sizes, output_name_and_sizes,
var_init_params=None, name='sparse_tiled_linear'):
super(SparseTiledLinear, self).__init__(
input_name_and_sizes, output_name_and_sizes,
var_init_params=var_init_params, name=name)
self._sparse_indices_cache = {}
# Cache the SparseTensor instances to avoid the considerable
# overhead of creating duplicates just to be optimized out.
self._sparse_variable_cache = {}
def _find_or_create_sparse_indices(self, name, shape):
ratio = self._find_var_init_param(name, 'sparsity_ratio', None)
assert ratio, 'sparsity_ratio must be specified.'
sharing_key = self._find_var_init_param(name, 'sparse_indices_sharing_key',
':name:')
if sharing_key == ':name:':
      sharing_key = name
if sharing_key == ':shape:':
sharing_key = shape
key = (sharing_key, ratio)
if key not in self._sparse_indices_cache:
logging.info('Creating sparse indices for %s%r with key %r.',
name, shape, key)
self._sparse_indices_cache[key] = utils.sparse_random_indices(ratio,
shape)
return self._sparse_indices_cache[key]
def _find_or_create_sparse_variable(self, name, sparse_indices, shape,
initializer=None, partitioner=None,
regularizer=None):
if name not in self._sparse_variable_cache:
logging.info('Create sparse variable %s.', name)
self._sparse_variable_cache[name] = utils.get_sparse_variable(
name, sparse_indices, shape=shape, initializer=initializer,
partitioner=partitioner, regularizer=regularizer)
return self._sparse_variable_cache[name]
def valid_var_init_param_keys(self):
return (super(SparseTiledLinear, self).valid_var_init_param_keys() +
['sparse_indices_sharing_key', 'sparsity_ratio'])
def _get_variable(self, name, shape,
default_initializer=None, default_partitioner=None,
default_regularizer=None, sparse_indices=None):
initializer = self._find_var_init_param(
name, 'initializer', default_initializer)
partitioner = self._find_var_init_param(
name, 'partitioner', default_partitioner)
regularizer = self._find_var_init_param(
name, 'regularizer', default_regularizer)
sparse_indices = self._find_or_create_sparse_indices(name, shape)
return self._find_or_create_sparse_variable(
name, sparse_indices, shape=shape, initializer=initializer,
partitioner=partitioner, regularizer=regularizer)
def _build_tiled_linear(self, inputs, input_name_and_sizes,
output_name_and_sizes, add_bias):
results = []
for output_name, output_size in output_name_and_sizes:
r = 0.0
for input_, (input_name, input_size) in zip(inputs, input_name_and_sizes):
name = 'W_{}_{}'.format(input_name, output_name)
weight = self._get_variable(
name, shape=[output_size, input_size])
r += tf.sparse_tensor_dense_matmul(weight, input_, adjoint_b=True)
r = tf.transpose(r)
if add_bias:
# Biases are dense, hence we call _get_variable of the base
# class.
r += super(SparseTiledLinear, self)._get_variable(
'B_{}'.format(output_name), shape=[output_size],
default_initializer=tf.zeros_initializer())
results.append(r)
return results
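# --- Illustrative usage sketch (not part of the original file) ---
# Mirrors the OrderedDict example from the class docstring: weights mapping
# from 'x' share one sparse connectivity pattern and weights from 'h' share
# another. Sizes are arbitrary and the initializer is only for illustration.
def _demo_sparse_tiled_linear(x, h):
  from collections import OrderedDict
  var_init_params = OrderedDict([
      ('W_x_.*', {'sparse_indices_sharing_key': 'x'}),
      ('W_h_.*', {'sparse_indices_sharing_key': 'h'}),
      ('.*', {'sparsity_ratio': 0.5,
              'initializer': tf.random_uniform_initializer(-0.1, 0.1)})
  ])
  tiled = SparseTiledLinear(
      input_name_and_sizes=[('x', 128), ('h', 256)],
      output_name_and_sizes=[('i', 256), ('j', 256), ('f', 256), ('o', 256)],
      var_init_params=var_init_params)
  i, j, f, o = tiled([x, h])
  return i, j, f, o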
# TODO(melisgl): Since computation is the same as in TiledLinear,
# perhaps this should be implemented as a custom getter (see
# tf.get_variable) instead of being tied to tiling.
class OverlaidTiledLinear(TiledLinear):
"""Tiled mapping with weight sharing and low-rank overlays.
To reduce the number of parameters, one may want to share weight
matrices. This class makes that sharing possible in the form of W_1
= s_1*W + a_1*b_1 and W_2 = s_2*W + a_2*b_2 where the s are scalars,
and a*b are low-rank matrices.
`overlay_sharing_key` controls which tiles share the same underlying
  weight matrix. Generally, tiles with the same sharing key and 2D shape
  share the same underlying matrix. There are two special key values:
  `':name:'` and `':shape:'`
that get substituted with the name and shape of the actual tile,
respectively.
  For example, if an LSTM cell maps inputs ('x', 'h') to ('f', 'i', 'j',
'o'), then the following makes all weight matrices from the input
'x' to any of the gates or the candidate update share the underlying
full rank weight matrix.
var_init_params=OrderedDict([
('W_x_i', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': 16}),
('W_x_j', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': 10}),
('W_x_f', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': 8}),
('W_x_o', {'overlay_sharing_key': 'W_x_any',
'overlay_rank': 11}),
])
That is, W_x_i = s_W_x_i * W_x_any + a_W_x_i * b_W_x_i where 's_' is
a scalar, and 'a_', 'b_' are of shape [N, 16], [16, N] respectively.
W_x_j and the other are computed similarly by adding a low-rank
overlay ('a_'*'b_') on top of a shared weight matrix ('W_x_any').
"""
def __init__(self, *args, **kwargs):
super(OverlaidTiledLinear, self).__init__(*args, **kwargs)
self._matrix_cache = {}
def _get_variable(self, name, shape,
default_initializer=None, default_partitioner=None,
default_regularizer=None):
if len(shape) != 2:
return super(OverlaidTiledLinear, self)._get_variable(
name, shape, default_initializer=default_initializer,
default_partitioner=default_partitioner,
default_regularizer=default_regularizer)
else:
rank = self._find_var_init_param(name, 'overlay_rank', 0)
sharing_key = self._find_var_init_param(name, 'overlay_sharing_key',
':name:')
if sharing_key == ':name:':
sharing_key = name
if sharing_key == ':shape:':
sharing_key = shape
if (sharing_key in self._matrix_cache and
not tf.get_variable_scope().reuse):
scaler = super(OverlaidTiledLinear, self)._get_variable(
's_'+name, [shape[1]], default_initializer=tf.ones_initializer())
base = scaler*self._matrix_cache[sharing_key]
else:
base = super(OverlaidTiledLinear, self)._get_variable(
sharing_key, shape, default_initializer=default_initializer,
default_partitioner=default_partitioner,
default_regularizer=default_regularizer)
self._matrix_cache[sharing_key] = base
if rank == 0:
return base
else:
overlay = self._low_rank_matrix(name, rank=rank, shape=shape)
return base+overlay
def _low_rank_matrix(self, name, rank=None, shape=None,
initializer=None, trainable=True):
assert len(shape) == 2
a = super(OverlaidTiledLinear, self)._get_variable(
'a_'+name, [shape[0], rank], default_initializer=initializer)
b = super(OverlaidTiledLinear, self)._get_variable(
'b_'+name, [rank, shape[1]], default_initializer=initializer)
return tf.matmul(a, b)
def valid_var_init_param_keys(self):
return (super(OverlaidTiledLinear, self).valid_var_init_param_keys() +
['overlay_sharing_key', 'overlay_rank'])
```
#### File: lamb/lamb/vocab.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
class Vocab(object):
"""Immutable reversible mappings from strings to integers."""
def __init__(self, tokens, unk=u'<UNK>', eos=u'\u25bc'):
"""Create a Vocab object that maps `tokens` to dense indices."""
self._token_to_index = {}
self._token_to_frequency = {}
self._unk = unk
self._eos = eos
token_to_index = self._token_to_index
token_to_frequency = self._token_to_frequency
# Get the unique tokens from `tokens` that might be a generator.
for token in tokens:
token_to_index[token] = True
token_to_frequency[token] = token_to_frequency.get(token, 0) + 1
token_to_index[unk] = True
token_to_index[eos] = True
# Now that we have a smaller set of tokens, assign ids in sorted
# order for deterministic encoding.
self._index_to_token = [None] * len(token_to_index)
index_to_token = self._index_to_token
i = 0
for token in sorted(list(token_to_index)):
token_to_index[token] = i
index_to_token[i] = token
i += 1
def unk_index(self):
"""Returns the index of the unknown token."""
return self._token_to_index[self._unk]
def eos_index(self):
"""Returns the index of the end-of-sentence token."""
return self._token_to_index[self._eos]
def token(self, index_):
"""The string whose `index()` is `index_` or an IndexError."""
return self._index_to_token[index_]
def __iter__(self):
"""Iterates over tokens in order of indices."""
for i in range(self.size()):
yield self.token(i)
def index_or_unk(self, token):
"""Find the index assigned to `token`.
Args:
token: a string.
Returns:
The index of `token` or `unk_index()` if it is not in the vocabulary.
"""
if token in self._token_to_index:
return self._token_to_index[token]
else:
return self.unk_index()
def size(self):
"""Returns the number of different tokens in the vocabulary."""
return len(self._index_to_token)
def decode(self, ids):
"""Decode a sequence of `ids` with `token()`."""
assert all([0 <= x and x < len(self._index_to_token) for x in ids])
return [self.token(x) for x in ids]
def encode(self, tokens, add_eos=True):
"""Encodes a sentence into a list of token indices.
Args:
tokens: A list of tokens.
add_eos: Whether to add the end of sentence token.
Returns:
A list of integer token indices where `unk_index()` stands for
tokens not found in the vocabulary.
"""
ids = [self.index_or_unk(token) for token in tokens]
if add_eos:
ids += [self.eos_index()]
return ids
def index_frequency(self, index_):
return self._token_to_frequency.get(self.token(index_), 0)
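# --- Illustrative usage sketch (not part of the original file) ---
# A tiny round trip; the token strings are made up.
def _demo_vocab():
  vocab = Vocab(['the', 'cat', 'sat', 'the'])
  ids = vocab.encode(['the', 'dog'])       # 'dog' falls back to unk_index()
  tokens = vocab.decode(ids)               # ends with the EOS symbol
  assert tokens[0] == 'the'
  assert tokens[1] == vocab.token(vocab.unk_index())
  return ids, tokens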
``` |
{
"source": "jkkummerfeld/neural-tagger-tutorial",
"score": 2
} |
#### File: jkkummerfeld/neural-tagger-tutorial/tagger.dy.py
```python
import argparse
import random
import sys
import numpy as np
#### Typically, we would make many of these constants command line arguments and tune using the development set. For simplicity, I have fixed their values here to match Jiang, Liang and Zhang (CoLing 2018).
PAD = "__PAD__"
UNK = "__UNK__"
DIM_EMBEDDING = 100 # DIM_EMBEDDING - number of dimensions in our word embeddings.
LSTM_HIDDEN = 100 # LSTM_HIDDEN - number of dimensions in the hidden vectors for the LSTM. Based on NCRFpp (200 in the paper, but 100 per direction in code)
BATCH_SIZE = 10 # BATCH_SIZE - number of examples considered in each model update.
LEARNING_RATE = 0.015 # LEARNING_RATE - adjusts how rapidly model parameters change by rescaling the gradient vector.
LEARNING_DECAY_RATE = 0.05 # LEARNING_DECAY_RATE - part of a rescaling of the learning rate after each pass through the data.
EPOCHS = 100 # EPOCHS - number of passes through the data in training.
KEEP_PROB = 0.5 # KEEP_PROB - probability of keeping a value when applying dropout.
GLOVE = "../data/glove.6B.100d.txt" # GLOVE - location of glove vectors.
WEIGHT_DECAY = 1e-8 # WEIGHT_DECAY - part of a rescaling of weights when an update occurs.
#### Dynet library imports. The first allows us to configure DyNet from within code rather than on the command line: mem is the amount of system memory initially allocated (DyNet has its own memory management), autobatch toggles automatic parallelisation of computations, weight_decay rescales weights by (1 - decay) after every update, random_seed sets the seed for random number generation.
import dynet_config
dynet_config.set(mem=256, autobatch=0, weight_decay=WEIGHT_DECAY,random_seed=0)
# dynet_config.set_gpu() for when we want to run with GPUs
import dynet as dy
####
# Data reading
def read_data(filename):
#### We are expecting a minor variation on the raw Penn Treebank data, with one line per sentence, tokens separated by spaces, and the tag for each token placed next to its word (the | works as a separator as it does not appear as a token).
"""Example input:
Pierre|NNP Vinken|NNP ,|, 61|CD years|NNS old|JJ
"""
content = []
with open(filename) as data_src:
for line in data_src:
t_p = [w.split("|") for w in line.strip().split()]
tokens = [v[0] for v in t_p]
tags = [v[1] for v in t_p]
content.append((tokens, tags))
return content
def simplify_token(token):
chars = []
for char in token:
#### Reduce sparsity by replacing all digits with 0.
if char.isdigit():
chars.append("0")
else:
chars.append(char)
return ''.join(chars)
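#### A small sanity check (not part of the original tutorial) for the two helpers above. The temporary file path is made up purely for illustration.
def demo_reading():
    example = "Pierre|NNP Vinken|NNP ,|, 61|CD years|NNS old|JJ\n"
    with open("/tmp/tagger_demo.txt", "w") as f:
        f.write(example)
    content = read_data("/tmp/tagger_demo.txt")
    tokens, tags = content[0]
    assert tokens[:2] == ["Pierre", "Vinken"]
    assert tags[:2] == ["NNP", "NNP"]
    # All digits are collapsed to 0 to reduce sparsity.
    assert simplify_token("61") == "00"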
def main():
#### For the purpose of this example we only have arguments for locations of the data.
parser = argparse.ArgumentParser(description='POS tagger.')
parser.add_argument('training_data')
parser.add_argument('dev_data')
args = parser.parse_args()
train = read_data(args.training_data)
dev = read_data(args.dev_data)
#### These indices map from strings to integers, which we apply to the input for our model. UNK is added to our mapping so that there is a vector we can use when we encounter unknown words. The special PAD symbol is used in PyTorch and Tensorflow as part of shaping the data in a batch to be a consistent size. It is not needed for DyNet, but kept for consistency.
# Make indices
id_to_token = [PAD, UNK]
token_to_id = {PAD: 0, UNK: 1}
id_to_tag = [PAD]
tag_to_id = {PAD: 0}
    #### The '+ dev' may seem like an error, but is done here for convenience. It means in the next section we will retain the GloVe embeddings that appear in dev but not train. They won't be updated during training, so it does not mean we are getting information we shouldn't. In practice I would simply keep all the GloVe embeddings to avoid any potential incorrect use of the evaluation data.
for tokens, tags in train + dev:
for token in tokens:
token = simplify_token(token)
if token not in token_to_id:
token_to_id[token] = len(token_to_id)
id_to_token.append(token)
for tag in tags:
if tag not in tag_to_id:
tag_to_id[tag] = len(tag_to_id)
id_to_tag.append(tag)
NWORDS = len(token_to_id)
NTAGS = len(tag_to_id)
# Load pre-trained GloVe vectors
#### I am assuming these are 100-dimensional GloVe embeddings in their standard format.
pretrained = {}
for line in open(GLOVE):
parts = line.strip().split()
word = parts[0]
vector = [float(v) for v in parts[1:]]
pretrained[word] = vector
#### We need the word vectors as a list to initialise the embeddings. Each entry in the list corresponds to the token with that index.
pretrained_list = []
scale = np.sqrt(3.0 / DIM_EMBEDDING)
for word in id_to_token:
# apply lower() because all GloVe vectors are for lowercase words
if word.lower() in pretrained:
pretrained_list.append(np.array(pretrained[word.lower()]))
else:
            #### For words that do not appear in GloVe we generate a random vector (note: the choice of scale here is important and we follow Jiang, Liang and Zhang (CoLing 2018)).
random_vector = np.random.uniform(-scale, scale, [DIM_EMBEDDING])
pretrained_list.append(random_vector)
#### The most significant difference between the frameworks is how the model parameters and their execution is defined. In DyNet we define parameters here and then define computation as needed. In PyTorch we use a class with the parameters defined in the constructor and the computation defined in the forward() method. In Tensorflow we define both parameters and computation here.
# Model creation
####
model = dy.ParameterCollection()
# Create word embeddings and initialise
#### Lookup parameters are a matrix that supports efficient sparse lookup.
pEmbedding = model.add_lookup_parameters((NWORDS, DIM_EMBEDDING))
pEmbedding.init_from_array(np.array(pretrained_list))
# Create LSTM parameters
#### Objects that create LSTM cells and the necessary parameters.
stdv = 1.0 / np.sqrt(LSTM_HIDDEN) # Needed to match PyTorch
f_lstm = dy.VanillaLSTMBuilder(1, DIM_EMBEDDING, LSTM_HIDDEN, model,
forget_bias=(np.random.random_sample() - 0.5) * 2 * stdv)
b_lstm = dy.VanillaLSTMBuilder(1, DIM_EMBEDDING, LSTM_HIDDEN, model,
forget_bias=(np.random.random_sample() - 0.5) * 2 * stdv)
# Create output layer
pOutput = model.add_parameters((NTAGS, 2 * LSTM_HIDDEN))
# Set recurrent dropout values (not used in this case)
f_lstm.set_dropouts(0.0, 0.0)
b_lstm.set_dropouts(0.0, 0.0)
# Initialise LSTM parameters
#### To match PyTorch, we initialise the parameters with an unconventional approach.
f_lstm.get_parameters()[0][0].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN, DIM_EMBEDDING]))
f_lstm.get_parameters()[0][1].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN, LSTM_HIDDEN]))
f_lstm.get_parameters()[0][2].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN]))
b_lstm.get_parameters()[0][0].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN, DIM_EMBEDDING]))
b_lstm.get_parameters()[0][1].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN, LSTM_HIDDEN]))
b_lstm.get_parameters()[0][2].set_value(
np.random.uniform(-stdv, stdv, [4 * LSTM_HIDDEN]))
#### The trainer object is used to update the model.
# Create the trainer
trainer = dy.SimpleSGDTrainer(model, learning_rate=LEARNING_RATE)
#### DyNet clips gradients by default, which we disable here (this can have a big impact on performance).
trainer.set_clip_threshold(-1)
#### To make the code match across the three versions, we group together some framework specific values needed when doing a pass over the data.
expressions = (pEmbedding, pOutput, f_lstm, b_lstm, trainer)
#### Main training loop, in which we shuffle the data, set the learning rate, do one complete pass over the training data, then evaluate on the development data.
for epoch in range(EPOCHS):
random.shuffle(train)
####
# Update learning rate
trainer.learning_rate = LEARNING_RATE / (1+ LEARNING_DECAY_RATE * epoch)
#### Training pass.
loss, tacc = do_pass(train, token_to_id, tag_to_id, expressions, True)
#### Dev pass.
_, dacc = do_pass(dev, token_to_id, tag_to_id, expressions, False)
print("{} loss {} t-acc {} d-acc {}".format(epoch, loss, tacc, dacc))
#### The syntax varies, but in all three cases either saving or loading the parameters of a model must be done after the model is defined.
# Save model
model.save("tagger.dy.model")
# Load model
model.populate("tagger.dy.model")
# Evaluation pass.
_, test_acc = do_pass(dev, token_to_id, tag_to_id, expressions, False)
print("Test Accuracy: {:.3f}".format(test_acc))
#### Inference (the same function for train and test).
def do_pass(data, token_to_id, tag_to_id, expressions, train):
pEmbedding, pOutput, f_lstm, b_lstm, trainer = expressions
# Loop over batches
loss = 0
match = 0
total = 0
for start in range(0, len(data), BATCH_SIZE):
#### Form the batch and order it based on length (important for efficient processing in PyTorch).
batch = data[start : start + BATCH_SIZE]
batch.sort(key = lambda x: -len(x[0]))
#### Log partial results so we can conveniently check progress.
if start % 4000 == 0 and start > 0:
print(loss, match / total)
sys.stdout.flush()
#### Start a new computation graph for this batch.
# Process batch
dy.renew_cg()
#### For each example, we will construct an expression that gives the loss.
loss_expressions = []
predicted = []
#### Convert tokens and tags from strings to numbers using the indices.
for n, (tokens, tags) in enumerate(batch):
token_ids = [token_to_id.get(simplify_token(t), 0) for t in tokens]
tag_ids = [tag_to_id[t] for t in tags]
#### Now we define the computation to be performed with the model. Note that they are not applied yet, we are simply building the computation graph.
# Look up word embeddings
wembs = [dy.lookup(pEmbedding, w) for w in token_ids]
# Apply dropout
if train:
wembs = [dy.dropout(w, 1.0 - KEEP_PROB) for w in wembs]
# Feed words into the LSTM
#### Create an expression for two LSTMs and feed in the embeddings (reversed in one case).
#### We pull out the output vector from the cell state at each step.
f_init = f_lstm.initial_state()
f_lstm_output = [x.output() for x in f_init.add_inputs(wembs)]
rev_embs = reversed(wembs)
b_init = b_lstm.initial_state()
b_lstm_output = [x.output() for x in b_init.add_inputs(rev_embs)]
# For each output, calculate the output and loss
pred_tags = []
for f, b, t in zip(f_lstm_output, reversed(b_lstm_output), tag_ids):
# Combine the outputs
combined = dy.concatenate([f,b])
# Apply dropout
if train:
combined = dy.dropout(combined, 1.0 - KEEP_PROB)
# Matrix multiply to get scores for each tag
r_t = pOutput * combined
# Calculate cross-entropy loss
if train:
err = dy.pickneglogsoftmax(r_t, t)
#### We are not actually evaluating the loss values here, instead we collect them together in a list. This enables DyNet's <a href="http://dynet.readthedocs.io/en/latest/tutorials_notebooks/Autobatching.html">autobatching</a>.
loss_expressions.append(err)
# Calculate the highest scoring tag
#### This call to .npvalue() will lead to evaluation of the graph and so we don't actually get the benefits of autobatching. With some refactoring we could get the benefit back (simply keep the r_t expressions around and do this after the update), but that would have complicated this code.
chosen = np.argmax(r_t.npvalue())
pred_tags.append(chosen)
predicted.append(pred_tags)
# combine the losses for the batch, do an update, and record the loss
if train:
loss_for_batch = dy.esum(loss_expressions)
loss_for_batch.backward()
trainer.update()
loss += loss_for_batch.scalar_value()
####
# Update the number of correct tags and total tags
for (_, g), a in zip(batch, predicted):
total += len(g)
for gt, at in zip(g, a):
gt = tag_to_id[gt]
if gt == at:
match += 1
return loss, match / total
if __name__ == '__main__':
main()
``` |
{
"source": "jkkummerfeld/slate",
"score": 2
} |
#### File: slate/slate/annotate.py
```python
from __future__ import print_function
import argparse
import curses
import datetime
import logging
import string
import sys
from .data import *
from .config import *
from .view import *
class Annotator(object):
def __init__(self, config, filenames, current_mode, args):
self.current_mode = current_mode
self.current_num = None
self.search_term = ''
self.partial_typing = ''
self.cfilename = -1
self.filename = None
self.filenames = filenames
self.datum = None
self.view = None
self.window = None
self.config = config
self.args = args
self.action_to_function = {
'delete-query-char': self.delete_typing_char,
'leave-query-mode': self.leave_typing_mode,
'enter-query-mode': self.enter_typing_mode,
'clear-query': self.clear_query,
'add-to-query': self.add_to_typing,
'delete-label-char': self.delete_typing_char,
'assign-text-label': self.assign_text,
'enter-label-mode': self.enter_typing_mode,
'add-to-label': self.add_to_typing,
'toggle-line-numbers': self.toggle_line_numbers,
'move-up': self.move,
'move-down': self.move,
'move-left': self.move,
'move-right': self.move,
'move-link-up': self.move,
'move-link-down': self.move,
'move-link-left': self.move,
'move-link-right': self.move,
'jump-up': self.move,
'jump-down': self.move,
'jump-left': self.move,
'jump-right': self.move,
'extend-up': self.change_span,
'extend-down': self.change_span,
'extend-left': self.change_span,
'extend-right': self.change_span,
'contract-up': self.change_span,
'contract-down': self.change_span,
'contract-left': self.change_span,
'contract-right': self.change_span,
'extend-link-up': self.change_span,
'extend-link-down': self.change_span,
'extend-link-left': self.change_span,
'extend-link-right': self.change_span,
'contract-link-up': self.change_span,
'contract-link-down': self.change_span,
'contract-link-left': self.change_span,
'contract-link-right': self.change_span,
'search-previous': self.search,
'search-next': self.search,
'search-link-previous': self.search,
'search-link-next': self.search,
'page-up': self.shift_view,
'page-down': self.shift_view,
'toggle-help': self.modify_display,
'toggle-progress': self.modify_display,
'toggle-legend': self.modify_display,
'toggle-current-mark': self.modify_display,
'next-file': self.change_file,
'previous-file': self.change_file,
'quit': self.save_or_quit,
'save-and-quit': self.save_or_quit,
'save': self.save_or_quit,
'create-link': self.create_link,
'create-link-and-move': self.create_link,
'edit-annotation': self.edit_annotation,
'remove-annotation': self.remove_annotation,
'update-num': self.update_number,
}
def move(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
direction = action.split('-')[-1]
jump = 'jump' in action
link = 'link' in action
num = 1
if self.current_num == 0:
jump = True
self.current_num = None
elif self.current_num is not None:
num = self.current_num
self.current_num = None
self.view.move(direction, num, jump, link)
def toggle_line_numbers(self, user_input, action):
self.view.line_numbers = not self.view.line_numbers
def change_span(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
change = action.split('-')[0]
direction = action.split('-')[-1]
# TODO: Support these adjustments to the linking_pos too. This requires
# edits to view and also to config
link = 'link' in action
num = 1
jump = False
if self.current_num == 0:
jump = True
self.current_num = None
elif self.current_num is not None:
num = self.current_num
self.current_num = None
self.view.adjust(direction, num, change, jump, link)
def delete_typing_char(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if self.current_mode[-1] == 'write_query':
self.search_term = self.search_term[:-1]
else:
self.partial_typing = self.partial_typing[:-1]
def leave_typing_mode(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if len(self.current_mode) > 1:
self.current_mode.pop()
def assign_text(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if len(self.current_mode) > 1:
self.current_mode.pop()
self.datum.modify_annotation([self.view.cursor], self.partial_typing)
self.partial_typing = ''
def enter_typing_mode(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if 'query' in action:
self.current_mode.append('write_query')
else:
self.current_mode.append('write_label')
self.partial_typing = ''
def clear_query(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
self.search_term = ''
def add_to_typing(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
char = user_input[0]
if user_input[0] == 'SPACE':
char = ' '
if self.current_mode[-1] == 'write_query':
self.search_term += char
else:
self.partial_typing += char
def change_file(self, user_input, action):
if self.current_mode[-1] != 'no_file':
self.save_or_quit(None, 'save')
direction = 1 if 'next' in action else -1
if self.current_mode[-1] == 'no_file':
if (self.cfilename < 0) == (direction > 0):
self.current_mode.pop()
self.cfilename += direction
elif 0 <= self.cfilename + direction < len(self.filenames):
self.cfilename += direction
self.filename, start_pos, output_file, annotation_files = \
self.filenames[self.cfilename]
self.datum = Datum(self.filename, self.config, output_file, annotation_files)
self.get_view(self.config, self.cfilename, len(self.filenames), start_pos, self.view)
        elif self.current_mode[-1] != 'no_file':
self.cfilename += direction
self.current_mode.append('no_file')
def modify_display(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if 'help' in action:
self.view.toggle_help()
elif 'progress' in action:
self.view.toggle_progress()
elif 'legend' in action:
self.view.toggle_legend()
elif 'current-mark' in action:
self.view.toggle_current_mark()
def shift_view(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if 'up' in action:
self.view.shift_view()
else:
self.view.shift_view(True)
def update_number(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
num = int(user_input[0])
if self.current_num is None:
self.current_num = 0
else:
self.current_num *= 10
self.current_num += num
def remove_annotation(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if self.current_mode[-1] != 'read':
spans = [self.view.cursor]
if self.current_mode[-1] == 'link':
spans = [self.view.linking_pos]
self.datum.remove_annotation(spans)
def edit_annotation(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
if self.current_mode[-1] == 'category':
label = self.config.get_label_for_input(user_input)
self.datum.modify_annotation([self.view.cursor], label)
def create_link(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
self.datum.modify_annotation([self.view.cursor, self.view.linking_pos])
if 'and-move' in action:
if self.config.annotation == 'line':
self.view.move('down', 1, False, True)
self.view.put_cursor_beside_link()
else:
self.view.move('right', 1, False, True)
self.view.put_cursor_beside_link()
self.view.must_show_linking_pos = True
def save_or_quit(self, user_input, action):
if 'save' in action:
if self.current_mode[-1] != 'read':
self.datum.write_out()
# TODO: Save both cursor and linking pos
if 0 <= self.cfilename < len(self.filenames):
cur = self.filenames[self.cfilename]
pos = self.view.cursor
if self.config.annotation_type == 'link':
pos = self.view.linking_pos
self.filenames[self.cfilename] = (cur[0], pos, cur[2], cur[3])
if 'quit' in action:
if 'save' not in action:
# TODO: Have an 'are you sure?' step
pass
return 'quit'
def search(self, user_input, action):
if self.current_mode[-1] == 'no_file':
return
direction = action.split('-')[-1]
jump = False
link = 'link' in action
num = 1
if self.current_num == 0:
jump = True
self.current_num = None
elif self.current_num is not None:
num = self.current_num
self.current_num = None
if len(self.search_term) > 0:
self.view.search(self.search_term, direction, num, jump, link)
else:
# Used when comparing files to go to the next/previous annotation
# Or when not comparing files to go to the next unannotated thing
# When there is nothing unannotated, it jumps to the next self-linked item
self.view.search(None, direction, num, jump, link)
def input_to_symbol(self, num):
if num in key_to_symbol:
return key_to_symbol[num]
else:
return "UNKNOWN"
def get_view(self, config, file_num, total_files, position, prev_view=None):
cursor = position
link = position if self.config.annotation_type == 'link' else None
self.view = View(self.window, cursor, link, self.datum, self.config, file_num, total_files, prev_view)
def annotate(self, window_in):
self.window = window_in
# Set color combinations
curses.use_default_colors()
for num, fore, back in COLORS:
curses.init_pair(num, fore, back)
# No blinking cursor
curses.curs_set(0)
self.cfilename = 0
self.filename, start_pos, output_file, annotation_files = self.filenames[self.cfilename]
self.datum = Datum(self.filename, self.config, output_file, annotation_files)
self.get_view(self.config, self.cfilename, len(self.filenames), start_pos)
if not self.args.hide_help:
self.view.toggle_help()
last_num = None
at_end = None
nsteps = 0
user_input = []
while True:
# Draw screen
if self.current_mode[-1] == 'no_file':
self.view.render_edgecase(self.cfilename >= 0)
else:
# Set current search term appearance
tmp_term = self.search_term
if self.current_mode[-1] == 'write_query':
tmp_term = '\\'+ tmp_term
self.view.render(tmp_term, self.partial_typing)
self.view.must_show_linking_pos = False
# Get input
ch = self.window.getch()
next_user_input = self.input_to_symbol(ch)
logging.debug("Input {} converted to {} in mode {}".format(ch, next_user_input, self.current_mode))
user_input.append(next_user_input)
tuser_input = tuple(user_input)
if (self.current_mode[-1], tuser_input) not in self.config.valid_prefixes:
if (None, tuser_input) not in self.config.valid_prefixes:
if (self.current_mode[-1], tuser_input) not in self.config.input_to_action:
if (None, tuser_input) not in self.config.input_to_action:
user_input = [next_user_input]
tuser_input = (next_user_input,)
nsteps += 1
if nsteps % 100 == 0 and self.current_mode[-1] == 'category':
self.datum.write_out()
# Determine what to do for the input
action = None
function = None
if (self.current_mode[-1], tuser_input) in self.config.input_to_action:
action = self.config.input_to_action[self.current_mode[-1], tuser_input]
if action in self.action_to_function:
function = self.action_to_function[action]
elif (None, tuser_input) in self.config.input_to_action:
action = self.config.input_to_action[None, tuser_input]
if action in self.action_to_function:
function = self.action_to_function[action]
logging.debug("{} {} -> {} {}".format(self.current_mode, tuser_input, action, function))
# Do it!
if function is not None:
outcome = function(tuser_input, action)
user_input = []
if outcome == 'quit':
break
# Clear the screen in preparation for rendering it again
self.window.clear()
# Write out information for continuing annotation later
out_filename = self.args.log_prefix + '.todo'
out = open(out_filename, "w")
for fname, start_pos, output_file, annotation_files in self.filenames:
parts = [
fname,
output_file,
str(start_pos), # TODO - simplified value here
' '.join(annotation_files)
]
print(" ".join(parts), file=out)
out.close()
def ext_annotate(window_in, annotator):
annotator.annotate(window_in)
def main():
stime = datetime.datetime.now().strftime('%Y-%m-%d.%H-%M-%S')
parser = argparse.ArgumentParser(
description='A tool for annotating text data.',
fromfile_prefix_chars='@')
parser.add_argument('data', nargs="*",
help='Files to be annotated')
parser.add_argument('-d', '--data-list', nargs="+",
help='Files containing lists of files to be annotated')
parser.add_argument('-t', '--ann-type',
choices=['categorical', 'link'],
default='categorical',
help='The type of annotation being done.')
parser.add_argument('-s', '--ann-scope',
choices=['character', 'token', 'line', 'document'],
default='token',
help='The scope of annotation being done.')
parser.add_argument('-c', '--config-file',
help='A file containing configuration information.')
parser.add_argument('-l', '--log-prefix',
default="annotation_log."+ stime,
help='Prefix for logging files')
parser.add_argument('-ld', '--log-debug',
action='store_true',
help='Provide detailed logging.')
parser.add_argument('-hh', '--hide-help',
action='store_true',
help='Do not show help on startup.')
parser.add_argument('-r', '--readonly',
action='store_true',
help='Do not allow changes or save annotations.')
parser.add_argument('-o', '--overwrite',
default=False,
action='store_true',
help='If they exist already, read and overwrite output files.')
parser.add_argument('-ps', '--prevent-self-links',
default=False,
action='store_true',
help='Prevent an item from being linked to itself.')
parser.add_argument('-pf', '--prevent-forward-links',
default=False,
action='store_true',
help='Prevent a link from an item to one after it.')
parser.add_argument('--do-not-show-linked',
default=False,
action='store_true',
help='Do not have a special color to indicate any linked token.')
parser.add_argument('--alternate-comparisons',
default=False,
action='store_true',
help='Activate alternative way of showing different annotations '
'(one colour per set of markings, rather than counts).')
args = parser.parse_args()
if len(args.data) == 0 and args.data_list is None:
parser.error("No filenames or data lists provided")
# Set up logging
logging_level = logging.DEBUG if args.log_debug else logging.INFO
logging.basicConfig(filename=args.log_prefix + '.log', level=logging_level)
logging.info("Executed with: {}".format(' '.join(sys.argv)))
logging.info("Arguments interpreted as: {}".format(args))
if logging_level == logging.INFO:
sys.tracebacklimit = 1
### Process configuration
config = None
if args.config_file is not None:
config = Config(args)
else:
config = Config(args,
{
'label:a': (('SPACE', 'a'), 'green'),
'label:s': (('SPACE', 's'), 'blue'),
'label:d': (('SPACE', 'd'), 'magenta'),
'label:v': (('SPACE', 'v'), 'red'),
}
)
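        # Hedged illustration of the mapping format above: a hypothetical extra
        # entry such as
        #     'label:f': (('SPACE', 'f'), 'yellow'),
        # would bind the key sequence SPACE then 'f' to a label rendered in yellow.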
file_info = args.data
if args.data_list is not None:
for filename in args.data_list:
if len(glob.glob(filename)) == 0:
raise Exception("Cannot open / find '{}'".format(filename))
for line in open(filename):
file_info.append(line.strip())
filenames = process_fileinfo(file_info, config)
if len(filenames) == 0:
print("Found no files")
sys.exit(0)
config_out = open(args.log_prefix + '.config', 'w')
print(config, file=config_out)
config_out.close()
# Set the current mode
current_mode = []
if args.readonly:
current_mode.append('read')
elif args.ann_type == 'categorical':
current_mode.append('category')
elif args.ann_type == 'link':
current_mode.append('link')
### Start interface
annotator = Annotator(config, filenames, current_mode, args)
curses.wrapper(ext_annotate, annotator)
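# Standard entry-point guard (an assumption: the file as shown never calls
# main(), so this makes the script runnable directly; it is a no-op when the
# module is imported or launched through another entry point).
if __name__ == '__main__':
    main()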
``` |
{
"source": "jkl1337/ankisport",
"score": 2
} |
#### File: jkl1337/ankisport/exporter.py
```python
import json
import subprocess
import textwrap
from collections import defaultdict
from datetime import datetime
from itertools import islice, izip
import codecs
import os
import re
from anki.exporting import Exporter
from anki.utils import splitFields, ids2str
from aqt.utils import showWarning
import pytoml as toml
class keydefaultdict(defaultdict):
"""
A defaultdict with a custom default_factory that passes the key as an argument
"""
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
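# Hedged usage sketch (illustration only; this helper is never called by the
# addon): keydefaultdict passes the missing key itself to the factory.
def _keydefaultdict_example():
    squares = keydefaultdict(lambda k: k * k)  # factory receives the key
    assert squares[4] == 16                    # computed once, then cached
    return squares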
class TOMLGenerator(object):
DATETIME_ISO8601_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
escape_re = re.compile(r'([\x00-\x1f"\\])')
escape_re_sub_tab = {'\t': 't', '\n': 'n', '\"': '"', '\r': 'r', '\\': '\\', '\f': 'f', '\b': 'b', '"""': r'"""'}
ml_escape_re = re.compile(r'([\x00-\x09\x0b-\x1f\\]|""")')
ws_match_re = re.compile(r'^[\t ]+')
def __init__(self, output):
self.output = output
self.text_wrapper = textwrap.TextWrapper(width=120, expand_tabs=False, replace_whitespace=False, drop_whitespace=False)
@classmethod
def escape_string(cls, s):
escape_re_sub_tab = cls.escape_re_sub_tab
return cls.escape_re.sub(lambda c: '\\' + (escape_re_sub_tab.get(c.group(1), None)
or ('u%.4x' % ord(c.group(1)))), s)
def write_escaped_string(self, s):
self.output.write(self.escape_string(s))
def write_multiline_escaped_string(self, s):
escape_re_sub_tab = self.escape_re_sub_tab
self.output.write(
self.ml_escape_re.sub(lambda c: '\\' + (escape_re_sub_tab.get(c.group(1), None)
or ('u%.4x' % ord(c.group(1)))), s))
def wrap_text(self, s, offset):
tw = self.text_wrapper
tw.initial_indent = ' ' * offset
lines = []
for para in s.splitlines(True):
line = tw.wrap(para)
if tw.initial_indent and line:
line[0] = line[0][offset:]
lines.extend(line)
tw.initial_indent = ''
return lines
def write_string(self, line_offset, v):
output = self.output
ws_match_re = self.ws_match_re
lines = self.wrap_text(v, 0)
if len(lines) == 0:
output.write('""\n')
return
elif len(lines) == 1:
multiline = False
            # Prefer literal ('...') strings since they need no escaping, but they
            # cannot be used if the text contains a single quote (or ''' for the
            # multi-line literal form); control characters force escaping instead.
use_literal_string = use_multiline_literal = False
if not re.search(r"[\x00-\x1f\x7f\x80-\x9f]", lines[0]):
use_literal_string = lines[0].find("'") == -1
use_multiline_literal = not use_literal_string and lines[0].find("'''") == -1
else:
multiline = True
if multiline:
output.write('"""\n')
self.write_multiline_escaped_string(lines[0])
trailing_newline = lines[0][-1] == u'\n'
trailing_quote = False
for line in islice(lines, 1, None):
# fix up previous line, appending any leading whitespace on this line since TOML will ignore
# leading whitespace after a continuation
leading_white_space = ws_match_re.match(line)
if leading_white_space:
ws = leading_white_space.group()
trailing_newline = ws[-1] == u'\n'
self.write_multiline_escaped_string(ws)
line = line[leading_white_space.end():]
if not trailing_newline:
output.write('\\\n')
self.write_multiline_escaped_string(line)
trailing_newline = line[-1] == u'\n'
trailing_quote = line[-1] == u'"'
output.write('"""\n' if not trailing_quote else '\\\n"""\n')
else:
if use_literal_string:
output.write("'%s'\n" % lines[0])
elif use_multiline_literal:
output.write("'''%s'''\n" % lines[0])
else:
output.write('"')
self.write_escaped_string(lines[0])
output.write('"\n')
def write_bool(self, line_offset, v):
self.output.write('true' if v else 'false')
self.output.write('\n')
def write_integer(self, line_offset, v):
self.output.write(str(v))
self.output.write('\n')
write_float = write_integer
def write_datetime(self, line_offset, v):
self.output.write(v.strftime(self.DATETIME_ISO8601_FORMAT))
self.output.write('\n')
VALUE_MAP = {unicode: write_string, bool: write_bool, int: write_integer, long: write_integer, float: write_float, datetime: write_datetime}
def write_value(self, line_offset, v):
for t, c in self.VALUE_MAP.iteritems():
if isinstance(v, t):
return c(self, line_offset, v)
def write_key_value(self, k, v):
if re.search(r"[^A-Za-z0-9_-]", k):
ko = '"%s" = ' % self.escape_string(k)
else:
ko = '%s = ' % k
self.output.write(ko)
self.write_value(len(ko), v)
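# Hedged usage sketch (illustration only; this helper is never called by the
# addon and the field values are made up): each write_key_value call emits one
# "key = value" TOML line on the supplied stream.
def _toml_generator_example():
    from StringIO import StringIO  # Python 2, matching the rest of this module
    buf = StringIO()
    gen = TOMLGenerator(buf)
    gen.write_key_value(u'front', u'What is the capital of France?')
    gen.write_key_value(u'note-id', 1404)
    return buf.getvalue()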
class OutputModel(object):
def __init__(self, models, mid):
model = models.get(mid)
field_names = models.fieldNames(model)
field_set = set(field_names)
def transform_name(name):
"""
Transform names to be unquoted TOML key friendly.
Do it only if it will not cause ambiguity.
"""
n = name.lower().replace(' ', '-')
return n if n not in field_set else name
self.field_names = [transform_name(fn) for fn in field_names]
self.name = model['name']
self.model = model
class TOMLNoteExporter(Exporter):
key = _("Notes in TOML format")
ext = ".toml"
def __init__(self, col, query=None, sets=None, set_name=''):
"""
Create a TOML Note Exporter.
:param col: The anki collection object.
:param query: An anki filter string to select notes for export.
:param sets: A set of tags to break the cards into smaller files.
"""
Exporter.__init__(self, col)
self.query = query
self.sets = sets
self.set_name = set_name
    def exportInto(self, path):
        # doExport opens its own output files (via codecs.open), so it needs
        # the filesystem path itself rather than an already-open file object.
        self.doExport(path)
def doExport(self, path, verify=False):
models = self.col.models
output_models = keydefaultdict(lambda mid: OutputModel(models, mid))
count = 0
grouped_notes = []
sets = None
if self.query is not None:
if self.sets:
sets = self.sets
for group_name, expr in sets.items():
grouped_notes.append((group_name, self.col.findNotes('(%s) (%s)' % (self.query, expr))))
else:
grouped_notes.append(('', self.col.findNotes('%s' % self.query)))
else:
grouped_notes.append(('', self.cardIds()))
paths = []
for group_name, note_ids in grouped_notes:
if group_name:
dirname, _ = os.path.split(path)
cur_path = os.path.join(dirname, group_name + '.toml')
else:
cur_path = path
with codecs.open(cur_path, 'w', encoding='utf-8') as output:
paths.append(cur_path)
generator = TOMLGenerator(output)
for guid, flds, mid, tags in self.col.db.execute(r"""
SELECT guid, flds, mid, tags FROM notes
WHERE id IN %s
ORDER BY sfld""" % ids2str(note_ids)):
field_data = splitFields(flds)
cur_model = output_models[mid]
output.write('[[notes]]\n')
output.write("model = '%s'\n" % cur_model.name)
output.write("guid = '%s'\n" % guid)
for i, name in enumerate(cur_model.field_names):
f = field_data[i]
if name == u'note-id':
try:
f = int(f)
except ValueError:
pass
generator.write_key_value(name, f)
tags = self.fixup_tags(tags)
generator.write_key_value(u'tags', tags)
output.write('\n')
count += 1
mode = 'a' if not sets else 'w'
filtered_models = []
for v in output_models.values():
n = v.model.copy()
# not sure the importance of this value and it leaks unwanted data
n['tags'] = []
n.pop('req', None)
filtered_models.append(n)
with codecs.open(path, mode, encoding='utf-8') as output:
data = {'models': filtered_models}
toml.dump(output, data)
if verify:
self.verify(paths)
self.count = count
return True
re_tag_fixup = re.compile(r'(?:marked|leech)(\s+|\Z)')
@classmethod
def fixup_tags(cls, tags):
tags = cls.re_tag_fixup.sub('', tags)
return tags.strip()
def verify(self, paths):
"""
lel at this shitty verify function
"""
p1 = subprocess.Popen(['cat'] + paths, stdout=subprocess.PIPE)
p2 = subprocess.Popen(['tomljson'], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
exp_data = json.loads(p2.communicate()[0])
notes = exp_data['notes']
note_tbl = {}
for n in notes:
note_tbl[n['note-id']] = n
for flds, in self.col.db.execute(r"""
SELECT flds FROM notes
WHERE id IN %s""" % ids2str(note_tbl.keys())):
flds = splitFields(flds)
nid = flds[0]
want = flds[1]
n = note_tbl[int(nid)]
if want != n['text']:
showWarning('Mismatch text %s\n\nWant %s\n\nGot %s' % (nid, repr(want), repr(n['text'])))
want = flds[2]
if want != n['extra']:
showWarning('Mismatch extra %s\n\nWant %s\n\nGot %s' % (nid, repr(want), repr(n['extra'])))
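# Hedged usage sketch (illustration only; the deck name, tag expressions and
# output path are placeholders and this helper is never called by the addon):
def _toml_note_exporter_example(col):
    exporter = TOMLNoteExporter(col, query=u'deck:"My Deck"',
                                sets={u'verbs': u'tag:verb', u'nouns': u'tag:noun'})
    exporter.exportInto(u'/tmp/my-deck.toml')  # notes go to verbs.toml / nouns.toml alongside
    return exporter.count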
``` |
{
"source": "JKL404/Object_Detection_using_OpenCV",
"score": 2
} |
#### File: JKL404/Object_Detection_using_OpenCV/main.py
```python
import os
from app import app
import urllib.request
from werkzeug.utils import secure_filename
from flask import Flask, flash, request, redirect, url_for, render_template
@app.route('/')
def home():
return render_template('index.html')
@app.route('/textdetect/')
def upload_form():
return render_template('upload.html')
@app.route('/textdetect/', methods=['POST'])
def upload_image():
#Code to run main scan file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
    # Font, timing and model setup (the webcam itself is opened further below)
font = cv2.FONT_HERSHEY_SIMPLEX
starting_time = time.time()
frame_id = 0
net = cv2.dnn.readNet("./weights/yolov3-tiny.weights", "./configuration/yolov3-tiny.cfg")
### Change here for custom classes for trained model
classes = []
mylist = []
flag = 0
with open("./configuration/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
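    # Hedged illustration of the "custom classes" note above (the paths and
    # labels here are hypothetical, not files shipped with this project):
    #     net = cv2.dnn.readNet("./weights/custom.weights", "./configuration/custom.cfg")
    #     classes = ["helmet", "no-helmet"]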
# Load webcam
cap = cv2.VideoCapture(0)
colors = np.random.uniform(0, 255, size=(len(classes), 3))
while 1:
_, img = cap.read()
frame_id += 1
img = cv2.resize(img,(1280,720))
hight,width,_ = img.shape
blob = cv2.dnn.blobFromImage(img, 1/255,(416,416),(0,0,0),swapRB = True,crop= False)
net.setInput(blob)
output_layers_name = net.getUnconnectedOutLayersNames()
layerOutputs = net.forward(output_layers_name)
boxes =[]
confidences = []
class_ids = []
for output in layerOutputs:
for detection in output:
score = detection[5:]
class_id = np.argmax(score)
confidence = score[class_id]
if confidence > 0.1:
center_x = int(detection[0] * width)
center_y = int(detection[1] * hight)
w = int(detection[2] * width)
h = int(detection[3]* hight)
x = int(center_x - w/2)
y = int(center_y - h/2)
boxes.append([x,y,w,h])
confidences.append((float(confidence)))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes,confidences, 0.8, 0.3)
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
confidence = confidences[i]
color = colors[class_ids[i]]
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
cv2.putText(img, label + " " + str(round(confidence, 2)), (x, y + 30), font, 3, color, 3)
flag=0
for ls in mylist:
                    if ls == label:
flag=1
if flag != 1:
mylist.append(label)
elapsed_time = time.time() - starting_time
fps = frame_id / elapsed_time
cv2.putText(img, "FPS: " + str(round(fps, 2)), (40, 670), font, .7, (0, 255, 255), 1)
cv2.putText(img, "press [esc] to exit", (40, 690), font, .45, (0, 255, 255), 1)
cv2.imshow("Image", img)
key = cv2.waitKey(1)
if key == 27:
print("[button pressed] ///// [esc].")
print("[feedback] ///// Videocapturing succesfully stopped")
break
cap.release()
cv2.destroyAllWindows()
return render_template('message.html' , itemss=mylist )
if __name__ == "__main__":
app.run()
``` |
{
"source": "jklaiho/django-class-fixtures",
"score": 3
} |
#### File: django-class-fixtures/class_fixtures/models.py
```python
try:
from collections import OrderedDict # Python 2.7 onwards
except ImportError:
from class_fixtures.utils.ordereddict import OrderedDict
from collections import Iterable
from django.db import models, router
from django.db.models.fields.related import (
SingleRelatedObjectDescriptor as srod,
ManyRelatedObjectsDescriptor as mrod,
ReverseSingleRelatedObjectDescriptor as rsrod,
ReverseManyRelatedObjectsDescriptor as rmrod,
)
from class_fixtures.exceptions import FixtureUsageError, RelatedObjectError
try:
from milkman.dairy import milkman
except ImportError:
milkman = None
__all__ = ['Fixture']
class Fixture(object):
"""
A class-based fixture. Relies on the overridden ``loaddata`` command of
django-class-fixtures.
Each ``Fixture`` instance is tied to one model class. Instances have an
``add`` method which receives a primary key value followed by the same
keyword arguments that would be used to create instances of that model.
You can have multiple fixtures per model, like so::
admin_fixture = Fixture(User)
staff_fixture = Fixture(User)
You can then ``add()`` different sets of User objects to each.
Fixture instances have a ``load`` method, which does the work of actually
saving the model objects and their relations into the database.
For the full details, see the documentation.
"""
def __init__(self, model, raw=False):
# PK values as keys, object definition kwargs as values.
# Populated by add() calls.
self._kwarg_storage = OrderedDict()
# Stores references to Fixture instances that need to be loaded
# before this one. Populated by add() calls.
self._dependencies = []
# Set to False just before the first loading attempt.
self._adding_allowed = True
# Enable DeserializedObject-like raw saves that bypass custom save
# methods (which Django's loaddata does)
self.raw = raw
# Allow for custom model classes, not just models.Model subclasses.
if isinstance(model, models.base.ModelBase):
self.model = model
else:
raise TypeError('%s is not a Django model class' % model.__name__)
def add(self, *args, **kwargs):
"""
A tiny gatekeeper method. Checks that ``args`` contains precisely one
item, assumed to be a valid primary key for an instance of
``self.model``. See ``_add`` for the actual instance adding
functionality.
The reason this method exists is that running some_fixture.add()
without the positional PK parameter (a common mistake in handmade
fixtures) raises an unhelpful TypeError that can't be caught in _add.
"""
if len(args) != 1:
raise FixtureUsageError('Fixture.add() must receive a primary key value as its single positional argument')
self._add(*args, **kwargs)
def _add(self, pk, **kwargs):
"""
Adds model instance definitions to the Fixture instance. Does *not*
write anything to the database (that is done when the ``load`` method
gets run later).
The ``pk`` parameter is the hard-coded primary key for the object.
Hard-coding is needed for accuracy with how Django's loaddata works
(assumes serialized primary keys, overwrites existing objects when
running loaddata).
The remaining keyword arguments are very close to those you would you
would give to a model instance constructor or a manager's ``create``
method. This is too complex a topic for this docstring, see the
documentation on defining relations between objects and more.
"""
if not self._adding_allowed:
raise FixtureUsageError('Cannot add more objects to a loaded fixture')
if pk in self._kwarg_storage:
raise FixtureUsageError('Primary key %s already added to another object in the same fixture.' % pk)
definitions = self._build_relations(**kwargs)
definitions.update({'pk': pk})
self._kwarg_storage[pk] = definitions
def add_random(self, pk, **kwargs):
"""
Creates randomly generated model instances using Milkman, if it is
installed. Raises a FixtureUsageError if not.
Since the point of this integration is to be able to easily and safely
mix generated and predefined Fixture instances, we still require
explicit primary keys (milkman does not).
"""
if milkman is None:
raise FixtureUsageError('Milkman is not installed, Fixture.add_random() not available.')
if pk in self._kwarg_storage:
raise FixtureUsageError('Primary key %s already added to another object in the same fixture.' % pk)
definitions = self._build_relations(**kwargs)
definitions.update({'pk': pk})
self._kwarg_storage[pk] = DelayedMilkmanDelivery(**definitions)
def _build_relations(self, **kwargs):
for fieldname, value in kwargs.items():
field_is_m2m = False
# The name given to a ManyToManyField in a model definition will
# actually become a descriptor with that name. In the model where
# the field is included in, it becomes a
# ReverseManyRelatedObjectsDescriptor. In the "target" model, a
# ManyRelatedObjectsDescriptor (foobar_set by default) is created.
# See if the field name we're examining is either of those.
if any([isinstance(getattr(self.model, fieldname, None), m2m_descriptor) for m2m_descriptor in [mrod, rmrod]]):
field_is_m2m = True
# M2Ms must be expressed as iterables (iterables of iterables
# in case of natural keys). A single natural key tuple will
# pass this check, but fail another one later on.
if not isinstance(value, Iterable):
raise FixtureUsageError('Non-iterable value %s passed to '\
'the "%s" M2M field in an add() call.' % (value, fieldname))
# Case 1: Relations to objects that don't yet exist but are
# defined in this or some other Fixture instance. They are
# represented as DelayedRelatedObjectLoader instances.
# Normalize to a list to keep the logic below simpler and DRYer
if not isinstance(value, Iterable):
value_list = [value]
else:
value_list = value
if any([isinstance(v, DelayedRelatedObjectLoader) for v in value_list]):
# Add the Fixture instances that DelayedRelatedObjectLoaders
# point to as dependencies that need to be loaded before this
# Fixture.
for v in value_list:
if isinstance(v, DelayedRelatedObjectLoader):
if self in v.fixture_instance._dependencies:
                            raise RelatedObjectError('Circular dependency between '\
                                'Fixture instances for %s and %s' %
                                (self.model, v.fixture_instance.model))
if v.fixture_instance not in self._dependencies and v.fixture_instance != self:
self._dependencies.append(v.fixture_instance)
# Case 2: Relating to pre-existing objects, not ones getting
# created in the fixture loading process.
# The model class itself won't have attributes named after
# its fields, except the descriptors created by FK/M2M/O2O
# fields, which are precisely what we're after here.
descriptor = getattr(self.model, fieldname, None)
# Find the other end of the relation
if descriptor:
if any([isinstance(descriptor, rev_rel_descriptor) for rev_rel_descriptor in [rsrod, rmrod]]):
# fieldname refers to a descriptor in the model that
# contains the FK/M2M/O2O field definition
other_model = descriptor.field.related.parent_model
elif any([isinstance(descriptor, rel_descriptor) for rel_descriptor in [srod, mrod]]):
# fieldname refers to the automatically created
# attribute in the target model of the FK/M2M/O2O
other_model = descriptor.related.model
else:
from django.db.models.fields.related import ForeignRelatedObjectsDescriptor
if isinstance(descriptor, ForeignRelatedObjectsDescriptor):
raise RelatedObjectError('Cannot define foreign key relation from the target end')
else:
raise RelatedObjectError('Unknown descriptor-related '\
'error condition. Please file a bug report for '\
'django-class-fixtures.')
# Turn any values that don't evaluate to boolean False and are
# not DelayedRelatedObjectLoaders into RelatedObjectLoader
# instances.
if value:
if not field_is_m2m and not isinstance(value, DelayedRelatedObjectLoader):
kwargs.update({fieldname: RelatedObjectLoader(other_model, value)})
elif field_is_m2m:
loaders = []
for v in value_list:
if not isinstance(v, DelayedRelatedObjectLoader):
loaders.append(RelatedObjectLoader(other_model, v))
else:
loaders.append(v)
kwargs.update({fieldname: loaders})
return kwargs
def load(self, using=None):
"""
Creates model instances from the stored definitions and writes them
to the database.
You generally won't run this method by hand, as it's handled by the
fixture discovery and loading process of the overridden ``loaddata``
command.
Returns the number of objects saved to the database.
"""
self._adding_allowed = False
saved_objects = {}
# Load any unloaded dependencies of this instance first
for dep in self._dependencies:
saved_objects.update(dep.load(using=using))
# Offload the actual processing to a FixtureLoader instance
fl = FixtureLoader(self._kwarg_storage, self)
saved_objects.update(fl.load(using=using, raw=self.raw))
fl.create_m2m_relations(using=using)
return saved_objects
def get_object_by_pk(self, pk, using=None):
try:
return self.model._default_manager.db_manager(using).get(pk=pk)
except self.model.DoesNotExist:
raise RelatedObjectError('No %s object found with the primary key %s'\
% (self.model._meta.object_name, pk))
def _create_delayed_relation(self, pk):
"""
Places DelayedRelatedObjectLoader instances as value placeholders in
model definition kwargs. The ``load`` method will later parse these
into the actual related objects.
``fk``, ``m2m`` and ``o2o`` are functionally identical aliases of this
method to make fixture construction more self-documenting.
"""
return DelayedRelatedObjectLoader(self, pk)
fk = m2m = o2o = _create_delayed_relation
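# Hedged usage sketch (illustration only; it uses Django's stock auth models and
# is never called by this module): two Fixture instances wired together with a
# delayed M2M reference, the pattern described in the docstrings above.
def _fixture_example():
    from django.contrib.auth.models import Group, User
    groups = Fixture(Group)
    groups.add(1, name='editors')
    users = Fixture(User)
    # groups.m2m(1) is a placeholder resolved at load time; it also registers
    # `groups` as a dependency that loads before `users`.
    users.add(1, username='alice', groups=[groups.m2m(1)])
    return users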
class FixtureLoader(object):
"""
A utility class, throwaway instances of which are generated by
``Fixture.load``. Constructs objects from the ``kwarg_storage``
OrderedDict, saves them to the database and builds any M2M relations.
Enables keeping Fixture instances state-free regarding actual
created objects.
"""
def __init__(self, kwarg_storage, fixture_instance):
self.kwarg_storage = kwarg_storage
self.fixture_instance = fixture_instance
# PKs as keys, dictionaries of all the M2M fields to which objects
# need to be added to as values.
self._pending_m2m = {}
# PKs as keys, saved objects as values.
self.saved = OrderedDict()
def load(self, using=None, raw=False):
"""
Does the actual work of creating objects from definitions stored in
Fixture instances.
Returns the number of objects saved to the database.
"""
# Replace ObjectLoaders with the actual objects
for pk, model_def in self.kwarg_storage.items():
resolved_def = dict()
for fieldname, value in model_def.items():
# Do the magic of allowing M2M relation creation through an
# iterable of values inlined in the object definition kwargs.
# See if the field name is listed in the Model's M2M field
# list. If yes, replace the assignment with a proper post-save
# M2M addition.
if any([isinstance(getattr(self.fixture_instance.model, fieldname, None), m2m_descriptor) for m2m_descriptor in [mrod, rmrod]]):
if isinstance(model_def, DelayedMilkmanDelivery):
# Milkman handles explicit M2Ms itself, no need to
# add to the list of relations created later. Just
# resolve to the real objects.
resolved_def[fieldname] = []
if isinstance(value, Iterable) and all([isinstance(v, ObjectLoader) for v in value]):
for v in value:
resolved_def[fieldname].append(v.get_related_object(using=using))
else:
raise RelatedObjectError('Invalid argument "%s" to a ManyToMany field' % value)
else:
# Save the M2M relations for later saving
if pk not in self._pending_m2m:
self._pending_m2m[pk] = {}
if fieldname not in self._pending_m2m[pk]:
self._pending_m2m[pk][fieldname] = []
# The value assigned to the field can be either a single
# M2M placeholder or an iterable of them.
if isinstance(value, Iterable) and all([isinstance(v, ObjectLoader) for v in value]):
for v in value:
self._pending_m2m[pk][fieldname].append(v.get_related_object(using=using))
else:
raise RelatedObjectError('Invalid argument "%s" to a ManyToMany field' % value)
else:
# The field is not an M2M and thus supports
# "fieldname=value" assignment, so just get a reference to
# an actual object to replace the placeholder.
if isinstance(value, ObjectLoader):
resolved_def[fieldname] = value.get_related_object(using=using)
else:
resolved_def[fieldname] = value
# Safe to modify, since we're iterating over the items() output,
# not the dictionary itself.
self.kwarg_storage[pk] = resolved_def
if router.allow_syncdb(using, self.fixture_instance.model):
if isinstance(model_def, DelayedMilkmanDelivery):
self.saved[pk] = milkman.deliver(self.fixture_instance.model, **resolved_def)
else:
if raw:
# See the documentation on "raw mode" for an explanation
obj = self.fixture_instance.model(**resolved_def)
models.Model.save_base(obj, using=using, raw=True)
self.saved[pk] = obj
else:
obj = self.fixture_instance.model(**resolved_def)
obj.save(using=using)
self.saved[pk] = obj
return self.saved
def create_m2m_relations(self, using=None):
"""
Writes any pending M2M relations to the database after the objects
that are to relate to each other have been saved.
"""
for pk, relations in self._pending_m2m.items():
obj = self.fixture_instance.get_object_by_pk(pk, using=using)
for rel_name, targets in relations.items():
for target in targets:
getattr(obj, rel_name).add(target)
class ObjectLoader(object):
"""
No-op base class for DelayedRelatedObjectLoader and RelatedObjectLoader,
used to aid in ``isinstance`` calls in ``FixtureLoader.load``.
"""
def __init__(self, *args, **kwargs):
raise NotImplementedError('Use one of the child classes of ObjectLoader.')
def get_related_object(self, using=None):
raise NotImplementedError('Use one of the child classes of ObjectLoader.')
class DelayedRelatedObjectLoader(ObjectLoader):
"""
A placeholder for actual related object references in model instance
definitions stored in a Fixture, where those related objects are
themselves contained in Fixtures. See RelatedObjectLoader for the
equivalent for pre-existing objects.
"""
def __init__(self, fixture_instance, pk):
self.fixture_instance = fixture_instance
self.pk = pk
def get_related_object(self, using=None):
return self.fixture_instance.get_object_by_pk(self.pk, using=using)
class RelatedObjectLoader(ObjectLoader):
"""
When ``Fixture.add`` calls include non-DelayedRelatedObjectLoader values
for relation field kwargs, RelatedObjectLoader instances are created as
placeholders.
Example::
some_fixture = Fixture(SomeModel)
some_fixture.add(name="foo", some_related_fk=12, some_m2m=[10, 18])
some_fixture.add(name="bar", some_related_fk=obj1, some_m2m=[obj2, obj3])
The values passed as the ``some_related_fk`` and ``some_m2m`` kwargs
are seen by ``add`` to not be DelayedRelatedObjectLoaders, so the job of
figuring out if and how to turn them into proper object instances is left
to the ``get_related_object`` method of this class.
See DelayedRelatedObjectLoader for the similar implementation of relations
that live in Fixture instances.
"""
def __init__(self, model, identifier):
self.model = model
# Either a PK value, a natural key tuple or a model instance.
self.identifier = identifier
def get_related_object(self, using=None):
"""
When this gets called, what self.identifier contains is unknown.
Figure it out and return an object reference.
"""
# Is self.identifier already a model instance of the correct type?
if isinstance(self.identifier, self.model):
return self.identifier
# Is self.identifier a natural key tuple?
elif isinstance(self.identifier, Iterable) and hasattr(self.model._default_manager.db_manager(using), 'get_by_natural_key'):
try:
obj = self.model._default_manager.db_manager(using).get_by_natural_key(*self.identifier)
return obj
except self.model.DoesNotExist:
# Pass, since it could be a list of PKs
pass
# Is self.identifier the PK value of an instance of the related model?
else:
try:
obj = self.model._default_manager.db_manager(using).get(pk=self.identifier)
return obj
except self.model.DoesNotExist:
raise RelatedObjectError('No %s objects with primary key %s exist.' % \
(self.model._meta.object_name, self.identifier)
)
class DelayedMilkmanDelivery(dict):
"""
No-op dictionary subclass to aid in identifying the use of Milkman in the
loader methods. Not the most pythonic way, relying on isinstance for it,
but the least work to fit this in with the logic that existed before
Milkman support.
"""
pass
from django.core.serializers import register_serializer
# Not thread safe according to the register_serializer docstring, don't know
# if it matters here or not.
register_serializer('class', 'class_fixtures.serializer')
``` |
{
"source": "jklaise/seldon-core",
"score": 2
} |
#### File: python/seldon_core/seldon_client.py
```python
from seldon_core.proto import prediction_pb2
from seldon_core.proto import prediction_pb2_grpc
from seldon_core.utils import array_to_grpc_datadef, seldon_message_to_json, \
json_to_seldon_message, feedback_to_json, seldon_messages_to_json
import numpy as np
import grpc
import requests
from requests.auth import HTTPBasicAuth
from typing import Tuple, Dict, Union, List, Optional, Iterable
import json
import logging
logger = logging.getLogger(__name__)
class SeldonClientException(Exception):
"""
Seldon Client Exception
"""
status_code = 400
def __init__(self, message):
Exception.__init__(self)
self.message = message
class SeldonClientPrediction(object):
"""
Data class to return from Seldon Client
"""
def __init__(self, request: Optional[prediction_pb2.SeldonMessage],
response: Optional[prediction_pb2.SeldonMessage],
success: bool = True, msg: str = ""):
self.request = request
self.response = response
self.success = success
self.msg = msg
def __repr__(self):
return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % (
self.success, self.msg, self.request, self.response)
class SeldonClientFeedback(object):
"""
Data class to return from Seldon Client for feedback calls
"""
def __init__(self, request: Optional[prediction_pb2.Feedback], response: Optional[prediction_pb2.SeldonMessage],
success: bool = True,
msg: str = ""):
self.request = request
self.response = response
self.success = success
self.msg = msg
def __repr__(self):
return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % (
self.success, self.msg, self.request, self.response)
class SeldonClientCombine(object):
"""
Data class to return from Seldon Client for aggregate calls
"""
def __init__(self, request: Optional[prediction_pb2.SeldonMessageList],
response: Optional[prediction_pb2.SeldonMessage],
success: bool = True, msg: str = ""):
self.request = request
self.response = response
self.success = success
self.msg = msg
def __repr__(self):
return "Success:%s message:%s\nRequest:\n%s\nResponse:\n%s" % (
self.success, self.msg, self.request, self.response)
class SeldonClient(object):
"""
A reference Seldon API Client
"""
def __init__(self, gateway: str = "ambassador", transport: str = "rest", namespace: str = None,
deployment_name: str = None,
payload_type: str = "tensor", oauth_key: str = None, oauth_secret: str = None,
seldon_rest_endpoint: str = "localhost:8002", seldon_grpc_endpoint: str = "localhost:8004",
gateway_endpoint: str = "localhost:8003", microservice_endpoint: str = "localhost:5000",
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024):
"""
Parameters
----------
gateway
API Gateway - either ambassador, istio or seldon
transport
API transport - grpc or rest
namespace
k8s namespace of running deployment
deployment_name
name of seldon deployment
payload_type
            payload - tensor, ndarray or tftensor
oauth_key
OAUTH key (if using seldon api server)
oauth_secret
OAUTH secret (if using seldon api server)
seldon_rest_endpoint
REST endpoint to seldon api server
seldon_grpc_endpoint
gRPC endpoint to seldon api server
gateway_endpoint
Gateway endpoint
microservice_endpoint
Running microservice endpoint
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
"""
self.config = locals()
del self.config["self"]
logging.debug("Configuration:" + str(self.config))
def _gather_args(self, **kwargs):
c2 = {**self.config}
c2.update({k: v for k, v in kwargs.items() if v is not None})
return c2
def _validate_args(self, gateway: str = None, transport: str = None,
method: str = None, data: np.ndarray = None, **kwargs):
"""
Internal method to validate parameters
Parameters
----------
gateway
API gateway
transport
API transport
method
The method to call
data
Numpy data to send
kwargs
Returns
-------
"""
if not (gateway == "ambassador" or gateway == "seldon" or gateway == "istio"):
raise SeldonClientException("Valid values for gateway are 'ambassador', 'istio', or 'seldon'")
if not (transport == "rest" or transport == "grpc"):
raise SeldonClientException("Valid values for transport are 'rest' or 'grpc'")
if not (method == "predict" or method == "route" or method == "aggregate" or method == "transform-input" or
method == "transform-output" or method == "send-feedback" or method is None):
raise SeldonClientException(
"Valid values for method are 'predict', 'route', 'transform-input', 'transform-output', 'aggregate' or None")
if not (data is None or isinstance(data, np.ndarray)):
raise SeldonClientException("Valid values for data are None or numpy array")
def predict(self, gateway: str = None, transport: str = None, deployment_name: str = None,
payload_type: str = None, oauth_key: str = None, oauth_secret: str = None,
seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None,
gateway_endpoint: str = None, microservice_endpoint: str = None,
method: str = None, shape: Tuple = (1, 1), namespace: str = None, data: np.ndarray = None,
bin_data: Union[bytes, bytearray] = None, str_data: str = None, names: Iterable[str] = None,
gateway_prefix: str = None, headers: Dict = None) -> SeldonClientPrediction:
"""
Parameters
----------
gateway
API Gateway - either ambassador, istio or seldon
transport
API transport - grpc or rest
namespace
k8s namespace of running deployment
deployment_name
name of seldon deployment
payload_type
            payload - tensor, ndarray or tftensor
oauth_key
OAUTH key (if using seldon api server)
oauth_secret
OAUTH secret (if using seldon api server)
seldon_rest_endpoint
REST endpoint to seldon api server
seldon_grpc_endpoint
gRPC endpoint to seldon api server
gateway_endpoint
Gateway endpoint
microservice_endpoint
Running microservice endpoint
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
data
Numpy Array Payload to send
bin_data
Binary payload to send - will override data
str_data
String payload to send - will override data
names
Column names
gateway_prefix
prefix path for gateway URL endpoint
headers
Headers to add to request
Returns
-------
"""
k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name,
payload_type=payload_type, oauth_key=oauth_key,
oauth_secret=oauth_secret, seldon_rest_endpoint=seldon_rest_endpoint,
seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint,
microservice_endpoint=microservice_endpoint, method=method, shape=shape,
namespace=namespace, names=names,
data=data, bin_data=bin_data, str_data=str_data,
gateway_prefix=gateway_prefix, headers=headers)
self._validate_args(**k)
if k["gateway"] == "ambassador" or k["gateway"] == "istio":
if k["transport"] == "rest":
return rest_predict_gateway(**k)
elif k["transport"] == "grpc":
return grpc_predict_gateway(**k)
else:
raise SeldonClientException("Unknown transport " + k["transport"])
elif k["gateway"] == "seldon":
if k["transport"] == "rest":
return rest_predict_seldon_oauth(**k)
elif k["transport"] == "grpc":
return grpc_predict_seldon_oauth(**k)
else:
raise SeldonClientException("Unknown transport " + k["transport"])
else:
raise SeldonClientException("Unknown gateway " + k["gateway"])
def feedback(self, prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
gateway: str = None, transport: str = None, deployment_name: str = None,
payload_type: str = None, oauth_key: str = None, oauth_secret: str = None,
seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None,
gateway_endpoint: str = None, microservice_endpoint: str = None,
method: str = None, shape: Tuple = (1, 1), namespace: str = None,
gateway_prefix: str = None) -> SeldonClientFeedback:
"""
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
gateway
API Gateway - either ambassador, istio or seldon
transport
API transport - grpc or rest
deployment_name
name of seldon deployment
payload_type
payload - tensor, ndarray or tftensor
oauth_key
OAUTH key (if using seldon api server)
oauth_secret
OAUTH secret (if using seldon api server)
seldon_rest_endpoint
REST endpoint to seldon api server
seldon_grpc_endpoint
gRPC endpoint to seldon api server
gateway_endpoint
Gateway endpoint
microservice_endpoint
Running microservice endpoint
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
method
The microservice method to call
shape
The shape of the data to send
namespace
k8s namespace of running deployment
Returns
-------
"""
k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name,
payload_type=payload_type, oauth_key=oauth_key, oauth_secret=oauth_secret,
                              seldon_rest_endpoint=seldon_rest_endpoint,
                              seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint,
microservice_endpoint=microservice_endpoint, method=method, shape=shape,
namespace=namespace, gateway_prefix=gateway_prefix)
self._validate_args(**k)
if k["gateway"] == "ambassador" or k["gateway"] == "istio":
if k["transport"] == "rest":
return rest_feedback_gateway(prediction_request, prediction_response, reward, **k)
elif k["transport"] == "grpc":
return grpc_feedback_gateway(prediction_request, prediction_response, reward, **k)
else:
raise SeldonClientException("Unknown transport " + k["transport"])
elif k["gateway"] == "seldon":
if k["transport"] == "rest":
return rest_feedback_seldon_oauth(prediction_request, prediction_response, reward, **k)
elif k["transport"] == "grpc":
return grpc_feedback_seldon_oauth(prediction_request, prediction_response, reward, **k)
else:
raise SeldonClientException("Unknown transport " + k["transport"])
else:
raise SeldonClientException("Unknown gateway " + k["gateway"])
def microservice(self, gateway: str = None, transport: str = None, deployment_name: str = None,
payload_type: str = None, oauth_key: str = None, oauth_secret: str = None,
seldon_rest_endpoint: str = None, seldon_grpc_endpoint: str = None,
gateway_endpoint: str = None, microservice_endpoint: str = None,
method: str = None, shape: Tuple = (1, 1), namespace: str = None, data: np.ndarray = None,
datas: List[np.ndarray] = None, ndatas: int = None, bin_data: Union[bytes, bytearray] = None,
str_data: str = None, names: Iterable[str] = None) -> Union[SeldonClientPrediction, SeldonClientCombine]:
"""
Parameters
----------
gateway
API Gateway - either ambassador, istio or seldon
transport
API transport - grpc or rest
deployment_name
name of seldon deployment
payload_type
payload - tensor, ndarray or tftensor
oauth_key
OAUTH key (if using seldon api server)
oauth_secret
OAUTH secret (if using seldon api server)
seldon_rest_endpoint
REST endpoint to seldon api server
seldon_grpc_endpoint
gRPC endpoint to seldon api server
gateway_endpoint
Gateway endpoint
microservice_endpoint
Running microservice endpoint
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
method
The microservice method to call
shape
The shape of the data to send
namespace
k8s namespace of running deployment
data
Numpy Array Payload to send
bin_data
Binary payload to send - will override data
str_data
String payload to send - will override data
ndatas
Multiple numpy arrays to send for aggregation
names
Column names
Returns
-------
A prediction result
"""
k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name,
payload_type=payload_type, oauth_key=oauth_key,
oauth_secret=oauth_secret, seldon_rest_endpoint=seldon_rest_endpoint,
seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint,
microservice_endpoint=microservice_endpoint, method=method, shape=shape,
namespace=namespace, datas=datas, ndatas=ndatas, names=names,
data=data, bin_data=bin_data, str_data=str_data)
self._validate_args(**k)
if k["transport"] == "rest":
if k["method"] == "predict" or k["method"] == "transform-input" or k["method"] == "transform-output" or k[
"method"] == "route":
return microservice_api_rest_seldon_message(**k)
elif k["method"] == "aggregate":
return microservice_api_rest_aggregate(**k)
else:
raise SeldonClientException("Unknown method " + k["method"])
elif k["transport"] == "grpc":
if k["method"] == "predict" or k["method"] == "transform-input" or k["method"] == "transform-output" or k[
"method"] == "route":
return microservice_api_grpc_seldon_message(**k)
elif k["method"] == "aggregate":
return microservice_api_grpc_aggregate(**k)
else:
raise SeldonClientException("Unknown method " + k["method"])
else:
raise SeldonClientException("Unknown transport " + k["transport"])
def microservice_feedback(self, prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
gateway: str = None, transport: str = None, deployment_name: str = None,
payload_type: str = None, oauth_key: str = None, oauth_secret: str = None,
seldon_rest_endpoint: str = None,
seldon_grpc_endpoint: str = None,
gateway_endpoint: str = None,
microservice_endpoint: str = None,
method: str = None, shape: Tuple = (1, 1), namespace: str = None) -> SeldonClientFeedback:
"""
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
gateway
            API Gateway - either ambassador, istio or seldon
transport
API transport - grpc or rest
deployment_name
name of seldon deployment
payload_type
payload - tensor, ndarray or tftensor
oauth_key
OAUTH key (if using seldon api server)
oauth_secret
OAUTH secret (if using seldon api server)
seldon_rest_endpoint
REST endpoint to seldon api server
seldon_grpc_endpoint
gRPC endpoint to seldon api server
gateway_endpoint
Gateway endpoint
microservice_endpoint
Running microservice endpoint
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
method
The microservice method to call
shape
The shape of the data to send
namespace
k8s namespace of running deployment
Returns
-------
A client response
"""
k = self._gather_args(gateway=gateway, transport=transport, deployment_name=deployment_name,
payload_type=payload_type, oauth_key=oauth_key, oauth_secret=oauth_secret,
                              seldon_rest_endpoint=seldon_rest_endpoint,
                              seldon_grpc_endpoint=seldon_grpc_endpoint, gateway_endpoint=gateway_endpoint,
microservice_endpoint=microservice_endpoint, method=method, shape=shape,
namespace=namespace)
self._validate_args(**k)
if k["transport"] == "rest":
return microservice_api_rest_feedback(prediction_request, prediction_response, reward, **k)
else:
return microservice_api_grpc_feedback(prediction_request, prediction_response, reward, **k)
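# Hedged usage sketch (illustration only; the deployment name, namespace and
# endpoint are placeholders and this helper is never called by the library):
def _seldon_client_example():
    sc = SeldonClient(deployment_name="mymodel", namespace="seldon",
                      gateway="ambassador", transport="rest",
                      gateway_endpoint="localhost:8003")
    result = sc.predict(shape=(1, 4))  # sends a random (1, 4) tensor payload
    return result.success, result.response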
def microservice_api_rest_seldon_message(method: str = "predict", microservice_endpoint: str = "localhost:5000",
shape: Tuple = (1, 1),
data: object = None, payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
Call Seldon microservice REST API
Parameters
----------
method
The microservice method to call
microservice_endpoint
Running microservice endpoint
    shape
        Shape of the data to send
data
Numpy array data to send
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data payload
str_data
String data payload
names
Column names
kwargs
Returns
-------
A SeldonClientPrediction data response
"""
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
payload = seldon_message_to_json(request)
response_raw = requests.post(
"http://" + microservice_endpoint + "/" + method,
data={"json": json.dumps(payload)})
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = response_raw.reason
try:
response = json_to_seldon_message(response_raw.json())
return SeldonClientPrediction(request, response, success, msg)
except Exception as e:
return SeldonClientPrediction(request, None, success, str(e))
def microservice_api_rest_aggregate(microservice_endpoint: str = "localhost:5000",
shape: Tuple = (1, 1),
datas: List[np.ndarray] = None, ndatas: int = None, payload_type: str = "tensor",
names: Iterable[str] = None,
**kwargs) -> SeldonClientCombine:
"""
Call Seldon microservice REST API aggregate endpoint
Parameters
----------
microservice_endpoint
Running microservice endpoint
shape
The shape of the data to send
datas
List of Numpy array data to send
ndatas
Multiple numpy arrays to send for aggregation
payload_type
payload - tensor, ndarray or tftensor
names
Column names
kwargs
Returns
-------
A SeldonClientPrediction
"""
if datas is None:
datas = []
for n in range(ndatas):
data = np.random.rand(*shape)
datas.append(data)
msgs = []
for data in datas:
if isinstance(data, (bytes, bytearray)):
msgs.append(prediction_pb2.SeldonMessage(binData=data))
elif isinstance(data, str):
msgs.append(prediction_pb2.SeldonMessage(strData=data))
else:
datadef = array_to_grpc_datadef(payload_type, data, names)
msgs.append(prediction_pb2.SeldonMessage(data=datadef))
request = prediction_pb2.SeldonMessageList(seldonMessages=msgs)
payload = seldon_messages_to_json(request)
response_raw = requests.post(
"http://" + microservice_endpoint + "/aggregate",
data={"json": json.dumps(payload)})
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = response_raw.reason
try:
response = json_to_seldon_message(response_raw.json())
return SeldonClientCombine(request, response, success, msg)
except Exception as e:
return SeldonClientCombine(request, None, success, str(e))
def microservice_api_rest_feedback(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
microservice_endpoint: str = None, **kwargs) -> SeldonClientFeedback:
"""
Call Seldon microserice REST API to send feedback
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
microservice_endpoint
Running microservice endpoint
kwargs
Returns
-------
A SeldonClientFeedback
"""
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
payload = feedback_to_json(request)
response_raw = requests.post(
"http://" + microservice_endpoint + "/send-feedback",
data={"json": json.dumps(payload)})
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = response_raw.reason
try:
response = json_to_seldon_message(response_raw.json())
return SeldonClientFeedback(request, response, success, msg)
except Exception as e:
return SeldonClientFeedback(request, None, success, str(e))
def microservice_api_grpc_seldon_message(method: str = "predict", microservice_endpoint: str = "localhost:5000",
shape: Tuple = (1, 1),
data: object = None, payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
Call Seldon microservice gRPC API
Parameters
----------
method
Method to call
microservice_endpoint
Running microservice endpoint
shape
The shape of the data to send
data
Numpy array data to send
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
names
column names
kwargs
Returns
-------
SeldonClientPrediction
"""
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
channel = grpc.insecure_channel(microservice_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
try:
if method == "predict":
stub_model = prediction_pb2_grpc.ModelStub(channel)
response = stub_model.Predict(request=request)
elif method == "transform-input":
stub = prediction_pb2_grpc.GenericStub(channel)
response = stub.TransformInput(request=request)
elif method == "transform-output":
stub = prediction_pb2_grpc.GenericStub(channel)
response = stub.TransformOutput(request=request)
elif method == "route":
stub = prediction_pb2_grpc.GenericStub(channel)
response = stub.Route(request=request)
else:
raise SeldonClientException("Unknown method:" + method)
return SeldonClientPrediction(request, response, True, "")
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
def microservice_api_grpc_aggregate(microservice_endpoint: str = "localhost:5000",
shape: Tuple = (1, 1),
datas: List[np.ndarray] = None, ndatas: int = None, payload_type: str = "tensor",
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
names: Iterable[str] = None,
**kwargs) -> SeldonClientCombine:
"""
Call Seldon microservice gRPC API aggregate
Parameters
----------
microservice_endpoint
Microservice API endpoint
shape
Shape of the data to send
datas
List of Numpy array data to send
ndatas
Multiple numpy arrays to send for aggregation
payload_type
payload - tensor, ndarray or tftensor
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
names
Column names
kwargs
Returns
-------
SeldonClientCombine
"""
if datas is None:
datas = []
for n in range(ndatas):
data = np.random.rand(*shape)
datas.append(data)
msgs = []
for data in datas:
if isinstance(data, (bytes, bytearray)):
msgs.append(prediction_pb2.SeldonMessage(binData=data))
elif isinstance(data, str):
msgs.append(prediction_pb2.SeldonMessage(strData=data))
else:
datadef = array_to_grpc_datadef(payload_type, data, names=names)
msgs.append(prediction_pb2.SeldonMessage(data=datadef))
request = prediction_pb2.SeldonMessageList(seldonMessages=msgs)
try:
channel = grpc.insecure_channel(microservice_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.GenericStub(channel)
response = stub.Aggregate(request=request)
return SeldonClientCombine(request, response, True, "")
except Exception as e:
return SeldonClientCombine(request, None, False, str(e))
def microservice_api_grpc_feedback(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
microservice_endpoint: str = None,
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
**kwargs) -> SeldonClientFeedback:
"""
Send feedback to a Seldon microservice gRPC API
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
microservice_endpoint
Running microservice endpoint
kwargs
Returns
-------
"""
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
try:
channel = grpc.insecure_channel(microservice_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.GenericStub(channel)
response = stub.SendFeedback(request=request)
return SeldonClientFeedback(request, response, True, "")
except Exception as e:
return SeldonClientFeedback(request, None, False, str(e))
#
# External API
#
def get_token(oauth_key: str = "", oauth_secret: str = "", namespace: str = None,
endpoint: str = "localhost:8002") -> str:
"""
Get an OAUTH key from the Seldon Gateway
Parameters
----------
oauth_key
OAUTH key
oauth_secret
OAUTH secret
namespace
k8s namespace of running deployment
endpoint
The host:port of the endpoint for the OAUTH API server
Returns
-------
The OAUTH token
"""
payload = {'grant_type': 'client_credentials'}
if namespace is None:
key = oauth_key
else:
key = oauth_key + namespace
response = requests.post(
"http://" + endpoint + "/oauth/token",
auth=HTTPBasicAuth(key, oauth_secret),
data=payload)
if response.status_code == 200:
token = response.json()["access_token"]
return token
else:
print("Failed to get token:"+response.text)
raise SeldonClientException(response.text)
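# Minimal usage sketch, assuming the OAuth gateway described above is reachable
# on localhost:8002; the key, secret and namespace values are placeholders.
def _example_bearer_header() -> Dict:
    token = get_token("my-oauth-key", "my-oauth-secret", namespace="seldon", endpoint="localhost:8002")
    return {"Authorization": "Bearer " + token}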
def rest_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str = None,
seldon_rest_endpoint: str = "localhost:8002", shape: Tuple = (1, 1),
data: object = None, payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
Call Seldon API Gateway using REST
Parameters
----------
oauth_key
OAUTH key
oauth_secret
OAUTH secret
namespace
k8s namespace of running deployment
seldon_rest_endpoint
Endpoint (host:port) of the Seldon REST API gateway
shape
Shape of the data to send
data
Data to send
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
names
column names
kwargs
Returns
-------
Seldon Client Prediction
"""
token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint)
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
headers = {'Authorization': 'Bearer ' + token}
payload = seldon_message_to_json(request)
response_raw = requests.post(
"http://" + seldon_rest_endpoint + "/api/v0.1/predictions",
headers=headers,
json=payload)
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = str(response_raw.status_code) + ":" + response_raw.reason
try:
if len(response_raw.text) > 0:
try:
response = json_to_seldon_message(response_raw.json())
except:
response = None
else:
response = None
return SeldonClientPrediction(request, response, success, msg)
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
def grpc_predict_seldon_oauth(oauth_key: str, oauth_secret: str, namespace: str = None,
seldon_rest_endpoint: str = "localhost:8002",
seldon_grpc_endpoint: str = "localhost:8004", shape: Tuple[int, int] = (1, 1),
data: np.ndarray = None, payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
Call Seldon gRPC API Gateway endpoint
Parameters
----------
oauth_key
OAUTH key
oauth_secret
OAUTH secret
namespace
k8s namespace of running deployment
seldon_rest_endpoint
Endpoint (host:port) of the Seldon REST API gateway
shape
Shape of the data to send
data
Data to send
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
names
Column names
kwargs
Returns
-------
A SeldonClientPrediction wrapping the SeldonMessage proto response
"""
token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint)
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
channel = grpc.insecure_channel(seldon_grpc_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.SeldonStub(channel)
metadata = [('oauth_token', token)]
try:
response = stub.Predict(request=request, metadata=metadata)
return SeldonClientPrediction(request, response, True, "")
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
def rest_predict_gateway(deployment_name: str, namespace: str = None, gateway_endpoint: str = "localhost:8003",
shape: Tuple[int, int] = (1, 1),
data: np.ndarray = None, headers: Dict = None, gateway_prefix: str = None,
payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
REST request to Gateway Ingress
Parameters
----------
deployment_name
The name of the Seldon Deployment
namespace
k8s namespace of running deployment
gateway_endpoint
The host:port of gateway
shape
The shape of the data to send
data
The numpy data to send
headers
Headers to add to request
gateway_prefix
The prefix path to add to the request
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
names
Column names
Returns
-------
A SeldonClientPrediction wrapping the response
"""
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
payload = seldon_message_to_json(request)
if gateway_prefix is None:
if namespace is None:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + deployment_name + "/api/v0.1/predictions",
json=payload,
headers=headers)
else:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "/api/v0.1/predictions",
json=payload,
headers=headers)
else:
response_raw = requests.post(
"http://" + gateway_endpoint + gateway_prefix + "/api/v0.1/predictions",
json=payload,
headers=headers)
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = str(response_raw.status_code) + ":" + response_raw.reason
try:
if len(response_raw.text) > 0:
try:
response = json_to_seldon_message(response_raw.json())
except:
response = None
else:
response = None
return SeldonClientPrediction(request, response, success, msg)
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
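# Minimal usage sketch, assuming an ingress gateway on localhost:8003 and a
# deployment called "mymodel" in namespace "seldon"; all names are illustrative.
def _example_rest_gateway_call() -> SeldonClientPrediction:
    return rest_predict_gateway(
        "mymodel",
        namespace="seldon",
        gateway_endpoint="localhost:8003",
        shape=(1, 4),
        payload_type="ndarray")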
def rest_predict_gateway_basicauth(deployment_name: str, username: str, password: str, namespace: str = None,
gateway_endpoint: str = "localhost:8003",
shape: Tuple[int, int] = (1, 1), data: np.ndarray = None,
payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
REST request with Basic Auth to Gateway Ingress
Parameters
----------
deployment_name
The name of the running deployment
username
Username for basic auth
password
<PASSWORD>
namespace
The namespace of the running deployment
gateway_endpoint
The host:port of gateway
shape
The shape of data
data
The numpy data to send
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
names
Column names
Returns
-------
"""
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names)
request = prediction_pb2.SeldonMessage(data=datadef)
payload = seldon_message_to_json(request)
if namespace is None:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + deployment_name + "/api/v0.1/predictions",
json=payload,
auth=HTTPBasicAuth(username, password))
else:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "/api/v0.1/predictions",
json=payload,
auth=HTTPBasicAuth(username, password))
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = str(response_raw.status_code) + ":" + response_raw.reason
try:
if len(response_raw.text) > 0:
try:
response = json_to_seldon_message(response_raw.json())
except:
response = None
else:
response = None
return SeldonClientPrediction(request, response, success, msg)
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
def grpc_predict_gateway(deployment_name: str, namespace: str = None, gateway_endpoint: str = "localhost:8003",
shape: Tuple[int, int] = (1, 1),
data: np.ndarray = None,
headers: Dict = None, payload_type: str = "tensor",
bin_data: Union[bytes, bytearray] = None, str_data: str = None,
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
names: Iterable[str] = None,
**kwargs) -> SeldonClientPrediction:
"""
gRPC request to Gateway Ingress
Parameters
----------
deployment_name
Deployment name of Seldon Deployment
namespace
The namespace the Seldon Deployment is running in
gateway_endpoint
The endpoint for gateway
shape
The shape of the data
data
The numpy array data to send
headers
A Dict of key value pairs to add to gRPC HTTP Headers
payload_type
payload - tensor, ndarray or tftensor
bin_data
Binary data to send
str_data
String data to send
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
names
Column names
Returns
-------
A SeldonClientPrediction wrapping the SeldonMessage proto response
"""
if bin_data is not None:
request = prediction_pb2.SeldonMessage(binData=bin_data)
elif str_data is not None:
request = prediction_pb2.SeldonMessage(strData=str_data)
else:
if data is None:
data = np.random.rand(*shape)
datadef = array_to_grpc_datadef(payload_type, data, names=names)
request = prediction_pb2.SeldonMessage(data=datadef)
channel = grpc.insecure_channel(gateway_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.SeldonStub(channel)
if namespace is None:
metadata = [('seldon', deployment_name)]
else:
metadata = [('seldon', deployment_name), ('namespace', namespace)]
if headers is not None:
for k in headers:
metadata.append((k, headers[k]))
try:
response = stub.Predict(request=request, metadata=metadata)
return SeldonClientPrediction(request, response, True, "")
except Exception as e:
return SeldonClientPrediction(request, None, False, str(e))
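# Minimal usage sketch mirroring the REST gateway example above, assuming the
# same illustrative deployment; extra gRPC HTTP headers are passed as metadata.
def _example_grpc_gateway_call() -> SeldonClientPrediction:
    return grpc_predict_gateway(
        "mymodel",
        namespace="seldon",
        gateway_endpoint="localhost:8003",
        shape=(1, 4),
        headers={"x-request-id": "example"})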
def rest_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
oauth_key: str = "", oauth_secret: str = "", namespace: str = None,
seldon_rest_endpoint: str = "localhost:8002", **kwargs) -> SeldonClientFeedback:
"""
Send Feedback to Seldon API Gateway using REST
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
oauth_key
OAUTH key
oauth_secret
OAUTH secret
namespace
k8s namespace of running deployment
seldon_rest_endpoint
Endpoint (host:port) of the Seldon REST API gateway
kwargs
Returns
-------
"""
token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint)
headers = {'Authorization': 'Bearer ' + token}
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
payload = feedback_to_json(request)
response_raw = requests.post(
"http://" + seldon_rest_endpoint + "/api/v0.1/feedback",
headers=headers,
json=payload)
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = str(response_raw.status_code) + ":" + response_raw.reason
try:
if len(response_raw.text) > 0:
try:
response = json_to_seldon_message(response_raw.json())
except:
response = None
else:
response = None
return SeldonClientFeedback(request, response, success, msg)
except Exception as e:
return SeldonClientFeedback(request, None, False, str(e))
def grpc_feedback_seldon_oauth(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
oauth_key: str = "", oauth_secret: str = "", namespace: str = None,
seldon_rest_endpoint: str = "localhost:8002",
seldon_grpc_endpoint: str = "localhost:8004",
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
**kwargs) -> SeldonClientFeedback:
"""
Send feedback to Seldon API gateway via gRPC
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
oauth_key
OAUTH key
oauth_secret
OAUTH secret
namespace
k8s namespace of running deployment
seldon_rest_endpoint
Endpoint (host:port) of the Seldon REST API gateway
seldon_grpc_endpoint
Endpoint for Seldon grpc
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
kwargs
Returns
-------
"""
token = get_token(oauth_key, oauth_secret, namespace, seldon_rest_endpoint)
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
channel = grpc.insecure_channel(seldon_grpc_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.SeldonStub(channel)
metadata = [('oauth_token', token)]
try:
response = stub.SendFeedback(request=request, metadata=metadata)
return SeldonClientFeedback(request, response, True, "")
except Exception as e:
return SeldonClientFeedback(request, None, False, str(e))
def rest_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
deployment_name: str = "", namespace: str = None,
gateway_endpoint: str = "localhost:8003", headers: Dict = None, gateway_prefix: str = None,
**kwargs) -> SeldonClientFeedback:
"""
Send Feedback to Seldon via gateway using REST
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
deployment_name
The name of the running Seldon deployment
namespace
k8s namespace of running deployment
gateway_endpoint
The gateway host:port endpoint
headers
Headers to add to the request
gateway_prefix
The prefix to add to the request path for gateway
kwargs
Returns
-------
A Seldon Feedback Response
"""
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
payload = feedback_to_json(request)
if gateway_prefix is None:
if namespace is None:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + deployment_name + "/api/v0.1/feedback",
json=payload,
headers=headers)
else:
response_raw = requests.post(
"http://" + gateway_endpoint + "/seldon/" + namespace + "/" + deployment_name + "/api/v0.1/feedback",
json=payload,
headers=headers)
else:
response_raw = requests.post(
"http://" + gateway_endpoint + gateway_prefix + "/api/v0.1/feedback",
json=payload,
headers=headers)
if response_raw.status_code == 200:
success = True
msg = ""
else:
success = False
msg = str(response_raw.status_code) + ":" + response_raw.reason
try:
if len(response_raw.text) > 0:
try:
response = json_to_seldon_message(response_raw.json())
except:
response = None
else:
response = None
return SeldonClientFeedback(request, response, success, msg)
except Exception as e:
return SeldonClientFeedback(request, None, False, str(e))
def grpc_feedback_gateway(prediction_request: prediction_pb2.SeldonMessage = None,
prediction_response: prediction_pb2.SeldonMessage = None, reward: float = 0,
deployment_name: str = "", namespace: str = None,
gateway_endpoint: str = "localhost:8003",
headers: Dict = None,
grpc_max_send_message_length: int = 4 * 1024 * 1024,
grpc_max_receive_message_length: int = 4 * 1024 * 1024,
**kwargs) -> SeldonClientFeedback:
"""
Send feedback to Seldon via gateway using gRPC
Parameters
----------
prediction_request
Previous prediction request
prediction_response
Previous prediction response
reward
A reward to send in feedback
deployment_name
The name of the running Seldon deployment
namespace
k8s namespace of running deployment
gateway_endpoint
The gateway host:port endpoint
headers
Headers to add to the request
grpc_max_send_message_length
Max grpc send message size in bytes
grpc_max_receive_message_length
Max grpc receive message size in bytes
kwargs
Returns
-------
"""
request = prediction_pb2.Feedback(request=prediction_request, response=prediction_response, reward=reward)
channel = grpc.insecure_channel(gateway_endpoint, options=[
('grpc.max_send_message_length', grpc_max_send_message_length),
('grpc.max_receive_message_length', grpc_max_receive_message_length)])
stub = prediction_pb2_grpc.SeldonStub(channel)
if namespace is None:
metadata = [('seldon', deployment_name)]
else:
metadata = [('seldon', deployment_name), ('namespace', namespace)]
if headers is not None:
for k in headers:
metadata.append((k, headers[k]))
try:
response = stub.SendFeedback(request=request, metadata=metadata)
return SeldonClientFeedback(request, response, True, "")
except Exception as e:
return SeldonClientFeedback(request, None, False, str(e))
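# Minimal usage sketch, assuming a previous gateway prediction for the same
# illustrative deployment and that the SeldonClientPrediction returned above
# exposes the request/response it wraps; the reward value is arbitrary.
def _example_send_feedback() -> SeldonClientFeedback:
    pred = grpc_predict_gateway("mymodel", namespace="seldon", gateway_endpoint="localhost:8003")
    return grpc_feedback_gateway(
        prediction_request=pred.request,
        prediction_response=pred.response,
        reward=1.0,
        deployment_name="mymodel",
        namespace="seldon",
        gateway_endpoint="localhost:8003")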
```
#### File: testing/scripts/conftest.py
```python
import pytest
from k8s_utils import *
from s2i_utils import *
from java_utils import *
from go_utils import *
@pytest.fixture(scope="module")
def clusterwide_seldon_helm(request):
version = get_seldon_version()
create_seldon_clusterwide_helm(request,version)
port_forward(request)
@pytest.fixture(scope="module")
def setup_python_s2i(request):
build_python_s2i_images()
@pytest.fixture(scope="module")
def s2i_python_version():
return get_s2i_python_version()
@pytest.fixture(scope="session")
def seldon_images(request):
create_docker_repo(request)
port_forward_docker_repo(request)
build_java_images()
version = get_seldon_version()
build_go_images(version)
@pytest.fixture(scope="session")
def seldon_version():
return get_seldon_version()
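# Minimal sketch of how a test module alongside this conftest would request the
# fixtures above by name; the test name and body are illustrative only.
#
#   def test_wrapper_versions(s2i_python_version, seldon_version):
#       assert s2i_python_version and seldon_version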
``` |
{
"source": "jklann/jgk-i2b2tools",
"score": 3
} |
#### File: jgk-i2b2tools/ontology_tools/ontology_gen_recursive.py
```python
import numpy as np
import pandas as pd
path_in = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/flowsheet_ont_v2.csv'
path_out = '/Users/jeffklann/Dropbox (Partners HealthCare)/HMS/Projects/CONCERN/flowsheet_ont_i2b2.csv'
def doRecurse(df, lvl, parent_codes):
print(str(lvl)+"...")
dflvl=df.loc[df.Parent_Code.isin(parent_codes)] # Get rows with given parent
n = len(dflvl) # Get num rows with given parent
if(n>0):
# Recursive case
# -1) Cycle check!
dfcyc = dflvl[dflvl['h_level'].notnull()]
if len(dfcyc)>0:
print('Cycle?')
print(dfcyc['Parent_Code'].drop_duplicates())
# 0) Enumerate all current level codes (for later)
codes = [str(x) for x in dflvl.loc[df.has_children=='Y'].Code.drop_duplicates().values.tolist()]
# 2) Set h_level at all the rows selected by dflvl
df.loc[dflvl.index, 'h_level'] = lvl
# 1) Set this level's fullname and tooltip
df.loc[dflvl.index,'fullname']=df.loc[dflvl.index,'fullname'].str.cat(df.loc[dflvl.index,'Code'],sep='\\',na_rep='')
df.loc[dflvl.index,'tooltip']=df.loc[dflvl.index,'tooltip'].str.cat(df.loc[dflvl.index,'Label'],sep='\\',na_rep='')
# 3) Recreate the dataframe, propagating tooltip and the fullname and tooltip down one level
dff = df.loc[dflvl.index].merge(df,left_on='Code',right_on='Parent_Code',how='right',suffixes=['_x',''],copy=True) # Left is current, right is child
# The fullname for children is completed next round, this just propagates down the parent if it exists
dff.loc[:,'fullname']=dff['fullname'].str.cat(dff['fullname_x'])#,na_rep='')
dff.loc[:,'path'] = dff['fullname_x'] # Path becomes parent fullname for next level
dff.loc[:,'tooltip'] = dff['tooltip'].str.cat(dff['tooltip_x'], na_rep='')
dfr = dff[dff.columns[-len(df.columns):]].copy()
# ^ Note we need to do a copy or the trying to modify the df in the next iteration fails
# 4) Recurse -- df_recurse is all nodes after recursion
df_recurse = doRecurse(dfr,lvl+1,codes)
return df_recurse
else:
# Base case: no nodes, return unchanged df
return df
""" Input a df with columns (minimally): Label, Code, Parent_Code, has_children
Will add additional columns: tooltip, h_level, fullname
has_children is needed for cases where multiple nodes exist with the same code...
"""
def OntRecurse(df):
# Secret sauce, build a row count number
#df['rn'] = df.sort_values(['Label']).groupby('Label').cumcount() + 1
#df['Code_Instance'] = df['Code'].str.cat(df['rn'].map(str),sep='-')
df['fullname']=''
df['tooltip']=''
df['path']=''
df['h_level']=np.nan
df=doRecurse(df,1,['-1'])
df['fullname']=df['fullname'].map(str)+"\\"
return df
""" Input a df with (minimally): Label, Code, Parent_Code, tooltip, h_level, fullname, Type
Outputs an i2b2 ontology compatible df.
"""
def OntBuild(df):
odf = pd.DataFrame()
odf['c_hlevel']=df['h_level']
odf['c_fullname']=df['fullname']
odf['c_visualattributes']=df['has_children'].apply(lambda x: 'FAE' if x=='Y' else 'LAE')
odf['c_name']=df['Label']
odf['c_path']=df['path']
odf['c_symbol']=df['Code']
odf['c_basecode']=df['Type'].str.cat(df['Code'],sep=':')
odf['c_synonym_cd']='N'
odf['c_facttablecolumn']='concept_cd'
odf['c_tablename']='concept_dimension'
odf['c_columnname']='concept_path'
odf['c_columndatatype']='T'
odf['c_operator']='LIKE'
odf['c_dimcode']=df['fullname']
odf['c_comment']=df['Type']
odf['c_tooltip']=df['tooltip']
odf['m_applied_path']='@'
return odf
# Load the ontology and build root node
df = pd.read_csv(path_in,delimiter=',',dtype={'Code':str,'Parent_Code':str})
df=df.drop('Full_Label',axis=1)
df=df.drop_duplicates(subset=['Label','Code','Type','Parent_Code']) # Brittany put in a lot of dups
df=df.dropna(axis=1,how='all')
# Create a root node with code -1
df.Parent_Code=df.Parent_Code.fillna('0')
df=df.append(pd.Series({'Full_Label':'CONCERN','Label':'CONCERN','Code':'0','Type':'root','Parent_Code':'-1'},name='root'))
# Add the has_children column
df['has_children']='Y'
df.loc[df['Type']=='row','has_children']='N'
# Build ontology
odf = OntRecurse(df)
odf = OntBuild(odf)
odf.to_csv(path_out,float_format='%.0f')
#df.loc[:,['Label','Code','Parent_Code','rn']].to_csv(path_out)
``` |
{
"source": "jklann/totalnum_tools",
"score": 3
} |
#### File: jklann/totalnum_tools/totalnum_builddb_v2.py
```python
import datetime as dt
import sqlite3
from os import listdir
import numpy as np
import pandas as pd
import math
"""
ISSUES 12-15
NCATS_DEMOGRAPHICS and visit details - not even there
X Diagnoses ok
ACT Labs doesn't show up after "full list"
ACT Laboratory Tests no show at all
ACT Meds can't drill into
X Procedures ok
X COVID-19 broken
Visit details not there
"""
"""
New version loads totalnum reports into a SQLite3 db from basedir (below) with the name format report_[siteid]_[foo].csv.
Columns must be (in order) c_fullname, agg_date, agg_count. (Case insensitive on column names however.)
Date format for agg_date (as enforced by the totalnum report script), should be YYYY-MM-DD, but the python parser can handle others.
Bigfullnamefile must be a file with all possible paths (e.g., from the concept dimension) with columns: c_fullname, c_name.
hlevel and "domain" are inferred.
SQLite db uses a totalnum_int column in the totalnums table and puts this for reference in bigfullname.
By <NAME>, PhD 05-2020
"""
""" Here's how I get the ontology data for the master list:
select distinct concept_path, name_char from concept_dimension
select distinct c_fullname, c_name, c_visualattributes, c_tooltip from act_covid
and c_visualattributes not like '%H%' and c_synonym_cd!='Y'
(only the first two columns are needed)
To do this for the whole ACT ontology, use my act_master_vw (separate script) and:
select distinct c_fullname, c_name, c_hlevel, c_visualattributes, c_tooltip from act_master_vw
where c_visualattributes not like '%H%' and c_synonym_cd!='Y'
"""
# Thanks https://stackoverflow.com/questions/2298339/standard-deviation-for-sqlite
class StdevFunc:
def __init__(self):
self.M = 0.0
self.S = 0.0
self.k = 1
def step(self, value):
if value is None:
return
tM = self.M
self.M += (value - tM) / self.k
self.S += (value - tM) * (value - self.M)
self.k += 1
def finalize(self):
if self.k < 3:
return None
return math.sqrt(self.S / (self.k-2))
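# Minimal sketch of the aggregate above: register it on a throwaway in-memory
# connection and compute a sample standard deviation; the table and values are
# illustrative only.
def _stdev_demo() -> float:
    demo = sqlite3.connect(":memory:")
    demo.create_aggregate("stdev", 1, StdevFunc)
    demo.execute("create table t(v real)")
    demo.executemany("insert into t values (?)", [(1.0,), (2.0,), (4.0,)])
    (sd,) = demo.execute("select stdev(v) from t").fetchone()
    demo.close()
    return sd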
basedir = "/Users/jeffklann/HMS/Projects/ACT/totalnum_data/reports"
bigfullnamefile = '/Users/jeffklann/HMS/Projects/ACT/totalnum_data/ACT_paths_full.csv' # ACT_covid_paths_v3.csv
conn = sqlite3.connect(basedir + '/totalnums.db')
conn.create_aggregate("stdev", 1, StdevFunc)
""" SQL code that creates views and additional tables on the totalnum db for analytics
"""
def postProcess():
sql = r"""
-- Create a pre-joined view for faster coding
drop view if exists totalnums_recent_joined;
create view totalnums_recent_joined as
select c_hlevel,domain,c_visualattributes,f.fullname_int,c_fullname,c_name,agg_date,agg_count,site from
bigfullname f left join totalnums_recent t on f.fullname_int=t.fullname_int;
-- Create a view with old column names
drop view if exists totalnums_oldcols;
create view totalnums_oldcols as
SELECT fullname_int, agg_date AS refresh_date, agg_count AS c, site
FROM totalnums;
drop view if exists totalnums_recent;
-- Set up view for most recent totalnums
create view totalnums_recent as
select t.* from totalnums t inner join
(select fullname_int, site, max(agg_date) agg_date from totalnums group by fullname_int, site) x
on x.fullname_int=t.fullname_int and x.site=t.site and x.agg_date=t.agg_date;
-- Get denominator: any pt in COVID ontology (commented out is any lab test which works better if the site has lab tests)
drop view if exists anal_denom;
create view anal_denom as
select site, agg_count denominator from totalnums_recent where fullname_int in
(select fullname_int from bigfullname where c_fullname='\ACT\UMLS_C0031437\SNOMED_3947185011\');--UMLS_C0022885\')
-- View total / denominator = pct
drop view if exists totalnums_recent_pct;
create view totalnums_recent_pct as
select fullname_int, agg_date, cast(cast(agg_count as float) / denominator * 100 as int) pct, tot.site from totalnums_recent tot inner join anal_denom d on tot.site=d.site;
-- Site outliers: compute avg and stdev.
-- I materialize this (rather than a view) because SQLite doesn't have a stdev function.
drop table if exists outliers_sites;
create table outliers_sites as
select agg_count-stdev-average,* from totalnums_recent r inner join
(select * from
(select fullname_int,avg(agg_count) average, stdev(agg_count) stdev, count(*) num_sites from totalnums_recent r where agg_count>-1 group by fullname_int)
where num_sites>1) stat on stat.fullname_int=r.fullname_int;
-- Site outliers: compute avg and stdev.
-- I materialize this (rather than a view) because SQLite doesn't have a stdev function.
drop table if exists outliers_sites_pct;
create table outliers_sites_pct as
select pct-stdev-average,* from totalnums_recent_pct r inner join
(select * from
(select fullname_int,avg(pct) average, stdev(pct) stdev, count(*) num_sites from totalnums_recent_pct r where pct>=0 group by fullname_int)
where num_sites>1) stat on stat.fullname_int=r.fullname_int;
-- Add some fullnames for summary measures and reporting
drop table if exists toplevel_fullnames;
create table toplevel_fullnames as
select fullname_int from bigfullname where c_fullname like '\ACT\Diagnosis\ICD10\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Diagnosis\ICD9\V2_2018AA\A18090800\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\CPT4\V2_2018AA\A23576389\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\HCPCS\V2_2018AA\A13475665\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Procedures\ICD10\V2_2018AA\A16077350\%' and c_hlevel=2 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Lab\LOINC\V2_2018AA\%' and c_hlevel=7 and c_visualattributes not like 'L%'
union all
select fullname_int from bigfullname where c_fullname like '\ACT\Medications\MedicationsByVaClass\V2_09302018\%' and c_hlevel=5 and c_visualattributes not like 'L%';
create index toplevel_fullnames_f on toplevel_fullnames(fullname_int);
"""
cur = conn.cursor()
cur.executescript(sql)
cur.close()
def buildDb():
# Build the main totalnums db
files = [f for f in listdir(basedir) if ".csv" in f[-4:]]
totals = []
# Load the files
for f in files:
print(basedir + '/' + f)
tot = totalnum_load(basedir + '/' + f)
totals.append(tot)
# 11-20 - support both utf-8 and cp1252
print(bigfullnamefile)
bigfullname = None
try:
bigfullname = pd.read_csv(bigfullnamefile,index_col='c_fullname',delimiter=',',dtype='str')
except UnicodeDecodeError:
bigfullname = pd.read_csv(bigfullnamefile,index_col='c_fullname',delimiter=',',dtype='str',encoding='cp1252')
# Add c_hlevel, domain, and fullname_int columns
if "c_hlevel" not in bigfullname.columns: bigfullname.insert(1, "c_hlevel", [x.count("\\") for x in bigfullname.index])
bigfullname.insert(1, "domain", [x.split('\\')[2] if "PCORI_MOD" not in x else "MODIFIER" for x in bigfullname.index])
bigfullname['fullname_int']=range(0,len(bigfullname))
bigfullname.to_sql('bigfullname',conn,if_exists='replace')
print("Converting path to int...")
# Shrink the frame (remove c_name and fullname and hlevel and domain and add just the fullname_int)
#outdf = delish.join(bigfullname,rsuffix='_bf',how='inner').reset_index()[['fullname_int','refresh_date','site','c']]
outdf = pd.DataFrame()
for t in totals:
outdf=outdf.append(t)
outdf=outdf.join(bigfullname,on='c_fullname',rsuffix='_bf',how='inner').reset_index()[['fullname_int','agg_date','agg_count','site']]
print("Writing totalnum SQL...")
# Temp step - use old style column names for compatibility
#outdf=outdf.rename(columns={'agg_date':'refresh_date','agg_count':'c'})
outdf.to_sql("totalnums",conn,if_exists='replace', index=False)
# Add indexes
print("Indexing...")
cur = conn.cursor()
cur.execute("CREATE INDEX bfn_0 on bigfullname(c_hlevel)")
cur.execute("CREATE INDEX bfn_int on bigfullname(fullname_int)")
cur.execute("CREATE INDEX tot_int on totalnums(fullname_int)")
print("Done!")
def totalnum_load(fname="",df=None):
if df is None:
# Support both utf-8 and cp1252
try:
df = pd.read_csv(fname,index_col=0)
except UnicodeDecodeError:
df = pd.read_csv(fname, index_col=0,encoding='cp1252')
# Remove null rows
#df = df.loc[(df.ix[:,3:]!=0).any(axis=1)]
# Lowercase totalnum columns
df = df.reset_index().rename(columns=lambda x: x.lower())
# Reorder columns
df = df[['c_fullname','agg_date','agg_count']]
# Convert totalnums to floats
df = pd.concat([df.iloc[:,0:2],(df.iloc[:,2:].apply(pd.to_numeric,errors="coerce"))],axis=1)
# And convert date string to datetime
df = pd.concat([df.iloc[:, 0:1], pd.to_datetime(df['agg_date']),df.iloc[:,2]], axis=1)
# Get site id out of report_siteid_blah.csv
rfn = fname[::-1]
fname_only = rfn[0:rfn.index('/')][::-1]
fns = fname_only[fname_only.index('_')+1:]
fns = fns[0:fns.index('_')]
df['site']=fns
return df
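# Minimal usage sketch, assuming a report following the naming convention in
# the module docstring (report_[siteid]_[foo].csv) exists under basedir; the
# filename below is illustrative only.
def example_load_one_report():
    return totalnum_load(basedir + "/report_SITE1_2020-05.csv")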
if __name__=='__main__':
print("SQLite Version is:", sqlite3.sqlite_version)
buildDb()
postProcess()
None
``` |
{
"source": "jklarson/volttron-applications",
"score": 3
} |
#### File: MpcAgent/mpc/MPC.py
```python
from . import python_building
from . import python_control
from . import CBC_Gui
import time
# Scale the clock
def scale_time(seconds):
# 1 second real time = 1 hours simulated time
# return 30.0*seconds/3600.0
# Run in real time
return seconds
# This is a cludge. It should return the same value
# as the control period from the control object.
PERIOD = scale_time(10*60)
class MPC:
def __init__(self):
# Setup the actuator and control modules
self.bldg = python_building.Building()
self.cntrl = python_control.Control(self.bldg.get_num_zones())
self.cntrl.set_max_units(self.bldg.get_num_zones()/2)
self.gui = None
def make_gui(self):
self.gui = CBC_Gui.CBC_Gui(self.bldg)
return self.gui
def set_outdoor_temp(self,degF):
self.bldg.set_outdoor_temp(degF)
def get_outdoor_temp(self):
return self.bldg.get_outdoor_temp()
def run_control(self,simHrs):
self.bldg.advance(simHrs)
for zone in range(0,self.bldg.get_num_zones()):
self.cntrl.set_upper_limit(zone,self.bldg.get_high_temp_limit(zone))
self.cntrl.set_lower_limit(zone,self.bldg.get_low_temp_limit(zone))
self.cntrl.set_zone_temp(zone,self.bldg.get_indoor_temp(zone))
self.cntrl.set_outside_temp(self.bldg.get_outdoor_temp())
self.cntrl.run_control()
for zone in range(0,self.bldg.get_num_zones()):
self.bldg.set_hvac_mode(zone,self.cntrl.get_hvac_command(zone))
def cleanup(self):
self.gui.exit()
self.bldg.cleanup()
self.cntrl.cleanup()
```
#### File: MpcAgent/src/cbc_archiver.py
```python
import os
from string import *
import time
from pytz import timezone
from smap import driver, util
# SMAP heading
smapHeading = "ORNL/cbc"
# Data will be scraped from whichever of these files has the
# most recent write
fileA = "scanA.csv"
fileB = "scanB.csv"
fileHandle = None
# Structure to hold most recent data scraped for a thermostat
class Thermostat:
timestamp = None
temp = None
upper_temp_limit = None
lower_temp_limit = None
addr = None
mode = None
# Map from zone address to Thermostat object for that address
zoneInfo = dict()
# Get the most recently updated file, or return None
# if neither file exists
def select_most_recent_file():
mA = None
mB = None
try:
mA = os.path.getmtime(fileA)
except OSError:
pass
try:
mB = os.path.getmtime(fileB)
except OSError:
pass
if mA == None and mB == None:
return None
if mA == None and mB != None:
return fileB
if mA != None and mB == None:
return fileA
if mA > mB:
return fileA
return fileB
def scrape():
global fileHandle
count = 0
which = select_most_recent_file()
if which == None:
return
if fileHandle == None or fileHandle.name != which:
fileHandle = open(which,"rb",0)
# Reset the end of file indicator
fileHandle.seek(fileHandle.tell())
# Go through the file line by line updating the thermostat
# data as we go
for line in fileHandle:
words = line.split(",")
count = count + 1
if len(words) > 12:
newData = Thermostat()
newData.timestamp = words[0]
newData.addr = words[2]
newData.temp = words[4]
newData.mode = words[6]
if newData.mode == 'idle':
newData.mode = 0
elif newData.mode == 'heat1':
newData.mode = 1
elif newData.mode == 'heat2':
newData.mode = 2
elif newData.mode == 'cool1':
newData.mode = -1
elif newData.mode == 'cool2':
newData.mode = -2
else:
newData.mode = 999
newData.lower_temp_limit = words[10]
newData.upper_temp_limit = words[12]
zoneInfo[newData.addr] = newData
print(("Processed ",count," new lines in file ",fileHandle.name,
fileHandle.tell()))
class cbc_archiver(driver.SmapDriver):
def setup(self, opts):
# Scrape data until we have seen all four zones
while len(zoneInfo) < 4:
scrape()
# Register a timeseries for each zone
print("Adding subjects...")
self.add_timeseries(smapHeading+"/peak_power_reduction",'%',data_type='double',timezone='US/Eastern')
for data in list(zoneInfo.values()):
name = smapHeading+"/zone/"+data.addr
self.add_timeseries(name+'/temp', 'F', data_type='double', timezone='US/Eastern')
self.add_timeseries(name+'/mode', '', data_type='long', timezone='US/Eastern')
self.add_timeseries(name+'/lower_temp_limit', 'F', data_type='double', timezone='US/Eastern')
self.add_timeseries(name+'/upper_temp_limit', 'F', data_type='double', timezone='US/Eastern')
print("done!")
def start(self):
util.periodicSequentialCall(self.read).start(60)
def read(self):
# Look for new data
scrape()
# Record the new data
timestamp = 0
operating = 0.0
would_operate = 0.0
max_operate = 0.0
peak_power_reduction = 0.0
for data in list(zoneInfo.values()):
max_operate = max_operate + 1.0
if data.mode != 0:
operating = operating+1.0
if float(data.temp) < float(data.lower_temp_limit) or float(data.temp) > float(data.upper_temp_limit):
would_operate = would_operate+1.0
name = smapHeading+"/zone/"+data.addr
timestamp = time.mktime(time.strptime(data.timestamp,"%Y-%m-%d %H:%M:%S"))
self.add(name+'/temp',timestamp,float(data.temp))
self.add(name+'/mode',timestamp,int(data.mode))
self.add(name+'/lower_temp_limit',timestamp,float(data.lower_temp_limit))
self.add(name+'/upper_temp_limit',timestamp,float(data.upper_temp_limit))
if would_operate > 0.0:
peak_power_reduction = 1.0-operating/would_operate
self.add(smapHeading+"/peak_power_reduction",timestamp,peak_power_reduction)
```
#### File: MasterNode/masternode/agent.py
```python
import logging
import sys
import numpy
import os
import os.path as p
import time
import json
import gevent
from zmq.utils import jsonapi
from volttron.platform.vip.agent import *
from volttron.platform.agent.base_historian import BaseHistorian
from volttron.platform.agent import utils
from volttron.platform.messaging import topics, headers as headers_mod
utils.setup_logging()
Log = logging.getLogger(__name__)
def enum(**enums):
return type('Enum', (), enums)
class MasterNode(Agent):
def __init__(self, config_path, **kwargs):
super(MasterNode, self).__init__(**kwargs)
self.Config = utils.load_config(config_path)
self.AgentStatesEnum = enum(
OFF = 0,
HEATING_STAGE_ONE = 6,
HEATING_STAGE_TWO = 3,
COOLING_STAGE_ONE = -3,
COOLING_STAGE_TWO = -6
)
self.initTimeStamp = time.time()
@Core.receiver('onsetup')
def setup(self, sender, **kwargs):
self.agentID = 'masternode'
# super(MasterNode, self).setup()
self.Bld = self.Config["numberOfBuildings"]
self.modelNodes = []
self.modelNodesPlatform = []
self.x0 = []
self.xref = []
# values from state space model; after discretization
self.Ad = 0.99984
self.Bd = 0.2564993
self.Cd = 0.0019237
self.c = 1
self.Nsim = 144
print("DIRECTORY :::", os.path.abspath(os.curdir))
base_dir = p.abspath(p.dirname(__file__))
numpy_file = p.join(base_dir,self.Config['data_file'])
u_file = p.join(base_dir,self.Config['u_file'])
d1_file = p.join(base_dir,self.Config['d1_file'])
# read regulation signal and downsample to 10 mins
Sig = numpy.loadtxt(open(numpy_file,"rb"),delimiter=",",skiprows=1)
# downsample, 150 steps is 10 mins in this file
self.Reg = []
for i in range(0, 21601, 150):
self.Reg.append(Sig[i, 0])
# load outside air temp, u and d1 variables
self.u = numpy.loadtxt(open(u_file,"rb"),delimiter=",",skiprows=0)
self.d1 = numpy.loadtxt(open(d1_file,"rb"),delimiter=",",skiprows=0)
# Scaling regulation signal to number of expected registered buildings
self.Reg = numpy.multiply(self.Bld, self.Reg)
self.additionalInit = False
self.i = 0
@PubSub.subscribe('pubsub',"modelnode/register")
def ProcessIncomingMessage(self, peer, sender, bus, topic, headers, message):
msg = message
ID = msg['ID']
x0 = msg['x0']
xref = msg['xref']
platform = msg['platform']
self.modelNodes.append(ID)
self.modelNodesPlatform.append(platform)
self.x0.append(x0)
self.xref.append(xref)
Log.info( " REGISTER REQUEST ::::::::::::::::::::::::::::::::: " + ID )
# every 10 mins
@Core.periodic(2)
def RunControl(self):
if len(self.modelNodes) != self.Bld:
Log.info("Number of buildings registered with Master node is "
+ str(len(self.modelNodes)) + ", MasterNode configured for " + str(self.Bld) + " yet")
return
if not self.additionalInit:
self.X = numpy.zeros((self.Bld, self.Nsim))
self.X[:,0] = self.x0
self.X_T = numpy.zeros((self.Bld, self.Nsim))
self.X_T[:,0] = self.x0
self.u0 = numpy.zeros(self.Bld)
for j in range(0, self.Bld):
if self.x0[j] > self.xref[j]:
self.u0[j] = self.AgentStatesEnum.COOLING_STAGE_ONE
else:
self.u0[j] = self.AgentStatesEnum.OFF
self.U = numpy.zeros((self.Bld, self.Nsim))
self.U[:, 0] = self.u0
self.additionalInit = True
# control strategy
#for i in range(1, self.Nsim): # had to make this p from p+1
i = self.i
Log.info( "ITERATION ::::::::::::::::::::::::::::::::: " + str(i) )
for j in range(0, self.Bld):
self.X[j, i] = self.Ad*self.X_T[j,i-1] + self.Bd*self.U[j,i-1] #% ODE - state eqn
self.X_T[j,i] = self.X[j,i] + self.Cd*self.d1[i-1] #% ODE in state space format - measurement eq
print(i, j)
print((self.X.shape, self.X_T.shape, len(self.xref)))
if self.X_T[j,i] >= self.xref[j] + 1:
self.U[j,i] = self.AgentStatesEnum.COOLING_STAGE_ONE # % decision for bldg j
elif self.X_T[j,i] <= self.xref[j] - 0.5:
self.U[j,i] = self.AgentStatesEnum.OFF # % decision for bldg j
else:
self.U[j,i] = self.U[j,i-1] # % decision for bldg j; stay the same
# compute deviations from set pt and sort desc
Dev = self.X_T[:,i] - self.xref[j]
# sort desc
#Dev.sort()
#Dev = Dev[::-1] # reverses the array to desc
# instead of above 2 lines, use argsort because we need the indices pointing to which building to command
# Ordered by lowest temp difference
OrderAsc = numpy.argsort(Dev)
# reverse asc order to get desc order
# ordered by highest temp difference
OrderDesc = OrderAsc[::-1]
# no of buildings reqd to satisfy reg signal,
# use prev step to get next step.
# because bldgs go up or down in 3 kW increments, divide by 3 to get no of bldgs
ReqBld = int(abs(round(self.Reg[i-1]/3.0)))
Log.info("No of required bldgs: " +str(ReqBld) + " ! = regulation need of: " + str(self.Reg[i-1]))
count = 0
if self.Reg[i-1] > 0:
# increase power consumption starting with highest temp difference
for k in range(0, self.Bld):
if self.U[OrderDesc[k],i-1] == self.AgentStatesEnum.OFF:
self.U[OrderDesc[k],i] = self.AgentStatesEnum.COOLING_STAGE_ONE
count = count + 1
elif self.U[OrderDesc[k],i-1] == self.AgentStatesEnum.COOLING_STAGE_ONE:
self.U[OrderDesc[k],i] = self.AgentStatesEnum.COOLING_STAGE_TWO
count = count + 1
if count >= ReqBld:
break
if self.Reg[i-1] < 0:
# decrease power consumption, aka switch off equipment, starting with lowest temp difference for comfort
for k in range(0, ReqBld):
if self.U[OrderAsc[k],i-1] == self.AgentStatesEnum.COOLING_STAGE_ONE:
self.U[OrderAsc[k],i] = self.AgentStatesEnum.OFF
count = count + 1
elif self.U[OrderAsc[k],i-1] == self.AgentStatesEnum.COOLING_STAGE_TWO:
self.U[OrderAsc[k],i] = self.AgentStatesEnum.COOLING_STAGE_ONE
count = count + 1
if count >= ReqBld:
break
for j in range(0, self.Bld):
msg = {}
msg['ID'] = self.modelNodes[j]
msg['action'] = self.U[j,i]
headers = {headers_mod.FROM: self.agentID}
# headers[headers_mod.CONTENT_TYPE] = headers_mod.CONTENT_TYPE.JSON
# self.publish( topics.BUILDING_SEND(campus='ORNL',
# building=self.modelNodesPlatform[j],
# topic='masternode/command'),
# headers, json.dumps(msg) )
self.vip.pubsub.publish(
'pubsub', topic='masternode/command', headers=headers, message=msg)
Log.info( numpy.array_str(self.U[:,i]) )
self.i = self.i + 1
if self.i == self.Nsim:
self.i = 0
self.additionalInit = False
def main(argv=sys.argv):
try:
utils.vip_main(MasterNode)
except Exception as e:
Log.exception('unhandled exception')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
```
#### File: wbe/src/wu_helper.py
```python
import time
import urllib.request, urllib.error, urllib.parse
import json
def get_forecast_temp_10day():
retRows = []
url = "http://api.wunderground.com/api/e136063baaea177f/hourly10day/q/WA/Richland.json"
f = urllib.request.urlopen(url)
json_string = f.read()
parsed_json = json.loads(json_string)
records = parsed_json["hourly_forecast"]
for rec in records:
ts = float(rec["FCTTIME"]["epoch"])
ts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
temp = float(rec["temp"]["english"])
retRows.append((ts, temp))
f.close()
return retRows
if __name__ == '__main__':
retRows = get_forecast_temp_10day()
for row in retRows:
print("Results %s: %s" % (row[0], row[1]))
``` |
{
"source": "jklasa/fortheking-attack-calculator",
"score": 3
} |
#### File: jklasa/fortheking-attack-calculator/calc.py
```python
import flask
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
app = Flask(__name__)
Bootstrap(app)
# Server parameters
CLIENT_SIDE_URL = "http://172.16.58.3"
PORT = 8080
@app.route("/")
def index():
return render_template("calc.html")
if __name__ == "__main__":
#app.run(host='0.0.0.0', debug=False, threaded=True, port=PORT)
app.run(host="localhost", debug=True, port=PORT)
``` |
{
"source": "jkleczar/ttslabdev",
"score": 2
} |
#### File: ttslabdev/modules/HALIGN_Features.py
```python
from __future__ import unicode_literals, division, print_function # Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import codecs
import os
import sys
import logging
import subprocess
from tempfile import NamedTemporaryFile
from ConfigParser import ConfigParser
from speechlabels import parse_path, type_files
#EXTs
WAVE_EXT = "wav"
MFCC_EXT = "mfc"
#PSMFCC_EXT = "psmfc"
TIMES_EXT = "times"
#BINs
HCOPY_BIN = "HCopy"
log = logging.getLogger("HAlign.Features")
class AudioFeatures(object):
""" Manages audio and feature files...
"""
def __init__(self, wavlocation, featsconflocation):
""" Initialise...
"""
if not os.path.isdir(wavlocation):
raise Exception("'%s' is not an existing directory..." % wavlocation)
log.debug(unicode(self) + " loading audio files at '%s'." % (wavlocation))
self.wavlocation = wavlocation
self.wavfilelist = type_files(os.listdir(self.wavlocation), WAVE_EXT)
self.wavfilelist.sort()
self.hcopy_parms = self._loadFeatConf(featsconflocation)
def _loadFeatConf(self, location):
""" Load configuration needed for feature extraction...
"""
log.debug(unicode(self) + " loading config file at '%s'." % (location))
with codecs.open(location, encoding="utf-8") as fh:
featcfp = ConfigParser()
featcfp.readfp(fh)
return list(featcfp.items("HCOPY")) + list(featcfp.items("GLOBAL"))
def getWavFilelist(self):
return self.wavfilelist[:]
def dumpFeatConf(self, f):
""" Dump configuration to file...
"""
try:
for k, v in self.hcopy_parms:
f.write(k.upper() + " = " + v + "\n")
f.flush()
except AttributeError:
with codecs.open(f, "w", encoding="utf-8") as outfh:
for k, v in self.hcopy_parms:
outfh.write(k.upper() + " = " + v + "\n")
def dumpSCP(self, f, targetdir):
""" Dump SCP to file...
"""
try:
for filename in self.wavfilelist:
f.write(os.path.join(self.wavlocation, filename) + " " + \
os.path.join(targetdir, ".".join([parse_path(filename)[2], MFCC_EXT])) + "\n")
f.flush()
except AttributeError:
with codecs.open(f, "w", encoding="utf-8") as outfh:
for filename in self.wavfilelist:
outfh.write(os.path.join(self.wavlocation, filename) + " " + \
os.path.join(targetdir, ".".join([parse_path(filename)[2], MFCC_EXT])) + "\n")
def makeFeats(self, targetdir):
""" Run HCopy to make features...
"""
if not os.path.isdir(targetdir):
raise Exception("'%s' is not an existing directory..." % targetdir)
elif len(os.listdir(targetdir)) != 0:
print("WARNING: Directory '%s' is not empty..." % targetdir)
#raise Exception("'%s' is not empty..." % targetdir)
elif self.hcopy_parms is None:
raise Exception("HCopy configuration not loaded...")
#write SCP...
tempscpfh = NamedTemporaryFile(mode="w+t")#, encoding="utf-8")
self.dumpSCP(tempscpfh, targetdir)
#write hcopy.conf
tempconffh = NamedTemporaryFile(mode="w+t")#, encoding="utf-8")
self.dumpFeatConf(tempconffh)
#execute HCopy...
p = subprocess.Popen(" ".join([HCOPY_BIN,
"-A",
"-D",
"-V",
"-T",
"1",
"-C",
tempconffh.name,
"-S",
tempscpfh.name]),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
close_fds = True,
shell = True)
so, se = p.communicate()
log.info("makeFeats:\n" +
"================================================================================\n" +
unicode(so, encoding="utf-8") +
"================================================================================\n")
if bool(se):
log.warning("makeFeats:\n" +
"================================================================================\n" +
unicode(se, encoding="utf-8") +
"================================================================================\n")
returnval = p.returncode
tempscpfh.close()
tempconffh.close()
if returnval != 0:
raise Exception(HCOPY_BIN + " failed with code: " + unicode(returnval))
return returnval
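# Minimal usage sketch, assuming HCopy is on the PATH, a directory of wav files
# exists and an HCOPY/GLOBAL-style features config as parsed by _loadFeatConf is
# available; all paths below are illustrative.
def _example_make_feats():
    feats = AudioFeatures("/data/voice/wavs", "/data/voice/feats.conf")
    feats.makeFeats("/data/voice/mfcc")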
# try:
# from wav2psmfcc import FeatExtractor
# import numpy as np
# except ImportError:
# print("WARNING: Could not import modules necessary to do pitch synchronous feature extraction...")
# def find_closest_index(array, value):
# """ Returns the index in the array that has the minimum difference
# with value...
# """
# return np.array([abs(avalue - value) for avalue in array]).argmin()
# class PS_AudioFeatures(AudioFeatures):
# """ Allows pitch synchronous features to be extracted...
# """
# def __init__(self, wavlocation, featsconflocation):
# """ Inherit...
# """
# AudioFeatures.__init__(self, wavlocation, featsconflocation)
# def _loadFeatConf(self, location):
# """ Load configuration needed for feature extraction...
# """
# log.debug(unicode(self) + " loading config file at '%s'." % (location))
# with codecs.open(location, encoding="utf-8") as fh:
# featcfp = ConfigParser()
# featcfp.readfp(fh)
# return dict(list(featcfp.items("SIG2FV")) + list(featcfp.items("PRAAT")) + list(featcfp.items("GLOBAL")))
# def makeFeats(self, targetdir):
# """ Use 'praat' and 'sig2fv' to make feats...
# """
# if not os.path.isdir(targetdir):
# raise Exception("'%s' is not an existing directory..." % targetdir)
# elif len(os.listdir(targetdir)) != 0:
# raise Exception("'%s' is not empty..." % targetdir)
# elif self.hcopy_parms is None:
# raise Exception("HCopy configuration not loaded...")
# self.featsdir = targetdir
# log.info("Making PS Feats in '%s'..." % targetdir)
# fe = FeatExtractor(min_pitch=float(self.hcopy_parms['minpitch']),
# max_pitch=float(self.hcopy_parms['maxpitch']),
# def_stepsize=float(self.hcopy_parms['targetrate']) / 10000000,
# preemph_coef=self.hcopy_parms['preemcoef'],
# windowfactor=self.hcopy_parms['windowfactor'],
# fbank_order=self.hcopy_parms['numchans'],
# melcep_order=self.hcopy_parms['numceps'],
# lifter_coef=self.hcopy_parms['ceplifter'],
# window_type=self.hcopy_parms['window_type'],
# coefs_type=self.hcopy_parms['coefs_type'],
# delta_type=self.hcopy_parms['delta_type'],
# acc_type=self.hcopy_parms['acc_type'])
# for wavfilename in self.wavfilelist:
# wavfilelocation = os.path.join(self.wavlocation, wavfilename)
# fe.get_feats(wavfilelocation)
# fe.write_htk_featfile(os.path.join(targetdir, parse_path(wavfilename)[2] + "." + MFCC_EXT))
# fe.write_times(os.path.join(targetdir, parse_path(wavfilename)[2] + "." + TIMES_EXT))
# def warpMLF(self, inmlflocation, outmlflocation):
# """ Adjusts actual times in MLF to warped times that HTK uses because
# of fixed stepsize assumption...
# """
# with codecs.open(inmlflocation, encoding="utf-8") as infh:
# mlflines = infh.readlines()
# if mlflines[0].strip() != "#!MLF!#":
# raise Exception("MLF header not found in '%s'" % (inmlflocation))
# period = int(float(self.hcopy_parms['targetrate']))
# with codecs.open(outmlflocation, "w", encoding="utf-8") as outfh:
# for line in mlflines:
# if line[0].isdigit(): #Then it must be a field with times...
# linelist = line.split()
# linelist[0] = unicode(find_closest_index(times, int(linelist[0])) * period)
# linelist[1] = unicode(find_closest_index(times, int(linelist[1])) * period)
# outfh.write(" ".join(linelist) + "\n")
# elif line.startswith('"'):
# current_basename = parse_path(line.strip().strip('"'))[2]
# with codecs.open(os.path.join(self.featsdir, current_basename + "." + TIMES_EXT), encoding="utf-8") as infh:
# times = [0] + [float(time.strip()) * 10000000 for time in infh.readlines()]
# outfh.write(line)
# else: #must be a "#!MLF!#" or "."
# outfh.write(line)
# def unwarpRec(self, inreclocation, outreclocation):
# """ Translates times in a rec file from HTK time to actual time...
# roughly inverse procedure to what is done in 'warpMLF'
# """
# with codecs.open(inreclocation, encoding="utf-8") as infh:
# reclines = infh.readlines()
# basename = parse_path(inreclocation)[2]
# with codecs.open(os.path.join(self.featsdir, basename + "." + TIMES_EXT), encoding="utf-8") as infh:
# times = [0] + [int(float(time.strip()) * 10000000) for time in infh.readlines()]
# period = int(float(self.hcopy_parms['targetrate']))
# with codecs.open(outreclocation, "w", encoding="utf-8") as outfh:
# for line in reclines:
# if not line[0].isdigit():
# raise Exception("Error while parsing '%s'" % (inreclocation))
# else: #all lines should have a start and end time...
# linelist = line.split()
# # DEMITASSE: Eish... Fix off by a couple errors...
# starttime = int(linelist[0])
# endtime = int(linelist[1])
# if starttime % 10 != 0:
# if starttime % 10 >= 5:
# print("-%s" % (10 - starttime % 10))
# starttime += 10 - starttime % 10
# else:
# print("+%s" % (starttime % 10))
# starttime -= starttime % 10
# if endtime % 10 != 0:
# if endtime % 10 >= 5:
# print("-%s" % (10 - endtime % 10))
# endtime += 10 - endtime % 10
# else:
# print("+%s" % (endtime % 10))
# endtime -= endtime % 10
# linelist[0] = unicode(times[starttime / period])
# linelist[1] = unicode(times[endtime / period])
# outfh.write(" ".join(linelist) + "\n")
```
#### File: voices/yoruba/ttslab_make_tonevoice.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys, os
import ttslab
PHONESET_FILE = "phoneset.pickle"
PRONUNDICT_FILE = "pronundict.pickle"
PRONUNADDENDUM_FILE = "pronunaddendum.pickle"
G2P_FILE = "g2p.pickle"
ENGPHONESET_FILE = "engphoneset.pickle"
ENGPRONUNDICT_FILE = "engpronundict.pickle"
ENGPRONUNADDENDUM_FILE = "engpronunaddendum.pickle"
ENGG2P_FILE = "engg2p.pickle"
HTSMODELS_DIR = "data/hts"
USCATALOGUE_FILE = "data/unitcatalogue.pickle"
def hts():
from ttslab.defaultvoice import LwaziHTSVoice
from ttslab.voices.yoruba_default import SynthesizerHTSME_Tone_NoTone
voice = LwaziHTSVoice(phoneset=ttslab.fromfile(PHONESET_FILE),
g2p=ttslab.fromfile(G2P_FILE),
pronundict=ttslab.fromfile(PRONUNDICT_FILE),
pronunaddendum=ttslab.fromfile(PRONUNADDENDUM_FILE),
synthesizer=SynthesizerHTSME_Tone_NoTone(voice=None, models_dir=os.path.join(os.getcwd(), HTSMODELS_DIR)))
ttslab.tofile(voice, "hts.voice.pickle")
if __name__ == "__main__":
try:
switch = sys.argv[1]
assert switch in ["hts"]
except IndexError:
print("USAGE: ttslab_make_tonevoice.py [hts|...]")
sys.exit(1)
if switch == "hts":
hts()
else:
raise NotImplementedError
```
#### File: data/scripts/extract_f0.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import os
import multiprocessing
from glob import glob
import subprocess
def extract_lf0(parms):
cmds = "python scripts/wav2lf0_fixocterrs.py %(infn)s %(outfn)s %(lowerf0)s %(upperf0)s"
subprocess.call(cmds % parms, shell=True)
if __name__ == "__main__":
try:
import multiprocessing
POOL = multiprocessing.Pool(processes=multiprocessing.cpu_count())
def map(f, i):
return POOL.map(f, i, chunksize=1)
except ImportError:
pass
argnames = ["lowerf0", "upperf0"]
assert len(argnames) == len(sys.argv[1:])
args = dict(zip(argnames, sys.argv[1:]))
#make parms:
parms = []
for fn in glob(os.path.join("wav", "*.wav")):
tempd = dict(args)
tempd["infn"] = fn
base = os.path.splitext(os.path.basename(fn))[0]
tempd["outfn"] = os.path.join("lf0", base + ".lf0")
parms.append(tempd)
#run:
map(extract_lf0, parms)
```
#### File: data/scripts/wav2lf0_fixocterrs.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import array
import math
import numpy as np
import ttslab
from ttslab.trackfile import Track
ttslab.extend(Track, "ttslab.trackfile.funcs.tfuncs_praat")
def friendly_log(f):
try:
return math.log(f)
except ValueError:
return float('-1e+10')
if __name__ == "__main__":
fn = sys.argv[1]
outfn = sys.argv[2]
minf0 = float(sys.argv[3])
maxf0 = float(sys.argv[4])
t = Track()
t.get_f0(fn, minpitch=minf0, maxpitch=maxf0, timestep=0.005, fixocterrs=True) #timestep hardcoded here because of hack below...
#hack aligns samples with equiv from HTS script:
pad = np.array([0.0, 0.0]).reshape(-1, 1)
f0hzvalues = np.concatenate([pad, t.values, pad])
lf0 = array.array(b"f", map(friendly_log, f0hzvalues))
with open(outfn, "wb") as outfh:
lf0.tofile(outfh)
```
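The script above writes log-F0 as raw 32-bit floats, with `friendly_log` substituting a large negative constant (about -1e10) for unvoiced frames. A minimal sketch of reading such a file back into Hz values (the filename and the -1e9 voicing threshold are assumptions for illustration, not part of the repository):
```python
import numpy as np

lf0 = np.fromfile("lf0/utt0001.lf0", dtype=np.float32)  # hypothetical path
voiced = lf0 > -1.0e9            # friendly_log() wrote ~-1e10 for unvoiced frames
f0_hz = np.zeros_like(lf0)
f0_hz[voiced] = np.exp(lf0[voiced])  # invert the log taken at write time
print("voiced frames: %d/%d" % (int(voiced.sum()), len(lf0)))
```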
#### File: voicetools/speechbrowser/speechbrowser.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import os
import codecs
import time
import pygtk
pygtk.require("2.0")
import gtk, gobject
from matplotlib.backends.backend_gtk import FigureCanvasGTK
from matplotlib.figure import Figure
import ttslab
from ttslab.hrg import Utterance
ttslab.extend(Utterance, "ufuncs_analysis")
from ttslab.waveform import Waveform
def loadworklist(fn, sep=","):
worklist = []
with codecs.open(fn, encoding="utf-8") as infh:
for line in infh:
try:
fname, wordindex = line.strip().split(sep)
wordindex = int(wordindex)
worklist.append([fname, wordindex])
except ValueError:
pass
return worklist
def getpronun(word, phmap):
pronun = []
for syl in word.get_daughters():
for ph in syl.get_daughters():
pronun.append(phmap[ph["name"]])
return pronun
class CorpusView(object):
def __init__(self, worklist, phmap):
self.phmap = phmap
self.worklist = worklist
self.current_index = 0
self.current_wordindex = self.worklist[self.current_index][1]
self.current_utt = ttslab.fromfile(self.worklist[self.current_index][0])
self.current_utt.fill_startendtimes()
self.transcriptions = {self.worklist[self.current_index][0]: self.current_utt["text"]}
self.comments = {self.worklist[self.current_index][0]: ""}
self.pronuns = {self.worklist[self.current_index][0]: [" ".join(getpronun(w, self.phmap)) for w in self.current_utt.gr("SylStructure")]}
def save_data(self):
ttslab.tofile([self.transcriptions, self.pronuns, self.comments],
"ttslab_speechbrowser_" + time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())) + ".pickle")
def next(self):
self.save_data()
if self.current_index < len(self.worklist) - 1:
self.current_index += 1
self.current_wordindex = self.worklist[self.current_index][1]
self.current_utt = ttslab.fromfile(self.worklist[self.current_index][0])
self.current_utt.fill_startendtimes()
if self.worklist[self.current_index][0] not in self.transcriptions:
self.transcriptions[self.worklist[self.current_index][0]] = self.current_utt["text"]
if self.worklist[self.current_index][0] not in self.comments:
self.comments[self.worklist[self.current_index][0]] = ""
if self.worklist[self.current_index][0] not in self.pronuns:
self.pronuns[self.worklist[self.current_index][0]] = [" ".join(getpronun(w, self.phmap)) for w in self.current_utt.gr("SylStructure")]
def prev(self):
self.save_data()
if self.current_index > 0:
self.current_index -= 1
self.current_wordindex = self.worklist[self.current_index][1]
self.current_utt = ttslab.fromfile(self.worklist[self.current_index][0])
self.current_utt.fill_startendtimes()
class SpeechbrowserApp(object):
def __init__(self, phmap):
builder = gtk.Builder()
builder.add_from_file(os.path.join(os.getenv("TTSLABDEV_ROOT"), "voicetools/speechbrowser", "speechbrowser.glade"))
builder.connect_signals({"on_window1_destroy": gtk.main_quit,
"on_toolbutton_open_clicked": self.on_toolbutton_open_clicked,
"on_button_playutt_clicked": self.on_button_playutt_clicked,
"on_button_playwordorig_clicked": self.on_button_playwordorig_clicked,
"on_button_playwordsynth_clicked": self.on_button_playwordsynth_clicked,
"on_button_next_clicked": self.on_button_next_clicked,
"on_button_prev_clicked": self.on_button_prev_clicked})
self.window1 = builder.get_object("window1")
self.frame_specutt = builder.get_object("frame_specutt")
self.button_playutt = builder.get_object("button_playutt")
self.frame_words = builder.get_object("frame_words")
self.entry_transcription = builder.get_object("entry_transcription")
self.table_utt = builder.get_object("table_utt")
self.table_words = builder.get_object("table_words")
self.frame_wordspecorig = builder.get_object("frame_wordspecorig")
self.frame_wordspecsynth = builder.get_object("frame_wordspecsynth")
self.button_playwordorig = builder.get_object("button_playwordorig")
self.button_playwordsynth = builder.get_object("button_playwordsynth")
self.label_word1 = builder.get_object("label_word1")
self.label_word2 = builder.get_object("label_word2")
self.label_word3 = builder.get_object("label_word3")
self.entry_word1 = builder.get_object("entry_word1")
self.entry_word2 = builder.get_object("entry_word2")
self.entry_word3 = builder.get_object("entry_word3")
self.statusbar = builder.get_object("statusbar")
self.entry_comment = builder.get_object("entry_comment")
# self.combobox_comment = builder.get_object("combobox_comment")
# liststore = gtk.ListStore(gobject.TYPE_STRING)
# self.combobox_comment.set_model(liststore)
# self.combobox_comment.set_entry_text_column(0)
# self.combobox_comment.append_text("transcription error")
# self.combobox_comment.append_text("pronunciation error")
# self.combobox_comment.append_text("noise present")
# self.combobox_comment.append_text("no problem")
# cell = gtk.CellRendererText()
# self.combobox_comment.pack_start(cell, True)
# self.combobox_comment.add_attribute(cell, 'text', 1)
self.window1.show()
self.phmap = phmap
def update_wordview(self):
u = self.corpusview.current_utt
words = u.get_relation("SylStructure").as_list()
word = words[self.corpusview.current_wordindex]
try:
prevword = word.prev_item
prevwordname = prevword["name"]
origstartsample = int(u["waveform"].samplerate * prevword["start"]) #slice indices must be ints
synthstartsample = int(u["lindists"]["utt"]["waveform"].samplerate * prevword["start"])
prevwordpronun = self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][self.corpusview.current_wordindex-1]
except TypeError:
prevwordname = "NONE"
origstartsample = 0
synthstartsample = 0
prevwordpronun = ""
wordname = word["name"]
wordpronun = self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][self.corpusview.current_wordindex]
try:
nextword = word.next_item
nextwordname = nextword["name"]
origendsample = int(u["waveform"].samplerate * nextword["end"]) #slice indices must be ints
synthendsample = int(u["lindists"]["utt"]["waveform"].samplerate * nextword["end"])
nextwordpronun = self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][self.corpusview.current_wordindex+1]
except TypeError:
nextwordname = "NONE"
origendsample = len(u["waveform"].samples)
synthendsample = len(u["lindists"]["utt"]["waveform"].samples)
nextwordpronun = ""
self.label_word1.set_label(prevwordname)
self.label_word2.set_label(wordname)
self.label_word3.set_label(nextwordname)
self.entry_word1.set_text(prevwordpronun)
self.entry_word2.set_text(wordpronun)
self.entry_word3.set_text(nextwordpronun)
self.origwordcontextwav = Waveform()
self.origwordcontextwav.samplerate = u["waveform"].samplerate
self.origwordcontextwav.samples = u["waveform"].samples[origstartsample:origendsample]
origwordcontext_specfig = Figure(dpi=72)
origwordcontext_specplot = origwordcontext_specfig.add_subplot(111)
origwordcontext_specplot.specgram(self.origwordcontextwav.samples,
Fs=self.origwordcontextwav.samplerate,
NFFT=128, noverlap=64,
xextent=(0.0, len(self.origwordcontextwav.samples) / self.origwordcontextwav.samplerate)) #x extent is the duration in seconds
origwordcontext_speccanvas = FigureCanvasGTK(origwordcontext_specfig)
framecontents = self.frame_wordspecorig.get_children()
if framecontents:
self.frame_wordspecorig.remove(framecontents[0])
self.frame_wordspecorig.add(origwordcontext_speccanvas)
self.synthwordcontextwav = Waveform()
self.synthwordcontextwav.samplerate = u["lindists"]["utt"]["waveform"].samplerate
self.synthwordcontextwav.samples = u["lindists"]["utt"]["waveform"].samples[synthstartsample:synthendsample]
synthwordcontext_specfig = Figure(dpi=72)
synthwordcontext_specplot = synthwordcontext_specfig.add_subplot(111)
synthwordcontext_specplot.specgram(self.synthwordcontextwav.samples,
Fs=self.synthwordcontextwav.samplerate,
NFFT=128, noverlap=64,
xextent=(0.0, len(self.synthwordcontextwav.samples) / self.synthwordcontextwav.samplerate)) #x extent is the duration in seconds
synthwordcontext_speccanvas = FigureCanvasGTK(synthwordcontext_specfig)
framecontents = self.frame_wordspecsynth.get_children()
if framecontents:
self.frame_wordspecsynth.remove(framecontents[0])
self.frame_wordspecsynth.add(synthwordcontext_speccanvas)
self.statusbar.push(0, "Item: %s/%s (Word index: %s)" % (self.corpusview.current_index + 1, len(self.corpusview.worklist), self.corpusview.current_wordindex))
self.table_words.show_all()
def savepronuns(self, wordindex):
if wordindex != 0:
self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][wordindex-1] = unicode(self.entry_word1.get_text(), "utf-8")
self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][wordindex] = unicode(self.entry_word2.get_text(), "utf-8")
try:
self.corpusview.pronuns[self.corpusview.worklist[self.corpusview.current_index][0]][wordindex+1] = unicode(self.entry_word3.get_text(), "utf-8")
except IndexError:
pass
def change_wordview(self, button):
self.savepronuns(self.corpusview.current_wordindex)
self.corpusview.current_wordindex = button.wordindex
self.update_wordview()
def update_uttview(self):
utt = self.corpusview.current_utt
origspeech_specfig = Figure(dpi=72)
origspeech_specplot = origspeech_specfig.add_subplot(111)
origspeech_specplot.specgram(utt["waveform"].samples, Fs=utt["waveform"].samplerate, NFFT=128, noverlap=64)
origspeech_speccanvas = FigureCanvasGTK(origspeech_specfig)
framecontents = self.frame_specutt.get_children()
if framecontents:
self.frame_specutt.remove(framecontents[0])
self.frame_specutt.add(origspeech_speccanvas)
self.entry_transcription.set_text(self.corpusview.transcriptions[self.corpusview.worklist[self.corpusview.current_index][0]])
self.entry_comment.set_text(self.corpusview.comments[self.corpusview.worklist[self.corpusview.current_index][0]])
self.buttonbox_words = gtk.HButtonBox()
words = utt.get_relation("Word").as_list()
for i, word in enumerate(words):
button = gtk.Button()
button.wordindex = i
button.connect("clicked", self.change_wordview)
button.set_label(word["name"])
self.buttonbox_words.pack_end(button)
framecontents = self.frame_words.get_children()
if framecontents:
self.frame_words.remove(framecontents[0])
self.frame_words.add(self.buttonbox_words)
self.table_utt.show_all()
self.update_wordview()
def on_button_next_clicked(self, obj):
self.corpusview.transcriptions[self.corpusview.worklist[self.corpusview.current_index][0]] = unicode(self.entry_transcription.get_text(), "utf-8")
self.corpusview.comments[self.corpusview.worklist[self.corpusview.current_index][0]] = unicode(self.entry_comment.get_text(), "utf-8")
self.savepronuns(self.corpusview.current_wordindex)
self.corpusview.next()
self.update_uttview()
def on_button_prev_clicked(self, obj):
self.corpusview.transcriptions[self.corpusview.worklist[self.corpusview.current_index][0]] = unicode(self.entry_transcription.get_text(), "utf-8")
self.corpusview.comments[self.corpusview.worklist[self.corpusview.current_index][0]] = unicode(self.entry_comment.get_text(), "utf-8")
self.savepronuns(self.corpusview.current_wordindex)
self.corpusview.prev()
self.update_uttview()
def on_button_playutt_clicked(self, obj):
self.corpusview.current_utt["waveform"].play()
def on_button_playwordorig_clicked(self, obj):
self.origwordcontextwav.play()
def on_button_playwordsynth_clicked(self, obj):
self.synthwordcontextwav.play()
def on_toolbutton_open_clicked(self, obj):
chooser = gtk.FileChooserDialog(title=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
chooser.set_current_folder(os.getcwd())
response = chooser.run()
if response == gtk.RESPONSE_OK:
filename = chooser.get_filename()
worklist = loadworklist(filename)
self.corpusview = CorpusView(worklist, self.phmap)
elif response == gtk.RESPONSE_CANCEL:
print('Closed, no files selected')
chooser.destroy()
self.update_uttview()
self.update_wordview()
if __name__ == "__main__":
voice = ttslab.fromfile(sys.argv[1])
app = SpeechbrowserApp(voice.phonemap)
gtk.main()
```
#### File: ttslabdev/voicetools/tfuncs_analysis.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
from scipy.spatial.distance import cdist
from ttslab.trackfile import Track
def dtw_align(track, track2, metric="euclidean", VI=None):
"""DP alignment between tracks....
Returns: cumdist, dist, path (corresponding sample indices)
The functionality is based on the distance implementations
available in scipy.spatial.distance.cdist thus refer to
this documentation for explanation of function args...
"""
assert track.numchannels == track2.numchannels, "Tracks don't have the same number of channels..."
dpp = np.zeros((track.numframes, track2.numframes), dtype=int)
cumdist = cdist(track.values, track2.values, metric=metric, VI=VI)
dist = np.array(cumdist)
dpp[0][0] = -1
for i in range(1, track.numframes):
cumdist[i][0] += cumdist[i-1][0]
dpp[i][0] = -1
for i in range(1, track2.numframes):
cumdist[0][i] += cumdist[0][i-1]
dpp[0][i] = 1
for i in range(1, track.numframes):
for j in range(1, track2.numframes):
if cumdist[i-1][j] < cumdist[i-1][j-1]:
if cumdist[i][j-1] < cumdist[i-1][j]:
cumdist[i][j] += cumdist[i][j-1]
dpp[i][j] = 1 #hold
else: #horizontal best
cumdist[i][j] += cumdist[i-1][j]
dpp[i][j] = -1 #jump
elif cumdist[i][j-1] < cumdist[i-1][j-1]:
cumdist[i][j] += cumdist[i][j-1]
dpp[i][j] = 1 #hold
else:
cumdist[i][j] += cumdist[i-1][j-1]
dpp[i][j] = 0 #jump
mapping = np.zeros(track.numframes, dtype=int)
cost = -1
j = track2.numframes - 1
for i in range(track.numframes - 1, -1, -1): #n-1 downto 0
if cost == -1:
cost = cumdist[i][j]
mapping[i] = j
while dpp[i][j] == 1:
j -= 1
if dpp[i][j] == 0:
j -= 1
path = []
for i, c in enumerate(mapping):
if i == 0:
path.append((i, c))
continue
repeating = range(path[-1][-1], c)
if repeating:
path.pop()
for j in repeating:
path.append((i-1, j))
path.append((i, c))
return cumdist, dist, path
def dtw_distances(track, track2, metric="euclidean", VI=None):
cumdist, dist, path = track.dtw_align(track2, metric=str(metric), VI=VI)
framedists = []
frametimes = []
for pathcoord in path:
x, y = pathcoord
framedists.append(dist[x][y])
frametimes.append(track.times[x])
t = Track()
t.values = np.array(framedists)
t.values = t.values.reshape(-1, 1)
t.times = np.array(frametimes)
return t
def linearpath_distances(track, track2, metric="euclidean", VI=None):
dist = cdist(track.values, track2.values, metric=str(metric), VI=VI)
framedists = []
try:
for i in range(len(track.times)):
framedists.append(dist[i][i])
except IndexError:
pass
t = Track()
t.values = np.array(framedists)
t.values = t.values.reshape(-1, 1)
t.times = np.array([track.times[i] for i in range(len(t.values))])
if track2.numframes != track.numframes:
print("linearpath_distances: WARNING: num frames difference is %s" % (track2.numframes - track.numframes))
return t
def distances(track, track2, method="dtw", metric="euclidean", VI=None):
if method == "dtw":
return track.dtw_distances(track2, metric=metric, VI=VI)
if method == "linear":
return track.linearpath_distances(track2, metric=metric, VI=VI)
else:
raise NotImplementedError("method: " + method)
def mask_indices(track, intervals):
""" return indices falling within intervals:
[[starttime, endtime], [starttime, endtime], ...]
"""
indices = np.array([], dtype=int)
for interval in intervals:
ca = track.times >= interval[0]
cb = track.times < interval[1]
indices = np.append(indices, np.nonzero(ca & cb))
#indices.extend([e[1] for e in zip(track.times, range(len(track.times))) if e[0] >= interval[0] and e[0] < interval[1]])
return indices
```
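A minimal usage sketch for the track-comparison helpers above (not part of the repository). In the voice tools these functions are normally bound onto `Track` via `ttslab.extend`; here they are called as plain functions, and the toy data and the flat import path are assumptions:
```python
import numpy as np
from ttslab.trackfile import Track
# assumed flat import; in-repo the functions are bound onto Track via ttslab.extend
from tfuncs_analysis import dtw_align, linearpath_distances

# two toy single-channel tracks at a 5 ms frame shift
t1 = Track(); t2 = Track()
t1.values = np.sin(np.linspace(0, 3, 100)).reshape(-1, 1)
t1.times = np.arange(100) * 0.005
t2.values = np.sin(np.linspace(0, 3, 120)).reshape(-1, 1)
t2.times = np.arange(120) * 0.005

cumdist, dist, path = dtw_align(t1, t2, metric="euclidean")
print("total alignment cost:", cumdist[-1][-1])
print("first path entries:", path[:5])      # (frame_in_t1, frame_in_t2) pairs

# frame-synchronous comparison (prints a warning because the lengths differ)
disttrack = linearpath_distances(t1, t2)
print("mean linear-path distance:", disttrack.values.mean())
```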
#### File: ttslabdev/voicetools/ttslab_make_htsmodels.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import shutil
from glob import glob
import tarfile
import ttslab
NAME = "ttslab_make_htsmodels.py"
WAV_EXT = "wav"
RAW_EXT = "raw"
UTT_EXT = "utt.pickle"
LAB_EXT = "lab"
FEAT_EXT = "mcep"
#Default options:
DEF_WORKING_DIR = "hts"
DEF_UTTS_DIR = "utts"
DEF_QUESTIONS_FILE = "etc/questions_qst001.hed"
DEF_UTTQUESTIONS_FILE = "etc/questions_utt_qst001.hed"
#HTS Training script vars...
DATASET = "dataset"
SPEAKER = "speaker"
UTT_SUBDIR = "data/utts"
RAW_SUBDIR = "data/raw"
WAV_SUBDIR = "data/wav"
QUESTIONS_SUBDIR = "data/questions"
COMPARE_TMP_SUBDIR = "data/tempcmp"
OUTWAV_SUBDIR = "gen/qst001/ver1/hts_engine"
WITH_SPTK_SEARCH_PATH = os.environ.get("SPTK_BIN")
WITH_HTS_SEARCH_PATH = os.environ.get("HTS_BIN")
WITH_HTS_ENGINE_SEARCH_PATH = os.environ.get("HTS_ENGINE_BIN")
CONFIGURE = "./configure --with-sptk-search-path=%s --with-hts-search-path=%s --with-hts-engine-search-path=%s SPEAKER=%s DATASET=%s LOWERF0=%s UPPERF0=%s SYNVP=False VOICE=%s"
MAKE = "make all"
def train_standard(parms):
#setup dirs...
os.makedirs(parms["workingdir"])
t = tarfile.open(parms["template"], "r:*")
t.extractall(parms["workingdir"])
#SETUP FILES
shutil.copy(parms["questionsfile"], os.path.join(parms["workingdir"], QUESTIONS_SUBDIR))
shutil.copy(parms["uttquestionsfile"], os.path.join(parms["workingdir"], QUESTIONS_SUBDIR))
print(os.getcwd())
for fn in sorted(glob(os.path.join(parms["utts"], "*." + UTT_EXT))):
print("PROCESSING: %s" % (fn))
#copy utt with DATASET_SPEAKER_bname to HTS tree:
shutil.copy(fn, os.path.join(parms["workingdir"], UTT_SUBDIR, "_".join([DATASET, SPEAKER, os.path.basename(fn)])))
#get raw audio files from utts:
u = ttslab.fromfile(fn)
waveform = u["waveform"]
waveform.write(os.path.join(parms["workingdir"],
RAW_SUBDIR,
"_".join([DATASET, SPEAKER, os.path.basename(fn)])[:-len(UTT_EXT)] + RAW_EXT))
waveform.write(os.path.join(parms["workingdir"],
WAV_SUBDIR,
"_".join([DATASET, SPEAKER, os.path.basename(fn)])[:-len(UTT_EXT)] + WAV_EXT))
#TRAIN...
os.chdir(parms["workingdir"])
os.system(CONFIGURE % (WITH_SPTK_SEARCH_PATH,
WITH_HTS_SEARCH_PATH,
WITH_HTS_ENGINE_SEARCH_PATH,
SPEAKER, DATASET, parms["pitchmin"], parms["pitchmax"],
parms["voice"]))
os.system(MAKE)
########################################
## SCRIPT ADMIN
def parse_arguments():
""" Setup all possible command line arguments....
"""
from argparse import ArgumentParser
parser = ArgumentParser(description="Sets up and performs HTS training...", prog=NAME)
parser.add_argument("voice",
help="specify location of voice file.",
metavar="VOICE_FILE")
parser.add_argument("template",
help="specify location of HTS training template script.",
metavar="HTS_TEMPLATE")
parser.add_argument("pitchmin",
help="minimum F0 value.",
metavar="PITCHMIN",
type=int)
parser.add_argument("pitchmax",
help="maximum F0 value.",
metavar="PITCHMAX",
type=int)
parser.add_argument("-o", "--workingdir",
help="specify location to create HTS training tree.",
metavar="WORKING_DIR",
default=DEF_WORKING_DIR)
parser.add_argument("-u", "--utts",
help="specify location of training utt files.",
metavar="UTTS_DIR",
default=DEF_UTTS_DIR)
parser.add_argument("-q", "--questionsfile",
help="specify location of the tree questions file.",
metavar="QUESTIONS_FILE",
default=DEF_QUESTIONS_FILE)
parser.add_argument("-Q", "--uttquestionsfile",
help="specify location of utterance level tree questions file.",
metavar="UTTQUESTIONS_FILE",
default=DEF_UTTQUESTIONS_FILE)
return parser.parse_args()
if __name__ == "__main__":
parms = parse_arguments().__dict__
train_standard(parms)
``` |
{
"source": "jkleczar/ttslab",
"score": 3
} |
#### File: ttslab/funcs/ifuncs_hts.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def itempos_inparent_f(item, relation):
item = item.get_item_in_relation(relation)
if item is None:
return 0
else:
return item.parent_item.get_daughters().index(item) + 1
def itempos_inparent_b(item, relation):
item = item.get_item_in_relation(relation)
if item is None:
return 0
else:
l = item.parent_item.get_daughters()
return len(l) - l.index(item)
def syllistsylstructrel_inphrase(phraseitem):
l = []
for worditem in phraseitem.get_daughters():
worditem = worditem.get_item_in_relation("SylStructure")
for sylitem in worditem.get_daughters():
l.append(sylitem)
return l
def numsyls_inphrase(phraseitem):
return len(syllistsylstructrel_inphrase(phraseitem))
def segpos_insyl_f(segitem):
""" position of the segment in the syllable (forward)
"""
return itempos_inparent_f(segitem, "SylStructure")
def segpos_insyl_b(segitem):
""" position of the segment in the syllable (backward)
"""
return itempos_inparent_b(segitem, "SylStructure")
def sylpos_inword_f(sylitem):
""" position of the current syllable in the current word (forward)
"""
return itempos_inparent_f(sylitem, "SylStructure")
def sylpos_inword_b(sylitem):
""" position of the current syllable in the current word (backward)
"""
return itempos_inparent_b(sylitem, "SylStructure")
def sylpos_inphrase_f(sylitem):
""" position of the current syllable in the current phrase (forward)
"""
sylitem = sylitem.get_item_in_relation("SylStructure")
try:
phraseitem = sylitem.traverse("parent.R:Phrase.parent")
except TraversalError:
return 0
syllist = syllistsylstructrel_inphrase(phraseitem)
return syllist.index(sylitem) + 1
def sylpos_inphrase_b(sylitem):
""" position of the current syllable in the current phrase (backward)
"""
sylitem = sylitem.get_item_in_relation("SylStructure")
try:
phraseitem = sylitem.traverse("parent.R:Phrase.parent")
except TraversalError:
return 0
syllist = syllistsylstructrel_inphrase(phraseitem)
return len(syllist) - syllist.index(sylitem)
def numsylsbeforesyl_inphrase(sylitem, feat, featvalue):
""" the number of syllables before the current syllable in the
current phrase with 'feat' = 'featvalue'
"""
sylitem = sylitem.get_item_in_relation("SylStructure")
try:
phraseitem = sylitem.traverse("parent.R:Phrase.parent")
except TraversalError:
return 0
syllist = syllistsylstructrel_inphrase(phraseitem)
idx = syllist.index(sylitem)
return len([syl for syl in syllist[:idx] if syl[feat] == featvalue])
def numsylsaftersyl_inphrase(sylitem, feat, featvalue):
""" the number of syllables after the current syllable in the
current phrase with 'feat' = 'featvalue'
"""
sylitem = sylitem.get_item_in_relation("SylStructure")
try:
phraseitem = sylitem.traverse("parent.R:Phrase.parent")
except TraversalError:
return 0
syllist = syllistsylstructrel_inphrase(phraseitem)
idx = syllist.index(sylitem)
return len([syl for syl in syllist[idx+1:] if syl[feat] == featvalue])
def syldistprev(sylitem, feat, featvalue):
""" the number of syllables from the current syllable to the
previous syllable with 'feat' = 'featvalue'
"""
sylitem = sylitem.get_item_in_relation("Syllable")
count = 1
nextsyl = sylitem.prev_item
while nextsyl:
if feat in nextsyl:
if nextsyl[feat] == featvalue:
return count
count += 1
nextsyl = nextsyl.prev_item
return 0
def syldistnext(sylitem, feat, featvalue):
""" the number of syllables from the current syllable to the
next syllable with 'feat' = 'featvalue'
"""
sylitem = sylitem.get_item_in_relation("Syllable")
count = 1
nextsyl = sylitem.next_item
while nextsyl:
if feat in nextsyl:
if nextsyl[feat] == featvalue:
return count
count += 1
nextsyl = nextsyl.next_item
return 0
def wordpos_inphrase_f(worditem):
""" position of the current word in the current phrase (forward)
"""
worditem = worditem.get_item_in_relation("Phrase")
phraseitem = worditem.parent_item
wordlist = phraseitem.get_daughters()
return wordlist.index(worditem) + 1
def wordpos_inphrase_b(worditem):
""" position of the current word in the current phrase (backward)
"""
worditem = worditem.get_item_in_relation("Phrase")
phraseitem = worditem.parent_item
wordlist = phraseitem.get_daughters()
return len(wordlist) - wordlist.index(worditem)
def numwordsbeforeword_inphrase(worditem, feat, featvalue):
""" the number of words before the current word in the
current phrase with 'feat' = 'featvalue'
"""
worditem = worditem.get_item_in_relation("Phrase")
phraseitem = worditem.parent_item
wordlist = phraseitem.get_daughters()
idx = wordlist.index(worditem)
return len([word for word in wordlist[:idx] if word[feat] == featvalue])
def numwordsafterword_inphrase(worditem, feat, featvalue): #renamed: was duplicated as numsylsaftersyl_inphrase, shadowing the syllable-level function above
""" the number of words after the current word in the
current phrase with 'feat' = 'featvalue'
"""
worditem = worditem.get_item_in_relation("Phrase")
phraseitem = worditem.parent_item
wordlist = phraseitem.get_daughters()
idx = wordlist.index(worditem)
return len([word for word in wordlist[idx+1:] if word[feat] == featvalue])
def worddistprev(worditem, feat, featvalue):
""" the number of words from the current words to the
previous word with 'feat' = 'featvalue'
"""
worditem = worditem.get_item_in_relation("Word")
count = 1
nextword = worditem.prev_item
while nextword:
if feat in nextword:
if nextword[feat] == featvalue:
return count
count += 1
nextword = nextword.prev_item
return 0
def worddistnext(worditem, feat, featvalue):
""" the number of words from the current words to the
next word with 'feat' = 'featvalue'
"""
worditem = worditem.get_item_in_relation("Word")
count = 1
nextword = worditem.next_item
while nextword:
if feat in nextword:
if nextword[feat] == featvalue:
return count
count += 1
nextword = nextword.next_item
return 0
def phrasepos_inutt_f(phraseitem):
""" position of the current phrase in utterence (forward)
"""
phraselist = phraseitem.relation.utterance.get_relation("Phrase").as_list()
return phraselist.index(phraseitem) + 1
def phrasepos_inutt_b(phraseitem):
""" position of the current phrase in utterence (backward)
"""
phraselist = phraseitem.relation.utterance.get_relation("Phrase").as_list()
return len(phraselist) - phraselist.index(phraseitem)
```
#### File: ttslab/ttslab/hrg.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class DuplicateItemInRelation(Exception):
pass
class TraversalError(Exception):
pass
class ItemContent(object):
""" Stores the actual features of an Item and keeps track of Items
belonging to specific Relations...
This class essentially exists so that actual content referred
to by Items can be shared by Items in different Relations.
"""
def __init__(self):
self.features = {}
self.relations = {}
def add_item_relation(self, item):
""" Adds the given item to the set of relations. Whenever an
Item is added to a Relation, it should add the name and
the Item reference to this set of name/item mappings. This
allows an Item to find out the set of all Relations that
it is contained in.
"""
relationname = item.relation.name
#two items sharing content are not allowed in the same relation
if relationname in self.relations:
raise DuplicateItemInRelation
self.relations[relationname] = item
def remove_item(self, item):
""" Removes Item and deletes self if this was the last Item
referencing this content...
"""
del self.relations[item.relation.name]
def remove(self, remove_dependent_content=False):
""" This function will remove the ItemContent and all
dependent Items...
"""
for relationname in list(self.relations.keys()): #copy the keys: remove() mutates self.relations while we iterate
self.relations[relationname].remove(remove_dependent_content)
def __str__(self):
""" Method to sensibly convert object to string lines that can
be used to print HRG structure from higher levels...
"""
return "\n".join([repr(self), str(self.features)])
class Item(object):
""" Represents a node in a Relation...
"""
def __init__(self, relation, itemcontent):
self.relation = relation
self.content = itemcontent
#update ItemContent to be aware of item in this relation:
self.content.add_item_relation(self)
self.next_item = None
self.prev_item = None
self.parent_item = None
self.first_daughter = None
self.last_daughter = None
def __eq__(self, item):
""" Determines if the shared contents of the two items are the
same.
"""
return self.content is item.content
def __ne__(self, item):
return not self.__eq__(item)
def __getitem__(self, featname):
""" Returns the requested feature from itemcontent.
"""
try:
return self.content.features[featname]
except KeyError:
return None
def __setitem__(self, featname, feat):
""" Sets the specific feature in itemcontent.
"""
self.content.features[featname] = feat
def __delitem__(self, featname):
""" Deletes the specific feature in itemcontent.
"""
del self.content.features[featname]
def __iter__(self):
""" Iterate over features.
"""
return self.content.features.__iter__()
def __contains__(self, featname):
""" Contains feature?
"""
return featname in self.content.features
def remove(self, remove_dependent_content=False):
""" This function serves to remove (delete) the current Item
and the corresponding ItemContent if no other Items are
referencing it....
"""
#fix pointers:
if self.relation.head_item is self:
self.relation.head_item = self.next_item
if self.relation.tail_item is self:
self.relation.tail_item = self.prev_item
if self.parent_item:
if self.parent_item.first_daughter is self:
self.parent_item.first_daughter = self.next_item
if self.parent_item.last_daughter is self:
self.parent_item.last_daughter = self.prev_item
if self.next_item:
self.next_item.prev_item = self.prev_item
if self.prev_item:
self.prev_item.next_item = self.next_item
#remove daughters:
for d in self.get_daughters():
if remove_dependent_content:
d.remove_content(remove_dependent_content)
else:
d.remove()
#update/remove ItemContent:
self.content.remove_item(self)
def remove_content(self, remove_dependent_content=True):
""" This function will remove the ItemContent and all Items
sharing...
"""
self.content.remove(remove_dependent_content)
def keys(self):
""" Returns the set of feature keys of this item.
"""
return self.content.features.keys()
def _create_related_item(self, item=None):
""" Create new Item related to self..
"""
if item is None:
newitem = Item(self.relation, ItemContent())
else:
#create new Item sharing content...
newitem = Item(self.relation, item.content)
return newitem
def add_daughter(self, item=None):
""" Add the given item as a daughter to this item..
if item is None then creates new ItemContent...
"""
newitem = self._create_related_item(item)
#if first daughter...
if self.first_daughter is None:
newitem.prev_item = None
self.first_daughter = newitem
else:
newitem.prev_item = self.last_daughter
self.last_daughter.next_item = newitem
newitem.parent_item = self
newitem.next_item = None
self.last_daughter = newitem
return newitem
def append_item(self, item=None):
""" Appends an item in this list after this item.
"""
if self.next_item is None: #then is last item in containing list...
if self.parent_item is not None: #then is daughter..
newitem = self.parent_item.add_daughter(item)
else: #then is in relation directly
newitem = self.relation.append_item(item)
else: #is inserted in the middle of list...
newitem = self._create_related_item(item)
self.next_item.prev_item = newitem
newitem.next_item = self.next_item
self.next_item = newitem
newitem.prev_item = self
newitem.parent_item = self.parent_item
return newitem
def prepend_item(self, item=None):
""" Prepends an item in this list before this item.
"""
newitem = self._create_related_item(item)
if self.prev_item is None: #then is first item in containing list...
if self.parent_item is not None: #then is daughter..
self.parent_item.first_daughter = newitem
else: #then is in relation directly
self.relation.head_item = newitem
else: #is inserted in the middle of list...
self.prev_item.next_item = newitem
newitem.next_item = self
newitem.prev_item = self.prev_item #can be None...
self.prev_item = newitem
newitem.parent_item = self.parent_item
return newitem
def get_item_in_relation(self, relationname):
""" Finds the item in the given relation that has the same
shared contents.
"""
try:
return self.content.relations[relationname]
except KeyError:
return None
def in_relation(self, relationname):
""" Returns true if this item has shared contents linked to an
item in 'relationname'.
"""
return relationname in self.content.relations
####
# This function originally implemented based on similar function in
# EST/Festival, however 'get_daughters' provides comparable utility
# and I think it might be a good idea to diverge from EST/Festival
# convention of indexing from 1 here anyway.
#
# Might revive this function with indexing from 0 if we want an
# implementation that does not create a new list...
####
# def get_daughter(self, n=1):
# """ Retrieves the nth daughter of this item.
# """
# if n < 0:
# i = 1
# item = self.last_daughter
# while item is not None:
# if n == -i:
# return item
# else:
# i += 1
# item = item.prev_item
# elif n > 0:
# i = 1
# item = self.first_daughter
# while item is not None:
# if n == i:
# return item
# else:
# i += 1
# item = item.next_item
# else:
# return None
# return None
def get_daughters(self):
""" Constructs a list of daughters of the current Item and
returns this...
"""
l = []
daughter_item = self.first_daughter
while daughter_item is not None:
l.append(daughter_item)
daughter_item = daughter_item.next_item
return l
def get_utterance(self):
""" Returns the utterance associated with this item.
"""
return self.relation.utterance
def has_daughters(self):
""" Determines if this item has daughters.
"""
return bool(self.first_daughter)
def __str__(self):
""" A method to sensibly convert object to string lines that
can be used to print HRG structure from higher levels...
"""
lines = [repr(self)] + ["\t" + line for line in str(self.content).splitlines()]
#using get_daughters might actually be faster (because of
#string concatenation here...
daughter = self.first_daughter
while daughter is not None:
lines.append("\tDaughter:")
lines += ["\t" + line for line in str(daughter).splitlines()]
daughter = daughter.next_item
return "\n".join(lines)
class Relation(object):
""" Represents an ordered set of Items and their associated
children.
"""
def __init__(self, utterance, relationname):
self.name = relationname
self.utterance = utterance
self.head_item = None
self.tail_item = None
def __iter__(self):
self.iterstart = True
return self
def __next__(self):
if self.iterstart:
self.curr_item = self.head_item
self.iterstart = False
else:
self.curr_item = self.curr_item.next_item
if self.curr_item is None:
self.iterstart = True
raise StopIteration
return self.curr_item
def __len__(self):
c = 0
for i in self:
c += 1
return c
### PYTHON2 ###
def next(self):
return self.__next__()
### PYTHON2 ###
def append_item(self, item=None):
""" Adds a new item to this relation.
"""
if item is None:
newitem = Item(self, ItemContent())
else:
#create new Item sharing content...
newitem = Item(self, item.content)
#if head item...
if self.head_item is None:
newitem.prev_item = None
self.head_item = newitem
else:
newitem.prev_item = self.tail_item
self.tail_item.next_item = newitem
newitem.next_item = None
self.tail_item = newitem
return newitem
#we could still implement a prepend_item
def as_list(self):
""" Creates a list of Items in this Relation and returns
this..
"""
return list(self)
def __str__(self):
""" A method to sensibly convert object to string lines that
can be used to print HRG structure from higher levels...
"""
lines = [repr(self)]
item = self.head_item
while item is not None:
lines.append("\tItem:")
lines += ["\t" + line for line in str(item).splitlines()]
item = item.next_item
return "\n".join(lines)
class Utterance(object):
""" An Utterance contains a set of Features (essentially a set of
properties) and a set of Relations.
"""
def __init__(self, voice=None):
""" Creates a new, empty utterance.
"""
self.voice = voice
self.features = {}
self.relations = {}
def __getstate__(self):
""" When pickling, we sever the link to the voice...
"""
return (self.features, self.relations)
def __setstate__(self, state):
self.voice = None
(self.features,
self.relations) = state
def __getitem__(self, featname):
""" Returns the requested feature.
"""
try:
return self.features[featname]
except KeyError:
return None
def __setitem__(self, featname, feat):
""" Sets the specific feature.
"""
self.features[featname] = feat
def __delitem__(self, featname):
""" Deletes the specific feature.
"""
del self.features[featname]
def __iter__(self):
""" Iterate over features.
"""
return self.features.__iter__()
def __contains__(self, featname):
""" Contains feature?
"""
return featname in self.features
def new_relation(self, relationname):
""" Creates a new relation with the given name and adds it to
this utterance.
"""
newrelation = Relation(self, relationname)
self.relations[relationname] = newrelation
return newrelation
def get_relation(self, relationname):
""" Retrieves a relation from this utterance.
"""
try:
return self.relations[relationname]
except KeyError:
return None
def __str__(self):
""" This is a temporary method to sensibly convert object to
string lines that can be used to print HRG structure...
"""
lines = [repr(self), str(self.features)]
for relationname in self.relations:
lines.append("\tRelation %s:" % (relationname))
lines += ["\t" + line for line in str(self.get_relation(relationname)).splitlines()]
return "\n".join(lines)
# Convenience functions for HRG traversal... should be moved to
# ifuncs.py once pytts.extend has been improved...
############################################################
def first_item(item):
""" Returns the first item in the list of items linked to
'item'...
"""
if item.prev_item:
return first_item(item.prev_item)
else:
return item
def last_item(item):
""" Returns the last item in the list of items linked to
'item'...
"""
if item.next_item:
return last_item(item.next_item)
else:
return item
def traverse(item, pathstring):
""" pathstring e.g.
"n.R:SylStructure.parent.p.daughter.last.daughtern.first.F:name"
TO BE IMPROVED!!! DEMITASSE: this seems to break with special strings (unicode issue...)
"""
mapping = {"n": ".next_item",
"p": ".prev_item",
"parent": ".parent_item",
"daughter": ".first_daughter",
"daughtern": ".last_daughter",
"first": ".first_item()",
"last": ".last_item()",
"R": ".get_item_in_relation('%s')",
"F": "['%s']",
"M": ".%s"
}
pathlist = pathstring.split(".")
cmdstring = "item"
for step in pathlist:
if step.startswith("R:") or step.startswith("F:") or step.startswith("M:"):
a, b = step.split(":")
cmdstring += mapping[a] % b
else:
cmdstring += mapping[step]
try:
return eval(cmdstring)
except (TypeError, AttributeError):
raise TraversalError
def num_daughters(item):
count = 0
daughter_item = item.first_daughter
while daughter_item is not None:
count += 1
daughter_item = daughter_item.next_item
return count
#extend Item:
#########################
Item.first_item = first_item
Item.last_item = last_item
Item.traverse = traverse
Item.num_daughters = num_daughters
#HRG method shorthand forms:
#############################
#This exists to make interactive work easier (not intended to be used in serious code)..
Item.ad = Item.add_daughter
Item.gd = Item.get_daughters
Item.ai = Item.append_item
Item.pi = Item.prepend_item
Item.ir = Item.in_relation
Item.gir = Item.get_item_in_relation
Relation.al = Relation.as_list
Relation.ai = Relation.append_item
Utterance.gr = Utterance.get_relation
if __name__ == "__main__":
import hrg
utt = hrg.Utterance()
utt["text"] = "mathematics is easy"
word1 = [['m', 'ae', 'th'], ['ax'], ['m', 'ae'], ['t', 'ih', 'k', 's']]
word2 = [['ih', 'z']]
word3 = [['ii'], ['z', 'ih']]
all_words = [word1, word2, word3]
wordrel = utt.new_relation("Words")
sylrel = utt.new_relation("Syllable")
segmentrel = utt.new_relation("Segment")
sylstructrel = utt.new_relation("SylStructure")
# add words
for word in utt["text"].split():
tmpitem = wordrel.append_item()
tmpitem["name"] = word
# iterate over words
for i, wordrel_worditem in enumerate(wordrel):
sylstructrel_worditem = sylstructrel.append_item(wordrel_worditem)
word = all_words[i]
for syl in word:
sylrel_sylitem = sylrel.append_item()
sylrel_sylitem["name"] = "syl"
sylstructrel_sylitem = sylstructrel_worditem.add_daughter(sylrel_sylitem)
for seg in syl:
segmentrel_segmentitem = segmentrel.append_item()
segmentrel_segmentitem["name"] = seg
sylrel_segmentitem = sylstructrel_sylitem.add_daughter(segmentrel_segmentitem)
print("'name' in tmpitem: ", "name" in tmpitem)
print("'blah' in tmpitem: ", "blah" in tmpitem)
print("iterate over featnames in tmpitem:")
for featname in tmpitem:
print("\t" + featname + ": " + tmpitem[featname])
print("deleting 'name'")
del tmpitem["name"]
print("'name' in tmpitem: ", "name" in tmpitem)
print("")
print(utt)
```
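A short sketch of the `traverse()` path syntax defined above, which the `__main__` demo does not exercise (the words and relation names are illustrative; only behaviour shown in `hrg.py` itself is relied on):
```python
from ttslab import hrg

utt = hrg.Utterance()
wordrel = utt.new_relation("Words")
sylstructrel = utt.new_relation("SylStructure")
for name in ["mathematics", "is", "easy"]:
    w = wordrel.append_item()
    w["name"] = name
    sylstructrel.append_item(w)   # same ItemContent shared by both relations

second = wordrel.head_item.next_item
# "n" -> next item, "p" -> previous item, "F:name" -> feature lookup
print(second.traverse("p.F:name"))               # mathematics
print(second.traverse("n.F:name"))               # easy
# "R:SylStructure" hops to the item sharing content in another relation
print(second.traverse("R:SylStructure.F:name"))  # is
# stepping past the end of the list raises TraversalError
try:
    second.traverse("n.n.F:name")
except hrg.TraversalError:
    print("no such item")
```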
#### File: ttslab/trackfile/trackfile.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import os
import numpy as np
import scipy.io.wavfile
#from trackfile package:
import io.htk
#recognised file extensions:
WAV_EXT = "wav"
EST_EXT = "est"
class FileFormatError(Exception):
pass
class Track(object):
""" assert len(self.times) == self.values.shape[0]
"""
def __init__(self, name=None):
self.values = np.array([]).reshape((0,0))
self.times = np.array([])
if name is not None:
self._name = name
def __len__(self):
return len(self.times)
def __str__(self):
return "\n".join(["name: " + self.name,
"numchannels: " + str(self.numchannels),
"numframes: " + str(self.numframes),
"starttime: " + str(self.starttime),
"endtime: " + str(self.endtime)])
@property
def name(self):
try:
return self._name
except AttributeError:
return ""
@name.setter
def name(self, name):
self._name = name
@property
def numframes(self):
return len(self)
@property
def numchannels(self):
numframes, numchannels = self.values.shape
return numchannels
@property
def starttime(self):
try:
return self._starttime
except AttributeError:
return 0.0
@starttime.setter
def starttime(self, starttime):
self._starttime = starttime
@property
def endtime(self):
try:
return self._endtime
except AttributeError:
return self.times[-1]
######################################## FILE IO METHODS
def load_wave(self, filepath):
""" loads a RIFF wave file...
"""
sr, s = scipy.io.wavfile.read(filepath)
if len(s.shape) == 1:
self.values = s.reshape(-1, 1)
else:
self.values = s
self.times = np.arange(len(self.values)) * (sr**-1)
self.name = os.path.basename(filepath)
def load_binary(self, filepath, numchannels, timestep, dtype='float32'):
""" loads from binary file...
"""
self.values = np.fromfile(filepath, dtype=dtype)
self.values = np.reshape(self.values, (-1, numchannels))
self.times = np.arange(len(self.values)) * timestep
self.name = os.path.basename(filepath)
def load_htk(self, filepath, windowsize):
""" DEMITASSE: I need to review io.htk.HTKFeatureFile
implementation...
"""
h = io.htk.HTKFeatureFile(filepath)
self.times = np.array(map(io.htk.htk_int_to_float, h.central_times(io.htk.float_to_htk_int(windowsize))))
self.values = np.array(h.observations)
self.name = os.path.basename(filepath)
#DEMITASSE remove this when code reviewed:
assert len(self.values.shape) == 2
def load_track(self, filepath):
"""Reads an Edinburgh Speech Tools ASCII Track file (ignores
'Breaks')...
DEMITASSE: need to review this...
"""
firstline = True
headerend = False
breakspresent = False
breakvals = []
values = []
times = []
with open(filepath) as infh:
for line in infh:
if firstline:
firstline = False
if line.split() != ["EST_File", "Track"]:
raise FileFormatError("File is not an EST_Track file...")
else:
if not headerend:
linelist = line.split()
if linelist[0] == "DataType" and linelist[1] != "ascii":
raise FileFormatError("File is not in ASCII format...")
if linelist[0] == "NumFrames":
numframes = int(linelist[1])
if linelist[0] == "NumChannels":
numchannels = int(linelist[1])
if linelist[0] == "BreaksPresent" and linelist[1].lower() == "true":
breakspresent = True
if linelist[0] == "EST_Header_End":
headerend = True
else:
linelist = line.split()
if breakspresent:
time, breakval, vals = float(linelist[0]), float(linelist[1]), map(float, linelist[2:])
times.append(time)
breakvals.append(breakval)
values.append(vals)
else:
time, vals = float(linelist[0]), map(float, linelist[1:])
times.append(time)
values.append(vals)
self.times = np.array(times)
self.values = np.array(values)
#sanity check...
if self.values.shape != (numframes, numchannels):
raise FileFormatError("Data does not match header info...")
self.name = os.path.basename(filepath)
#DEMITASSE remove this when reviewed:
assert len(self.values.shape) == 2
######################################## EDIT METHODS
def zero_starttime(self):
self.times -= self.starttime
if hasattr(self, "_endtime"):
self._endtime -= self.starttime
if hasattr(self, "_starttime"):
self._starttime = 0.0
def slice(self, idxa, idxb, copy=True):
""" returns a new track sliced using provided indices (like
Python list slicing)... copy=False makes use of Numpy
views to share data/memory...
"""
t = Track()
if copy:
t.values = self.values[idxa:idxb].copy()
t.times = self.times[idxa:idxb].copy()
else:
t.values = self.values[idxa:idxb]
t.times = self.times[idxa:idxb]
return t
def index_at(self, time, method="round"):
""" Returns the index of the closest sample to 'time'...
method in ['round', 'ceil', 'floor']
"""
diffs = self.times - time
if method == "round":
return np.abs(diffs).argmin()
elif method == "ceil":
pdiffi = np.flatnonzero(diffs > 0.0)
try:
return pdiffi[0]
except IndexError:
return len(diffs)
elif method == "floor":
ndiffi = np.flatnonzero(diffs < 0.0)
try:
return ndiffi[-1]
except IndexError:
return 0
else:
raise Exception("Unsupported method: %s" % method)
# INCLUDE WHEN UNITTESTS IMPLEMENTED
# def index_at(self, time):
# """ Returns the index of the closest sample to time... faster
# due to taking into account ordering of time (TEST LATER).
# """
# from bisect import bisect_left #move out when using this function...
# if time < self.starttime:
# return 0
# elif time > self.endtime:
# return len(self) - 1
# else:
# i = bisect_left(self.times, time)
# if self.times[i] - time > 0.5:
# i -= 1
# return i
```
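A short usage sketch for `Track` with synthetic data (no files needed; the 5 ms frame shift and channel count are arbitrary illustration values):
```python
import numpy as np
from ttslab.trackfile import Track

t = Track("demo")
t.values = np.random.rand(200, 3)     # 200 frames, 3 channels
t.times = np.arange(200) * 0.005      # 5 ms frame shift

print(t)                              # name, numchannels, numframes, start/end time
i = t.index_at(0.333)                 # index of the frame closest to 0.333 s
print(i, t.times[i])
head = t.slice(0, 100)                # copy of the first 100 frames
view = t.slice(100, 200, copy=False)  # numpy view sharing memory with t
print(head.numframes, view.numframes)
```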
#### File: ttslab/voices/afrikaans_default.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import re
from collections import OrderedDict
from .. phoneset import Phoneset
from .. defaultvoice import LwaziHTSVoice, LwaziPromHTSVoice
from .. synthesizer_htsme import SynthesizerHTSME
import ttslab.hts_labels_prom as hts_labels_prom
class LwaziAfrikaansPhoneset(Phoneset):
""" The clusters and syllabification are ripped from the English
implementation and should be revisited...
"""
def __init__(self):
#Phoneset.__init__(self)
#syllable_clusters are processed in order, thus a list, not a set...
self.features = {"name": "Lwazi Afrikaans Phoneset",
"syllable_clusters": ["VCV", "VCCV", "VCCCV", "VCCCCV",
"VCGV", "VCCGV", "VCCCGV", "VV"],
"wellformed_plosive_clusters": [["p","l"], ["b","l"], ["k","l"], ["g","l"], ["p","r"],
["b","r"], ["t","r"], ["d","r"], ["k","r"], ["g","r"],
["t","w"], ["d","w"], ["g","w"], ["k","w"]],
"wellformed_fricative_clusters": [["f","l"], ["f","r"], ["f","j"], ["ʃ","j"]],
"wellformed_other_clusters": [["m","j"], ["n","j"]],
"wellformed_s_clusters": [["s","p"], ["s","t"], ["s","k"], ["s","m"], ["s","n"],
["s","f"], ["s","w"], ["s","l"], ["s","p","l"],
["s","p","r"], ["s","t","r"], ["s","k","l"],
["s","k","r"], ["s","k","w"]]
}
self.features["wellformed_clusters"] = (self.features["wellformed_plosive_clusters"] +
self.features["wellformed_fricative_clusters"] +
self.features["wellformed_other_clusters"] +
self.features["wellformed_s_clusters"])
self.features["silence_phone"] = "pau"
self.features["closure_phone"] = "paucl"
self.phones = {"pau" : set(["pause"]),
"paucl" : set(["closure"]),
"ʔ" : set(["glottal-stop"]),
"ə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_central"]),
"əi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_back"]),
"ai" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"œ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front", "articulation_rounded"]),
"əu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"œy" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ŋ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_velar", "voiced"]),
"ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ɔi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
"ʒ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar", "voiced"]),
"æ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ɑː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_low", "position_back"]),
"ɑːi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"iə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front"]),
"øː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_front", "articulation_rounded"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"ɦ" : set(["consonant", "manner_fricative", "place_glottal", "voiced"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"iu" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"uə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_back", "articulation_rounded"]),
"uəi" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"p" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial"]),
"r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"tʃ" : set(["class_consonantal", "consonant", "manner_affricate", "place_alveolar"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
"ui" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"v" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental", "voiced"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"]),
"x" : set(["class_consonantal", "consonant", "manner_fricative", "place_velar"]),
"y" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"z" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar", "voiced"])
}
self.map = {"pau":"pau",
"paucl":"paucl",
"ʔ":"paugs",
"ə":"q", #sin
"əi":"qi", #wyn
"a":"a", #man
"ai":"ai", #katjie
"ɛ":"E", #ken
"œ":"qoeq", #mus
"əu":"qu", #bou
"œy":"qoeqy", #huis
"ŋ":"N", #sing
"ɔ":"O", #son
"ɔi":"Oi", #potjie
"ʃ":"S", #chef
"ʒ":"Z", #mirage
"æ":"qaeq", #ek
"ɑː":"AA", #aan
"ɑːi":"AAi", #saai
"b":"b",
"d":"d",
"iə":"iq", #seer
"øː":"qooq", #seun
"f":"f",
"g":"g",
"ɦ":"hq",
"i":"i", #sien
"iu":"iu", #meeu
"j":"j",
"k":"k",
"l":"l",
"m":"m",
"n":"n",
"uə":"uq", #room
"uəi":"uqi", #rooi
"p":"p",
"r":"r",
"s":"s",
"t":"t",
"tʃ":"tS", #tjek
"u":"u", #boek
"ui":"ui", #boei
"v":"v", #wens
"w":"w", #twee
"x":"x", #gee
"y":"y", #muur
"z":"z",
"xxx":"xxx"
}
def is_plosive(self, phonename):
return "manner_plosive" in self.phones[phonename]
def is_voiced(self, phonename):
return ("voiced" in self.phones[phonename] or
"vowel" in self.phones[phonename])
def is_obstruent(self, phonename):
return ("class_consonantal" in self.phones[phonename] and
"class_sonorant" not in self.phones[phonename] and
"class_syllabic" not in self.phones[phonename])
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_glide(self, phonename):
return "manner_glide" in self.phones[phonename]
def is_liquid(self, phonename):
return "manner_liquid" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def is_fricative(self, phonename):
return "manner_fricative" in self.phones[phonename]
def is_nasal(self, phonename):
return "manner_nasal" in self.phones[phonename]
def sonority_level(self, phonename):
""" Assigns levels of sonority to phones based on their nature...
"""
if self.is_vowel(phonename):
if "height_low" in self.phones[phonename]:
return 9
if "height_mid" in self.phones[phonename]:
return 8
if "height_high" in self.phones[phonename]:
return 7
if self.is_liquid(phonename):
return 6
if self.is_nasal(phonename):
return 5
if self.is_fricative(phonename):
if self.is_voiced(phonename):
return 4
else:
return 3
if self.is_plosive(phonename):
if self.is_voiced(phonename):
return 2
else:
return 1
return 0
def _process_cluster(self, cluster, phonelist, match):
""" Break cluster into syllables according to the rules defined by
<NAME>, "English syllabification as the interaction of
markedness constraints" in Studia Linguistica, vol. 60, 2006,
pp. 1-33
Need to refactor the if statements to make clearer/simpler...
Implementation for English... needs to be revisited...
"""
phonecluster = phonelist[match.start() : match.end()]
if cluster == "VCV":
#always split -> V.CV:
return "V.CV"
if cluster == "VCCV":
CC = phonecluster[1:3]
#if CC cluster is Tautosyllabic -> V.CCV:
if ((CC in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0])) or
(CC[0] == "s" and
self.is_plosive(CC[1]) and
not self.is_voiced(CC[1]))):
return "V.CCV"
#if CC cluster is Heterosyllabic -> VC.CV:
if ((self.sonority_level(CC[1]) < self.sonority_level(CC[0])) or
(self.sonority_level(CC[1]) == self.sonority_level(CC[0])) or
(CC not in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0]))):
return "VC.CV"
if cluster == "VCCCV":
CCC = phonecluster[1:4]
C2C3 = CCC[1:]
#if CCC are all obstruents -> VC.CCV:
if all([self.is_obstruent(C) for C in CCC]):
return "VC.CCV"
#if C2C3 are wellformed onsets -> VC.CCV:
if C2C3 in self.features["wellformed_clusters"]:
return "VC.CCV"
else:
return "VCC.CV"
if cluster == "VCCCCV":
#always split -> VC.CCCV:
return "VC.CCCV"
if cluster == "VCGV":
CG = phonecluster[1:3]
if not self.is_plosive(CG[0]): #C not a stop
return "VC.GV"
else:
if CG not in self.features["wellformed_clusters"]: #C a stop and CG not wellformed
return "VC.GV"
else:
return "V.CGV" #C a stop and CG wellformed
if cluster == "VCCGV":
CCG = phonecluster[1:4]
if CCG[0] == "s":
return "V.CCGV"
else:
return "VC.CGV"
if cluster == "VCCCGV":
return "VC.CCGV"
if cluster == "VV": #not described in the Hall paper...
return "V.V"
def syllabify(self, phonelist):
""" Classes:
C -> Consonant,
V -> Short/Long Vowel/Syllabic sonorant/Diphthong
G -> Glide
"""
#make a copy (to be edited internally)
plist = list(phonelist)
#first construct string representing relevant classes...
classstr = ""
for phone in plist:
if self.is_vowel(phone):
classstr += "V"
elif self.is_glide(phone):
classstr += "G"
else:
classstr += "C"
#Begin Aby's hacks:
# - Change the last phoneclass under certain conditions..
try:
if (self.is_syllabicconsonant(plist[-1]) and
self.is_obstruent(plist[-2])):
classstr = classstr[:-1] + "V"
if (self.is_syllabicconsonant(plist[-1]) and
self.is_nasal(plist[-2])):
classstr = classstr[:-1] + "V"
except IndexError:
pass
#End Aby's hacks...
#find syllable_clusters in order and apply syllabification
#process on each...this should be redone... FIXME!!!
for cluster in self.features["syllable_clusters"]:
match = re.search(cluster, classstr)
while match:
#syllabify cluster
clustersylstr = self._process_cluster(cluster, plist, match)
#update classstr...
start, end = match.span()
classstr = clustersylstr.join([classstr[:start], classstr[end:]])
plist = (plist[:match.start() + clustersylstr.index(".")] +
[""] + plist[match.start() + clustersylstr.index("."):])
#next match...
match = re.search(cluster, classstr)
sylls = [[]]
index = 0
for char in classstr:
if char != ".":
sylls[-1].append(phonelist[index])
index += 1
else:
sylls.append([])
return sylls
class LwaziAfrikaans_simpleGPOS_HTSVoice(LwaziPromHTSVoice):
""" GPOS from Festival English example...
"""
PREPOSITIONS = ["in", "van", "vir", "op", "daardie", "met",
"by", "vanaf", "as", "teen", "voor", "onder",
"na", "oor", "terwyl", "sonder", "dat", "deur",
"tussen", "per", "af", "langs", "hierdie", "naas"]
DETERMINERS = ["die", "n", "geen", "nie", "elke", "nog", "al",
"enige", "beide", "baie"]
MODAL = ["sal", "wil", "mag", "sou", "wou", "moet", "wees"]
CONJUNCTIONS = ["en", "maar", "omdat", "want", "of"]
INTERROGATIVE_PRONOUNS = ["wie", "wat", "watter", "waar", "hoe", "wanneer", "hoekom"]
PERSONAL_PRONOUNS = ["haar", "sy", "hulle", "hul", "ons", "syne", "myne", "hare"]
AUXILIARY_VERBS = ["is", "het"]
GPOS = dict([(word, "prep") for word in PREPOSITIONS] +
[(word, "det") for word in DETERMINERS] +
[(word, "md") for word in MODAL] +
[(word, "cc") for word in CONJUNCTIONS] +
[(word, "wp") for word in INTERROGATIVE_PRONOUNS] +
[(word, "pps") for word in PERSONAL_PRONOUNS] +
[(word, "aux") for word in AUXILIARY_VERBS])
def __init__(self, phoneset, g2p, pronundict, pronunaddendum, synthesizer):
LwaziHTSVoice.__init__(self,
phoneset=phoneset,
g2p=g2p,
pronundict=pronundict,
pronunaddendum=pronunaddendum,
synthesizer=synthesizer)
self.processes = {"text-to-words": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None)]),
"text-to-segments": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None)]),
"text-to-label": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_only")]),
"text-to-wave": OrderedDict([("tokenizer", "default"),
("normalizer", "default"),
("gpos", None),
("phrasifier", None),
("phonetizer", None),
("pauses", None),
("synthesizer", "label_and_synth")]),
"utt-to-label": OrderedDict([("synthesizer", "label_only")]),
"utt-to-wave": OrderedDict([("synthesizer", "label_and_synth")])}
def gpos(self, utt, processname):
word_rel = utt.get_relation("Word")
for word_item in word_rel:
if word_item["name"] in self.GPOS:
word_item["gpos"] = "nc"
else:
word_item["gpos"] = "c"
return utt
class SynthesizerHTSME_Prominence(SynthesizerHTSME):
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_prom.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_prom.p(phone_item),
hts_labels_prom.a(phone_item),
hts_labels_prom.b(phone_item),
hts_labels_prom.c(phone_item),
hts_labels_prom.d(phone_item),
hts_labels_prom.e(phone_item),
hts_labels_prom.f(phone_item),
hts_labels_prom.g(phone_item),
hts_labels_prom.h(phone_item),
hts_labels_prom.i(phone_item),
hts_labels_prom.j(phone_item)]
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
```
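The `hts_label` methods in this file and in the voices below emit HTK-style full-context label lines: an optional start/end time pair, right-justified to ten characters, followed by the slash-joined context fields. The sketch below only illustrates the assumed time convention; `float_to_htk_int` is written here under the usual HTK assumption of 100 ns time units and stands in for the real helper in `hts_labels_prom`.
```python
def float_to_htk_int(seconds):
    """Stand-in for hts_labels_prom.float_to_htk_int (assumed 100 ns HTK units)."""
    return int(round(seconds * 10 ** 7))

# A 50 ms phone starting at t=0 would produce a line like:
start, end = 0, float_to_htk_int(0.05)
contexts = "placeholder/context/fields"  # stands in for the slash-joined hts_labels_prom outputs
print("%s %s " % (str(start).rjust(10), str(end).rjust(10)) + contexts)
```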
#### File: ttslab/voices/bomu_default.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import re
from .. phoneset import Phoneset
class BomuPhoneset(Phoneset):
""" Based on Bomu description and data received from Stephane
Boyera...
DEMITASSE: check again later when the phoneset/language is more familiar!
"""
def __init__(self):
Phoneset.__init__(self)
self.features = {"name": "Bomu Phoneset",
"silence_phone": "pau",
"closure_phone": "pau_cl"
}
self.phones = {"pau" : set(["pause"]),
"pau_cl" : set(["closure"]),
#vowels
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"e" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"o" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
#nasalised
"ã" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ẽ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛ̃" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ĩ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"õ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ũ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
#long
"aː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"eː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"iː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"oː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"uː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
#nasalised long
"ãː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ẽː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛ̃ː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ĩː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"õː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ũː" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
#consonants
"ʔ" : set(["class_consonantal", "consonant", "manner_plosive", "place_glottal"]),
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"β" : set(["class_consonantal", "consonant", "manner_fricative", "place_bilabial", "voiced"]),
"tʃ" : set(["class_consonantal", "consonant", "manner_affricate", "manner_strident", "place_alveolar", "place_post-alveolar"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"h" : set(["consonant", "manner_fricative", "place_glottal"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"ɲ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_palatal", "voiced"]),
"p" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial"]),
"r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"v" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental", "voiced"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"z" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar", "voiced"])
}
self.map = {"pau" : "pau",
"pau_cl" : "pau_cl",
"a" : "a",
"e" : "e",
"ɛ" : "E",
"i" : "i",
"o" : "o",
"u" : "u",
"ã" : "an",
"ẽ" : "en",
"ɛ̃" : "En",
"ĩ" : "in",
"õ" : "on",
"ũ" : "un",
"ʔ" : "pau_gs",
"b" : "b",
"β" : "B",
"tʃ" : "tS",
"d" : "d",
"f" : "f",
"g" : "g",
"h" : "h",
"k" : "k",
"l" : "l",
"m" : "m",
"n" : "n",
"ɲ" : "J",
"p" : "p",
"r" : "r",
"s" : "s",
"t" : "t",
"v" : "v",
"w" : "w",
"j" : "j",
"z" : "z"
}
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_consonant(self, phonename):
return "consonant" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def syllabify(self, phonelist):
""" Basic syllabification, based on the syllabification scheme
devised by <NAME> for isiZulu (Nguni language).
"""
sylls = [[]]
phlist = list(phonelist)
while phlist:
phone = phlist[0]
try:
nphone = phlist[1]
nnphone = phlist[2]
#Syllabic consonant followed by C:
if (self.is_syllabicconsonant(phone) and
self.is_consonant(nphone)):
#sC.C
sylls[-1].append(phlist.pop(0))
if phlist: sylls.append([])
continue
#If there is a three phone cluster:
if (self.is_vowel(phone) and
not self.is_vowel(nphone) and
not self.is_vowel(nnphone)):
#VC.C
sylls[-1].append(phlist.pop(0))#phone
sylls[-1].append(phlist.pop(0))#nphone
if phlist: sylls.append([])
continue
except IndexError:
pass
if self.is_vowel(phone):
#V.Any
sylls[-1].append(phlist.pop(0))
if phlist: sylls.append([])
continue
#anything not caught above is added to current syl...
sylls[-1].append(phlist.pop(0))
return sylls
```
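The `syllabify` method above places a syllable boundary after every vowel, with extra handling for syllabic consonants and VCC sequences (where one consonant is kept in the coda). A minimal usage sketch, assuming the package is importable as `ttslab.voices.bomu_default`:
```python
from ttslab.voices.bomu_default import BomuPhoneset

phoneset = BomuPhoneset()
# A plain CVCV sequence is split after each vowel:
print(phoneset.syllabify(["b", "a", "n", "a"]))  # expected: [['b', 'a'], ['n', 'a']]
```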
#### File: ttslab/voices/english_default.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import re
from .. phoneset import Phoneset
class LwaziEnglishPhoneset(Phoneset):
""" Based on MRPA...
"""
def __init__(self):
#Phoneset.__init__(self)
#syllable_clusters are processed in order, thus a list, not a set...
self.features = {"name": "Lwazi English Phoneset",
"syllable_clusters": ["VCV", "VCCV", "VCCCV", "VCCCCV",
"VCGV", "VCCGV", "VCCCGV", "VV"],
"wellformed_plosive_clusters": [["p","l"], ["b","l"], ["k","l"], ["g","l"], ["p","ɹ"],
["b","ɹ"], ["t","ɹ"], ["d","ɹ"], ["k","ɹ"], ["g","ɹ"],
["t","w"], ["d","w"], ["g","w"], ["k","w"], ["p","j"],
["b","j"], ["t","j"], ["d","j"], ["k","j"], ["g","j"]],
"wellformed_fricative_clusters": [["f","l"], ["f","ɹ"], ["θ","ɹ"], ["ʃ","ɹ"],
["θ","w"], ["h","w"], ["f","j"], ["v","j"],
["θ","j"], ["z","j"], ["h","j"]],
"wellformed_other_clusters": [["m","j"], ["n","j"], ["l","j"]],
"wellformed_s_clusters": [["s","p"], ["s","t"], ["s","k"], ["s","m"], ["s","n"],
["s","f"], ["s","w"], ["s","l"], ["s","j"], ["s","p","l"],
["s","p","ɹ"], ["s","p","j"], ["s","m","j"], ["s","t","ɹ"],
["s","t","j"], ["s","k","l"], ["s","k","ɹ"], ["s","k","w"],
["s","k","j"]]
}
self.features["wellformed_clusters"] = (self.features["wellformed_plosive_clusters"] +
self.features["wellformed_fricative_clusters"] +
self.features["wellformed_other_clusters"] +
self.features["wellformed_s_clusters"])
self.features["silence_phone"] = "pau"
self.features["closure_phone"] = "pau_cl"
self.phones = {"pau" : set(["pause"]),
"pau_cl" : set(["closure"]),
"ʔ" : set(["glottal-stop"]),
"ə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_central"]),
"ɜ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_mid", "position_central"]),
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ɑ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_low", "position_back"]),
"aɪ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"aʊ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"tʃ" : set(["class_consonantal", "consonant", "manner_affricate", "manner_strident", "place_alveolar", "place_post-alveolar"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"ð" : set(["class_consonantal", "consonant", "manner_fricative", "place_dental", "voiced"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"eɪ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"h" : set(["consonant", "manner_fricative", "place_glottal"]),
"ɪ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"ɪə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"dʒ" : set(["class_consonantal", "consonant", "manner_affricate", "manner_strident", "place_alveolar", "place_post-alveolar", "voiced"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"ŋ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_nasal", "place_velar", "voiced"]),
"ɒ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_back", "articulation_rounded"]),
"ɔɪ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"əʊ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"p" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial"]),
"ɹ" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"θ" : set(["class_consonantal", "consonant", "manner_fricative", "place_dental"]),
"ʊ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back", "articulation_rounded"]),
"ʊə" : set(["class_sonorant", "class_syllabic", "vowel", "duration_diphthong"]),
"ʌ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_long", "height_high", "position_back", "articulation_rounded"]),
"v" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental", "voiced"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"z" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar", "voiced"]),
"ʒ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar", "voiced"])
}
self.map = {"pau":"pau",
"pau_cl":"pau_cl",
"ʔ":"pau_gs",
"ə":"_", #about
"ɜ":"__", #bird
"a":"a", #bad
"ɑ":"aa", #bard
"aɪ":"ai", #buy
"aʊ":"au", #cow
"b":"b",
"tʃ":"ch", #chin
"d":"d",
"ð":"dh", #then
"ɛ":"e", #bed
"ɛə":"e_", #bare
"eɪ":"ei", #bay
"f":"f",
"g":"g",
"h":"h",
"ɪ":"i", #bid
"ɪə":"i_", #beer
"i":"ii", #bead
"dʒ":"jh", #edge
"k":"k",
"l":"l",
"m":"m",
"n":"n",
"ŋ":"ng", #sing
"ɒ":"o", #pot
"ɔɪ":"oi", #boy
"ɔ":"oo", #port
"əʊ":"ou", #go
"p":"p",
"ɹ":"r", #ray
"s":"s",
"ʃ":"sh", #she
"t":"t",
"θ":"th", #thin
"ʊ":"u", #put
"ʊə":"u_", #poor
"ʌ":"uh", #bud
"u":"uu", #boot
"v":"v",
"w":"w",
"j":"y", #yes
"z":"z",
"ʒ":"zh" #beige
}
def is_plosive(self, phonename):
return "manner_plosive" in self.phones[phonename]
def is_voiced(self, phonename):
return ("voiced" in self.phones[phonename] or
"vowel" in self.phones[phonename])
def is_obstruent(self, phonename):
return ("class_consonantal" in self.phones[phonename] and
"class_sonorant" not in self.phones[phonename] and
"class_syllabic" not in self.phones[phonename])
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_glide(self, phonename):
return "manner_glide" in self.phones[phonename]
def is_liquid(self, phonename):
return "manner_liquid" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def is_fricative(self, phonename):
return "manner_fricative" in self.phones[phonename]
def is_nasal(self, phonename):
return "manner_nasal" in self.phones[phonename]
def sonority_level(self, phonename):
""" Assigns levels of sonority to phones based on their nature...
"""
if self.is_vowel(phonename):
if "height_low" in self.phones[phonename]:
return 9
if "height_mid" in self.phones[phonename]:
return 8
if "height_high" in self.phones[phonename]:
return 7
if self.is_liquid(phonename):
return 6
if self.is_nasal(phonename):
return 5
if self.is_fricative(phonename):
if self.is_voiced(phonename):
return 4
else:
return 3
if self.is_plosive(phonename):
if self.is_voiced(phonename):
return 2
else:
return 1
return 0
def _process_cluster(self, cluster, phonelist, match):
""" Break cluster into syllables according to the rules defined by
T.A. Hall, "English syllabification as the interaction of
markedness constraints" in Studia Linguistica, vol. 60, 2006,
pp. 1-33
Need to refactor the if statements to make clearer/simpler...
"""
phonecluster = phonelist[match.start() : match.end()]
if cluster == "VCV":
#always split -> V.CV:
return "V.CV"
if cluster == "VCCV":
CC = phonecluster[1:3]
#if CC cluster is Tautosyllabic -> V.CCV:
if ((CC in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0])) or
(CC[0] == "s" and
self.is_plosive(CC[1]) and
not self.is_voiced(CC[1]))):
return "V.CCV"
#if CC cluster is Heterosyllabic -> VC.CV:
if ((self.sonority_level(CC[1]) < self.sonority_level(CC[0])) or
(self.sonority_level(CC[1]) == self.sonority_level(CC[0])) or
(CC not in self.features["wellformed_clusters"] and
self.sonority_level(CC[1]) > self.sonority_level(CC[0]))):
return "VC.CV"
if cluster == "VCCCV":
CCC = phonecluster[1:4]
C2C3 = CCC[1:]
#if CCC are all obstruents -> VC.CCV:
if all([self.is_obstruent(C) for C in CCC]):
return "VC.CCV"
#if C2C3 are wellformed onsets -> VC.CCV:
if C2C3 in self.features["wellformed_clusters"]:
return "VC.CCV"
else:
return "VCC.CV"
if cluster == "VCCCCV":
#always split -> VC.CCCV:
return "VC.CCCV"
if cluster == "VCGV":
CG = phonecluster[1:3]
if not self.is_plosive(CG[0]): #C not a stop
return "VC.GV"
else:
if CG not in self.features["wellformed_clusters"]: #C a stop and CG not wellformed
return "VC.GV"
else:
return "V.CGV" #C a stop and CG wellformed
if cluster == "VCCGV":
CCG = phonecluster[1:4]
if CCG[0] == "s":
return "V.CCGV"
else:
return "VC.CGV"
if cluster == "VCCCGV":
return "VC.CCGV"
if cluster == "VV": #not described in the Hall paper...
return "V.V"
def syllabify(self, phonelist):
""" Classes:
C -> Consonant,
V -> Short/Long Vowel/Syllabic sonorant/Diphthong
G -> Glide
"""
#make a copy (to be edited internally)
plist = list(phonelist)
#first construct string representing relevant classes...
classstr = ""
for phone in plist:
if self.is_vowel(phone):
classstr += "V"
elif self.is_glide(phone):
classstr += "G"
else:
classstr += "C"
#Begin Aby's hacks:
# - Change the last phoneclass under certain conditions..
try:
if (self.is_syllabicconsonant(plist[-1]) and
self.is_obstruent(plist[-2])):
classstr = classstr[:-1] + "V"
if (self.is_syllabicconsonant(plist[-1]) and
self.is_nasal(plist[-2])):
classstr = classstr[:-1] + "V"
except IndexError:
pass
#End Aby's hacks...
#find syllable_clusters in order and apply syllabification
#process on each...this should be redone... FIXME!!!
for cluster in self.features["syllable_clusters"]:
match = re.search(cluster, classstr)
while match:
#syllabify cluster
clustersylstr = self._process_cluster(cluster, plist, match)
#update classstr...
start, end = match.span()
classstr = clustersylstr.join([classstr[:start], classstr[end:]])
plist = (plist[:match.start() + clustersylstr.index(".")] +
[""] + plist[match.start() + clustersylstr.index("."):])
#next match...
match = re.search(cluster, classstr)
sylls = [[]]
index = 0
for char in classstr:
if char != ".":
sylls[-1].append(phonelist[index])
index += 1
else:
sylls.append([])
return sylls
def guess_sylstress(self, syllables):
""" Try to guess stress pattern for an unknown word...
"""
if len(syllables) == 1:
if "ə" not in syllables[0]:
return "1"
else:
return "0"
else:
return "0" * len(syllables) #implement other cases later
class CMUEnglishPhoneset(LwaziEnglishPhoneset):
""" Based on ARPAbet... see: http://en.wikipedia.org/wiki/Arpabet
Phoneme Example Translation
------- ------- -----------
AA odd AA D
AE at AE T
AH hut HH AH T
AO ought AO T
AW cow K AW
AY hide HH AY D
B be B IY
CH cheese CH IY Z
D dee D IY
DH thee DH IY
EH Ed EH D
ER hurt HH ER T
EY ate EY T
F fee F IY
G green G R IY N
HH he HH IY
IH it IH T
IY eat IY T
JH gee JH IY
K key K IY
L lee L IY
M me M IY
N knee N IY
NG ping P IH NG
OW oat OW T
OY toy T OY
P pee P IY
R read R IY D
S sea S IY
SH she SH IY
T tea T IY
TH theta TH EY T AH
UH hood HH UH D
UW two T UW
V vee V IY
W we W IY
Y yield Y IY L D
Z zee Z IY
ZH seizure S IY ZH ER
"""
def __init__(self):
#Phoneset.__init__(self)
#syllable_clusters are processed in order, thus a list, not a set...
self.features = {"name": "CMU English Phoneset",
"syllable_clusters": ["VCV", "VCCV", "VCCCV", "VCCCCV",
"VCGV", "VCCGV", "VCCCGV", "VV"],
"wellformed_plosive_clusters": [["p","l"], ["b","l"], ["k","l"], ["g","l"], ["p","r"],
["b","r"], ["t","r"], ["d","r"], ["k","r"], ["g","r"],
["t","w"], ["d","w"], ["g","w"], ["k","w"], ["p","y"],
["b","y"], ["t","y"], ["d","y"], ["k","y"], ["g","y"]],
"wellformed_fricative_clusters": [["f","l"], ["f","r"], ["th","r"], ["sh","r"],
["th","w"], ["hh","w"], ["f","y"], ["v","y"],
["th","y"], ["z","y"], ["hh","y"]],
"wellformed_other_clusters": [["m","y"], ["n","y"], ["l","y"]],
"wellformed_s_clusters": [["s","p"], ["s","t"], ["s","k"], ["s","m"], ["s","n"],
["s","f"], ["s","w"], ["s","l"], ["s","y"], ["s","p","l"],
["s","p","r"], ["s","p","y"], ["s","m","y"], ["s","t","r"],
["s","t","y"], ["s","k","l"], ["s","k","r"], ["s","k","w"],
["s","k","y"]]
}
self.features["wellformed_clusters"] = (self.features["wellformed_plosive_clusters"] +
self.features["wellformed_fricative_clusters"] +
self.features["wellformed_other_clusters"] +
self.features["wellformed_s_clusters"])
self.features["silence_phone"] = "pau"
self.features["closure_phone"] = "pau_cl"
self.phones = {"pau" : set(["pause"]),
"pau_cl" : set(["closure"]),
"pau_gs" : set(["glottal-stop"]),
"aa" : set(["duration_short", "position_back", "height_low", "class_syllabic", "vowel", "class_sonorant"]),
"iy" : set(["position_front", "duration_long", "height_high", "class_syllabic", "vowel", "class_sonorant"]),
"ch" : set(["place_alveolar", "class_consonantal", "manner_affricate", "consonant", "manner_strident", "place_post-alveolar"]),
"ae" : set(["duration_short", "position_front", "height_low", "class_syllabic", "vowel", "class_sonorant"]),
"eh" : set(["duration_short", "position_front", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"ah" : set(["duration_short", "position_back", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"ao" : set(["duration_long", "articulation_round", "position_back", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"ih" : set(["duration_short", "position_front", "height_high", "class_syllabic", "vowel", "class_sonorant"]),
"ey" : set(["position_front", "duration_diphthong", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"aw" : set(["position_front", "duration_diphthong", "height_low", "class_syllabic", "vowel", "class_sonorant"]),
"ay" : set(["position_front", "duration_diphthong", "height_low", "class_syllabic", "vowel", "class_sonorant"]),
"zh" : set(["class_consonantal", "voiced", "manner_fricative", "consonant", "place_post-alveolar"]),
"er" : set(["position_central", "duration_short", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"ng" : set(["class_consonantal", "voiced", "manner_nasal", "place_velar", "consonant", "class_sonorant"]),
"r" : set(["place_alveolar", "class_consonantal", "manner_liquid", "voiced", "manner_approximant", "consonant", "class_sonorant"]),
"th" : set(["class_consonantal", "manner_fricative", "consonant", "place_dental"]),
"uh" : set(["duration_short", "position_back", "height_high", "class_syllabic", "vowel", "class_sonorant"]),
"oy" : set(["duration_diphthong", "articulation_round", "position_back", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"dh" : set(["class_consonantal", "voiced", "manner_fricative", "consonant", "place_dental"]),
"ow" : set(["duration_diphthong", "articulation_round", "position_back", "class_syllabic", "vowel", "height_mid", "class_sonorant"]),
"hh" : set(["manner_fricative", "consonant", "place_glottal"]),
"jh" : set(["place_alveolar", "class_consonantal", "manner_affricate", "voiced", "consonant", "manner_strident", "place_post-alveolar"]),
"b" : set(["class_consonantal", "place_bilabial", "voiced", "manner_plosive", "consonant"]),
"d" : set(["place_alveolar", "class_consonantal", "voiced", "manner_plosive", "consonant"]),
"g" : set(["class_consonantal", "voiced", "place_velar", "manner_plosive", "consonant"]),
"f" : set(["class_consonantal", "manner_fricative", "consonant", "manner_strident", "place_labiodental"]),
"uw" : set(["duration_long", "articulation_round", "position_back", "height_high", "class_syllabic", "vowel", "class_sonorant"]),
"m" : set(["class_consonantal", "voiced", "manner_nasal", "consonant", "place_labial", "class_sonorant"]),
"l" : set(["place_alveolar", "class_consonantal", "manner_liquid", "voiced", "manner_approximant", "consonant", "manner_lateral", "class_sonorant"]),
"n" : set(["place_alveolar", "class_consonantal", "voiced", "manner_nasal", "consonant", "class_sonorant"]),
"p" : set(["class_consonantal", "place_bilabial", "manner_plosive", "consonant"]),
"s" : set(["place_alveolar", "class_consonantal", "manner_fricative", "consonant", "manner_strident"]),
"sh" : set(["class_consonantal", "manner_fricative", "consonant", "place_post-alveolar"]),
"t" : set(["place_alveolar", "class_consonantal", "manner_plosive", "consonant"]),
"w" : set(["voiced", "place_velar", "manner_approximant", "manner_glide", "consonant", "place_labial", "class_sonorant"]),
"v" : set(["class_consonantal", "voiced", "manner_fricative", "consonant", "manner_strident", "place_labiodental"]),
"y" : set(["voiced", "place_palatal", "manner_approximant", "manner_glide", "consonant", "class_sonorant"]),
"z" : set(["place_alveolar", "class_consonantal", "voiced", "manner_fricative", "consonant", "manner_strident"]),
"k" : set(["class_consonantal", "place_velar", "manner_plosive", "consonant"])
}
        self.map = dict((k, k) for k in self.phones) # identity mapping: ARPAbet names are already ASCII-safe
def guess_sylstress(self, syllables):
""" Try to guess stress pattern for an unknown word...
"""
if len(syllables) == 1:
if "ah" not in syllables[0]: #schwa
return "1"
else:
return "0"
else:
return "0" * len(syllables) #implement other cases later
```
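`LwaziEnglishPhoneset.syllabify` first converts the phone string to a C/V/G class string and then applies the Hall-style cluster rules in the order listed in `syllable_clusters`. A small illustration, assuming the package is importable as `ttslab.voices.english_default`; an s + voiceless-plosive cluster is kept as the onset of the following syllable (the V.CCV case):
```python
from ttslab.voices.english_default import LwaziEnglishPhoneset

phoneset = LwaziEnglishPhoneset()
print(phoneset.syllabify(["p", "ɑ", "s", "t", "ə"]))
# expected: [['p', 'ɑ'], ['s', 't', 'ə']]
```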
#### File: ttslab/voices/yoruba_default.py
```python
from __future__ import unicode_literals, division, print_function #Py2
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
import os
import re
import codecs
import unicodedata
from tempfile import mkstemp
import numpy as np
from .. phoneset import Phoneset
from .. g2p import G2P_Rewrites_Semicolon, GraphemeNotDefined, NoRuleFound
from .. defaultvoice import LwaziMultiHTSVoice
import ttslab.hts_labels_tone as hts_labels_tone
from .. synthesizer_htsme import SynthesizerHTSME
from . yoruba_orth2tones import word2tones
from ttslab.waveform import Waveform
from ttslab.trackfile import Track
from .. pronundict import PronunLookupError
## SOME UPDATES REVERTED IN ORDER TO PROCESS LEGACY DATA
class YorubaPhoneset(Phoneset):
""" Developed for PhD studies, based on Yoruba data received from
Etienne Barnard...
DEMITASSE: check again later when the phoneset/language is more familiar!
"""
def __init__(self):
Phoneset.__init__(self)
self.features = {"name": "Yoruba Phoneset",
"silence_phone": "pau",
"closure_phone": "pau_cl"
}
self.phones = {"pau" : set(["pause"]),
"pau_cl" : set(["closure"]),
"ʔ" : set(["glottal-stop"]),
#vowels
"a" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front"]),
"ã" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_low", "position_front", "articulation_nasalized"]),
"e" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front"]),
"ɛ̃" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_front", "articulation_nasalized"]),
"i" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front"]),
"ĩ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_front", "articulation_nasalized"]),
"o" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"õ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded", "articulation_nasalized"]),
"ɔ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded"]),
"ɔ̃" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_mid", "position_back", "articulation_rounded", "articulation_nasalized"]),
"u" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back"]),
"ũ" : set(["class_sonorant", "class_syllabic", "vowel", "duration_short", "height_high", "position_back", "articulation_nasalized"]),
#consonants
"b" : set(["class_consonantal", "consonant", "manner_plosive", "place_bilabial", "voiced"]),
"d" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar", "voiced"]),
"f" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_labiodental"]),
"g" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "voiced"]),
"gb" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "place_bilabial", "voiced"]),
"h" : set(["consonant", "manner_fricative", "place_glottal"]),
"j" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_palatal", "voiced"]),
"dʒ" : set(["class_consonantal", "consonant", "manner_affricate", "place_alveolar", "place_post-alveolar", "voiced"]),
"k" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar"]),
"l" : set(["class_sonorant", "class_consonantal", "consonant", "manner_approximant", "manner_liquid", "manner_lateral", "place_alveolar", "voiced"]),
"m" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_bilabial", "voiced"]),
"n" : set(["class_sonorant", "class_syllabic", "class_consonantal", "consonant", "manner_nasal", "place_alveolar", "voiced"]),
"kp" : set(["class_consonantal", "consonant", "manner_plosive", "place_velar", "place_bilabial"]),
"r" : set(["class_sonorant", "class_consonantal", "consonant", "manner_trill", "place_alveolar", "voiced"]),
"s" : set(["class_consonantal", "consonant", "manner_fricative", "manner_strident", "place_alveolar"]),
"ʃ" : set(["class_consonantal", "consonant", "manner_fricative", "place_post-alveolar"]),
"t" : set(["class_consonantal", "consonant", "manner_plosive", "place_alveolar"]),
"w" : set(["class_sonorant", "consonant", "manner_approximant", "manner_glide", "place_labial", "place_velar", "voiced"])
}
self.map = {"pau" : "pau",
"pau_cl" : "pau_cl",
"ʔ" : "pau_gs",
"a" : "a",
"ã" : "an",
"e" : "e",
"ɛ" : "E",
"ɛ̃" : "En",
"i" : "i",
"ĩ" : "in",
"o" : "o",
"õ" : "on",
"ɔ" : "O",
"ɔ̃" : "On",
"u" : "u",
"ũ" : "un",
"b" : "b",
"d" : "d",
"dʒ" : "dZ",
"f" : "f",
"g" : "g",
"gb" : "gb",
"h" : "h",
"j" : "j",
"k" : "k",
"kp" : "kp",
"l" : "l",
"m" : "m",
"n" : "n",
"r" : "r",
"s" : "s",
"t" : "t",
"ʃ" : "S",
"w" : "w"
}
def is_vowel(self, phonename):
return "vowel" in self.phones[phonename]
def is_consonant(self, phonename):
return "consonant" in self.phones[phonename]
def is_syllabicconsonant(self, phonename):
return "class_syllabic" in self.phones[phonename] and "consonant" in self.phones[phonename]
def syllabify(self, phonelist):
""" Basic syllabification, based on the syllabification scheme
devised by <NAME> for isiZulu (Nguni language).
"""
sylls = [[]]
phlist = list(phonelist)
while phlist:
phone = phlist[0]
try:
nphone = phlist[1]
nnphone = phlist[2]
#Syllabic consonant followed by C:
if (self.is_syllabicconsonant(phone) and
self.is_consonant(nphone)):
#sC.C
sylls[-1].append(phlist.pop(0))
if phlist: sylls.append([])
continue
##DEMITASSE: Yoruba doesn't seem to have these:
##########
# #If there is a three phone cluster:
if (self.is_vowel(phone) and
not self.is_vowel(nphone) and
not self.is_vowel(nnphone)):
#VC.C
sylls[-1].append(phlist.pop(0))#phone
sylls[-1].append(phlist.pop(0))#nphone
if phlist: sylls.append([])
continue
except IndexError:
pass
if self.is_vowel(phone):
#V.Any
sylls[-1].append(phlist.pop(0))
if phlist: sylls.append([])
continue
#anything not caught above is added to current syl...
sylls[-1].append(phlist.pop(0))
return sylls
class SynthesizerHTSME_Tone(SynthesizerHTSME):
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_tone.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_tone.p(phone_item),
hts_labels_tone.a(phone_item),
hts_labels_tone.b(phone_item),
hts_labels_tone.c(phone_item),
hts_labels_tone.d(phone_item),
hts_labels_tone.e(phone_item),
hts_labels_tone.f(phone_item),
hts_labels_tone.g(phone_item),
hts_labels_tone.h(phone_item),
hts_labels_tone.i(phone_item),
hts_labels_tone.j(phone_item),
hts_labels_tone.k(phone_item),
hts_labels_tone.l(phone_item),
hts_labels_tone.m(phone_item),]
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
def hts_synth(self, utt, processname):
htsparms = self.engine_parms.copy()
htsparms["-of"] = "%(tempolf0_file)s"
if "htsparms" in utt:
htsparms.update(utt["htsparms"]) #parm overrides for this utt...
#build command string and execute:
cmds = self.hts_bin
for k in htsparms:
if htsparms[k]:
if htsparms[k] is True:
cmds += " " + k
else:
cmds += " " + k + " " + str(htsparms[k])
cmds += " %(tempilab_file)s"
fd1, tempwav_file = mkstemp(prefix="ttslab_", suffix=".wav")
fd2, tempilab_file = mkstemp(prefix="ttslab_")
fd3, tempolab_file = mkstemp(prefix="ttslab_")
fd4, tempolf0_file = mkstemp(prefix="ttslab_")
cmds = cmds % {'models_dir': self.models_dir,
'tempwav_file': tempwav_file,
'tempilab_file': tempilab_file,
'tempolab_file': tempolab_file,
'tempolf0_file': tempolf0_file}
#print(cmds)
with codecs.open(tempilab_file, "w", encoding="utf-8") as outfh:
outfh.write("\n".join(utt["hts_label"]))
os.system(cmds)
#load seg endtimes into utt:
with open(tempolab_file) as infh:
lines = infh.readlines()
segs = utt.get_relation("Segment").as_list()
assert len(segs) == len(lines)
for line, seg in zip(lines, segs):
seg["end"] = hts_labels_tone.htk_int_to_float(line.split()[1])
#load audio:
utt["waveform"] = Waveform(tempwav_file)
#load lf0:
        f0 = np.exp(np.fromfile(tempolf0_file, "float32")) #load lf0 and convert to hertz
#to semitones relative to 1Hz:
f0[f0.nonzero()] = 12.0 * np.log2(f0[f0.nonzero()]) # 12 * log2 (F0 / F0reference) where F0reference = 1
f0t = Track()
f0t.values = f0
f0t.times = np.arange(len(f0), dtype=np.float64) * 0.005
utt["f0"] = f0t
#cleanup tempfiles:
os.close(fd1)
os.close(fd2)
os.close(fd3)
os.close(fd4)
os.remove(tempwav_file)
os.remove(tempolab_file)
os.remove(tempilab_file)
os.remove(tempolf0_file)
return utt
class SynthesizerHTSME_Tone2(SynthesizerHTSME_Tone):
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_tone.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_tone.p(phone_item),
hts_labels_tone.a(phone_item),
hts_labels_tone.b(phone_item),
hts_labels_tone.c(phone_item),
hts_labels_tone.d(phone_item),
hts_labels_tone.e(phone_item),
hts_labels_tone.f(phone_item),
hts_labels_tone.g(phone_item),
hts_labels_tone.h(phone_item),
hts_labels_tone.i(phone_item),
hts_labels_tone.j(phone_item),
hts_labels_tone.k(phone_item),
hts_labels_tone.l(phone_item),
hts_labels_tone.m(phone_item),
hts_labels_tone.n(phone_item)]
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
class SynthesizerHTSME_Tone_NoTone(SynthesizerHTSME_Tone): #no tone labels but loads generated f0
def hts_label(self, utt, processname):
lab = []
starttime = 0
for phone_item in utt.get_relation("Segment"):
if "end" in phone_item:
endtime = hts_labels_tone.float_to_htk_int(phone_item["end"])
else:
endtime = None
phlabel = [hts_labels_tone.p(phone_item),
hts_labels_tone.a(phone_item),
hts_labels_tone.b(phone_item),
hts_labels_tone.c(phone_item),
hts_labels_tone.d(phone_item),
hts_labels_tone.e(phone_item),
hts_labels_tone.f(phone_item),
hts_labels_tone.g(phone_item),
hts_labels_tone.h(phone_item),
hts_labels_tone.i(phone_item),
hts_labels_tone.j(phone_item)]
if endtime is not None:
lab.append("%s %s " % (str(starttime).rjust(10), str(endtime).rjust(10)) + "/".join(phlabel))
else:
lab.append("/".join(phlabel))
starttime = endtime
utt["hts_label"] = lab
return utt
class LwaziYorubaMultiHTSVoice(LwaziMultiHTSVoice):
CONJUNCTIONS = ["ẹyin", "ati", # both,and
"sibẹ-sibẹ", "sibẹsibẹ", "afi", "ṣugbọn", #but
"fun", "nitori", "ni", "to", "ri", #for,because
"boya", "tabi", "yala", #either/or/nor
"pẹlu", "jubẹlọ", "bi", "o", "ti", "lẹ", "jẹ", "pe", #yet,although
"lati", "lẹhin", "igbati", # since
"titi", #until
"akoko" #while
] #Unicode NFC form
CGRAVE = "\u0300"
CACUTE = "\u0301"
CUNDOT = "\u0323"
SMALLGRAPHSET = "abdeẹfghijklmnoọprsṣtuwy"
ENGWORD_CHARTHRESHOLD = 4 #Only prefer entry in English lexicon for words longer (num chars) than this
def __init__(self, phoneset, g2p, pronundict, pronunaddendum,
engphoneset, engg2p, engpronundict, engpronunaddendum,
synthesizer):
LwaziMultiHTSVoice.__init__(self, phoneset=phoneset, g2p=g2p,
pronundict=pronundict,
pronunaddendum=pronunaddendum,
engphoneset=engphoneset, engg2p=engg2p,
engpronundict=engpronundict,
engpronunaddendum=engpronunaddendum,
synthesizer=synthesizer)
def normalizer(self, utt, processname):
""" words marked with a prepended pipe character "|" and words
in the English pronunciation dictionary or addendum will
be marked as English...
"""
token_rel = utt.get_relation("Token")
word_rel = utt.new_relation("Word")
for token_item in token_rel:
tokentext = token_item["name"]
tokentext = tokentext.lower()
tokentextlist = tokentext.split("-") #split tokens on dashes to create multiple words...revisit
for wordname in tokentextlist:
pronunform = unicodedata.normalize("NFC", re.sub(u"[%s%s]" % (self.CGRAVE, self.CACUTE), "", wordname))
word_item = word_rel.append_item()
#try to determine language:
if wordname.startswith("|"):
word_item["lang"] = "eng"
wordname = wordname[1:]
pronunform = pronunform[1:]
elif (((wordname in self.engpronunaddendum or
wordname in self.engpronundict) and
len(pronunform) > self.ENGWORD_CHARTHRESHOLD and
pronunform not in self.pronunaddendum) or
not all([c in self.SMALLGRAPHSET for c in pronunform.lower()])):
word_item["lang"] = "eng"
else:
word_item["lang"] = "def" #default language...
#determine type:
if re.search("[\d]+", wordname):
#TODO: normalisation of digits... at the moment
#insert string to make phonetizer fail:
pronunform = "1234567890"
word_item["type"] = "num"
word_item["lang"] = "eng" #will pronounce digits in English...
else:
word_item["type"] = "norm"
#tokenizer does NFKD... for Yoruba pronunciation
#resources are in NFC without ACUTE and GRAVE
#ACCENTS. But we need the ACCENTS to determine tone
#after syllabification...
word_item["pronunform"] = pronunform
word_item["name"] = wordname
token_item.add_daughter(word_item)
return utt
def phonetizer(self, utt, processname):
def g2p(word, phoneset, pronundict, pronunaddendum, g2p):
syltones = None
syllables = None
if pronunaddendum and word["pronunform"] in pronunaddendum:
phones = pronunaddendum[word["pronunform"]]
syllables = phoneset.syllabify(phones)
else:
try:
wordpronun = pronundict.lookup(word["pronunform"], word["pos"])
except PronunLookupError as e:
if e.value == "no_pos":
wordpronun = self.pronundict.lookup(word_item["name"])
else:
wordpronun = None
except AttributeError:
wordpronun = None
if wordpronun:
if "syllables" in wordpronun:
syllables = wordpronun["syllables"]
syltones = wordpronun["syltones"] #None if doesn't exist
else:
phones = wordpronun["phones"]
syllables = phoneset.syllabify(phones)
else:
try:
phones = pronundict[word["pronunform"]]
except KeyError:
try:
phones = g2p.predict_word(word["pronunform"])
except (GraphemeNotDefined, NoRuleFound):
warns = "WARNING: No pronunciation found for '%s'" % word["name"]
print(warns.encode("utf-8"), file=sys.stderr)
phones = [self.phoneset.features["silence_phone"]]
syllables = phoneset.syllabify(phones)
if not syltones:
try:
syltones = phoneset.guess_sylstress(syllables)
except AttributeError:
try:
syltones = word2tones(word["name"])
assert len(syltones) == len(syllables)
except AssertionError:
#print(word_item["name"], word_item["pronunform"], syllables, syltones)
syltones = "N" * len(syllables)
return syllables, syltones
word_rel = utt.get_relation("Word")
syl_rel = utt.new_relation("Syllable")
sylstruct_rel = utt.new_relation("SylStructure")
seg_rel = utt.new_relation("Segment")
for word_item in word_rel:
if word_item["lang"] == "eng":
syllables, syltones = g2p(word_item, self.engphoneset, self.engpronundict, self.engpronunaddendum, self.engg2p)
#rename phones:
for syl in syllables:
for i in range(len(syl)):
syl[i] = "eng_" + syl[i]
else:
syllables, syltones = g2p(word_item, self.phoneset, self.pronundict, self.pronunaddendum, self.g2p)
word_item_in_sylstruct = sylstruct_rel.append_item(word_item)
for syl, syltone in zip(syllables, syltones):
syl_item = syl_rel.append_item()
syl_item["name"] = "syl"
syl_item["tone"] = syltone
syl_item_in_sylstruct = word_item_in_sylstruct.add_daughter(syl_item)
for phone in syl:
seg_item = seg_rel.append_item()
seg_item["name"] = phone
seg_item_in_sylstruct = syl_item_in_sylstruct.add_daughter(seg_item)
return utt
def phrasifier(self, utt, processname):
""" Determine phrases/phrase breaks in the utterance...
"""
def anycharsin(s, stemplate):
for c in s:
if c in stemplate:
return True
return False
word_rel = utt.get_relation("Word")
punctuation = self.PHRASING_PUNCTUATION
phrase_rel = utt.new_relation("Phrase")
phrase_item = phrase_rel.append_item()
phrase_item["name"] = "BB"
for word_item in word_rel:
phrase_item.add_daughter(word_item)
token_item = word_item.get_item_in_relation("Token").parent_item
if word_item.get_item_in_relation("Token") is token_item.last_daughter:
if word_item is not word_rel.tail_item:
if (("postpunc" in token_item and anycharsin(token_item["postpunc"], punctuation)) or
word_item.next_item["pronunform"] in self.CONJUNCTIONS):
phrase_item = phrase_rel.append_item()
phrase_item["name"] = "BB"
return utt
``` |
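`SynthesizerHTSME_Tone.hts_synth` converts the generated log-F0 track back to hertz and then to semitones relative to 1 Hz (12 * log2(F0)), leaving unvoiced (zero) frames untouched, and attaches frame times at a 5 ms shift. The core conversion reproduced in isolation:
```python
import numpy as np

f0 = np.array([0.0, 100.0, 200.0], dtype="float32")  # hertz; 0.0 marks an unvoiced frame
semitones = f0.copy()
semitones[f0.nonzero()] = 12.0 * np.log2(f0[f0.nonzero()])  # semitones relative to 1 Hz
times = np.arange(len(f0), dtype=np.float64) * 0.005        # 5 ms frame shift
print(semitones)  # approx [0., 79.7, 91.7]
```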
{
"source": "Jklein64/SIGGRAPH18SSS",
"score": 3
} |
#### File: SIGGRAPH18SSS/deeplab_resnet/hc_deeplab.py
```python
import os
import time
import tensorflow as tf
import numpy as np
# from tensorflow.python.keras._impl.keras.initializers import he_normal
from tensorflow.python import debug as tf_debug
from .base import Model
from .image_reader import read_data_list, get_indicator_mat, get_batch_1chunk, get_batch
from .utils import inv_preprocess
from .model import DeepLabResNetModel
from .loader import load_single_image, load_batch_samplepts
# Loader
DIR_ANNOTATION = 'anno_png'
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
NINST = 3
NSAMPLEPTS = 500
DROPOUT_PROB = 1
FEAT_DIM = 128
#######################################################
'''
Helper functions
'''
def lowrank_linear(input_, dim_bottle, dim_out, name="lowrank_linear"):
with tf.compat.v1.variable_scope(name):
weights1 = tf.compat.v1.get_variable("fc_weights1", [input_.get_shape()[-1], dim_bottle], initializer=tf.keras.initializers.HeNormal())
weights2 = tf.compat.v1.get_variable("fc_weights2", [dim_bottle, dim_out], initializer=tf.keras.initializers.HeNormal())
biases = tf.compat.v1.get_variable("biases", [dim_out], initializer=tf.compat.v1.constant_initializer(0.01))
activation = tf.add(tf.matmul(tf.matmul(input_, weights1), weights2), biases)
return activation
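# lowrank_linear factorises the weight matrix W (d_in x d_out) into
# W1 (d_in x dim_bottle) @ W2 (dim_bottle x d_out).  For the first FC layer
# below (1024-dim hypercolumn -> bottleneck 256 -> 512 outputs) that is
# 1024*256 + 256*512 = 393216 weights instead of 1024*512 = 524288 for a
# plain linear layer.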
def linear(input_, dim_out, name="linear"):
with tf.compat.v1.variable_scope(name):
weights1 = tf.compat.v1.get_variable("fc_weights1", [input_.get_shape()[-1], dim_out], initializer=tf.keras.initializers.HeNormal())
biases = tf.compat.v1.get_variable("fc_biases", [dim_out], initializer=tf.compat.v1.constant_initializer(0.01))
activation = tf.add(tf.matmul(input_, weights1), biases)
return activation
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
with tf.compat.v1.variable_scope(name):
w = tf.compat.v1.get_variable('weights', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.compat.v1.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input=input_, filters=w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.compat.v1.get_variable('biases', [output_dim], initializer=tf.compat.v1.constant_initializer(0.0))
# conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
conv = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(input=conv))
return conv
#######################################################
'''
HyperColumn architure class definition
'''
class HyperColumn_Deeplabv2(Model):
"""HyperColumn_Deeplabv2."""
def __init__(self, sess, args):
"""Initialize the parameters.
sess: tensorflow session
"""
self.sess = sess
self.batch_size = args.batch_size
self.args = args
# parameters used to save a checkpoint
self.dataset = "Hypcol"
self.options = []
self._attrs = ['batch_size', 'dataset']
self.build_model()
def fn_map2visualization(self, full_embedding, sz2d):
randproj = tf.random.normal([FEAT_DIM,3])
visualized = tf.matmul(full_embedding, randproj)
tensorshape = tf.concat([tf.constant([-1]), sz2d, tf.constant([3])], 0)
visualized = tf.reshape(visualized, tensorshape)
maxval = tf.reduce_max(input_tensor=visualized)
minval = tf.reduce_min(input_tensor=visualized)
visimg = tf.truediv(visualized - minval, maxval-minval)*255.0
return visimg
# Deprecated
def lossfunction(self, tweightmat, tindicator, tembeddings):
with tf.compat.v1.variable_scope('loss_computation') as scope:
# tembeddings: #pts x 64
sqrvals = tf.reduce_sum(input_tensor=tf.square(tembeddings), axis=1, keepdims=True)
# sqrvals: #pts x 1
sqrvalsmat = tf.tile(sqrvals, [1, tf.shape(input=sqrvals)[0]])
sqrvalsmat2 = tf.add(sqrvalsmat,tf.transpose(a=sqrvalsmat))
distmat = tf.add(sqrvalsmat2, tf.scalar_mul(-2.0, tf.matmul(tembeddings, tf.transpose(a=tembeddings))))/64.0
sigmamat = tf.scalar_mul(2.0, tf.math.reciprocal(1.0+tf.exp(distmat)))
posnegmapping = tf.math.log(tf.add(tf.scalar_mul(0.5, 1.0-tindicator), tf.multiply(tindicator, sigmamat)))
wcrossentropy = tf.multiply(tf.negative(tindicator+2.0), posnegmapping)
lossval = tf.reduce_mean(input_tensor=wcrossentropy)
return lossval
def build_model(self):
args = self.args
npindicator = get_indicator_mat(NSAMPLEPTS, NINST)
# TF part: Input feeding
self.netcontainer = dict()
tinput_img = tf.compat.v1.placeholder(tf.float32,shape=(None,None,3),name='feed_img')
self.netcontainer['input_img'] = tinput_img
sz_lossmat = (NSAMPLEPTS*NINST,NSAMPLEPTS*NINST)
tlossweight = tf.compat.v1.placeholder(tf.float32, shape=sz_lossmat, name='const_weight')
tlossindicator = tf.constant(npindicator, dtype=tf.float32, name='const_indicator')
tsample_points = tf.compat.v1.placeholder(tf.float32, shape=(NSAMPLEPTS*NINST, 2),name='feed_x2d')
self.netcontainer['input_weightmat'] = tlossweight
self.netcontainer['input_samplepts'] = tsample_points
input_img = tf.expand_dims(tinput_img, axis=0)
# Create network.
with tf.compat.v1.variable_scope('', reuse=False):
net = DeepLabResNetModel({'data': input_img}, is_training=self.args.is_training, num_classes=self.args.num_classes)
self.netcontainer['net'] = net
t_imsz = tf.shape(input=input_img)[1:3]
with tf.compat.v1.variable_scope('hypercolumn_layers') as scope:
raw_color = conv2d(input_img/128.0, 4, k_h=1, k_w=1, d_h=1, d_w=1, name="hc_cv_0")
raw_1 = tf.image.resize(conv2d(net.layers['pool1'], 124, k_h=1, k_w=1, d_h=1, d_w=1, name="hc_cv_1"), t_imsz, method=tf.image.ResizeMethod.BILINEAR)
raw_3 = tf.image.resize(conv2d(net.layers['res3b3_relu'], 128, k_h=1, k_w=1, d_h=1, d_w=1, name="hc_cv_3"), t_imsz, method=tf.image.ResizeMethod.BILINEAR)
raw_4 = tf.image.resize(conv2d(net.layers['res4b22_relu'], 256, k_h=3, k_w=3, d_h=1, d_w=1, name="hc_cv_4"), t_imsz, method=tf.image.ResizeMethod.BILINEAR)
raw_5 = tf.image.resize(conv2d(net.layers['res5c'], 512, k_h=3, k_w=3, d_h=1, d_w=1, name="hc_cv_5"), t_imsz, method=tf.image.ResizeMethod.BILINEAR)
raw_output = tf.nn.relu(tf.concat([raw_color, raw_1, raw_3, raw_4, raw_5], 3))
nfeatdim = raw_output.get_shape()[-1]
full_activation = tf.reshape(raw_output, [-1, nfeatdim])
# FC layes
with tf.compat.v1.variable_scope('fc1_matting') as scope:
full_act1 = tf.nn.relu(lowrank_linear(full_activation, 256, 512, name="linear"))
with tf.compat.v1.variable_scope('fc2_matting') as scope:
full_act2 = tf.nn.relu(lowrank_linear(full_act1, 256, 512, name="linear"))
with tf.compat.v1.variable_scope('fc3_matting') as scope:
full_input3 = tf.concat([full_act1, full_act2], -1) # similar to DenseNet
full_embedding = linear(full_input3, FEAT_DIM)
# embeddings: #pts x FEAT_DIM
visimg = self.fn_map2visualization(full_embedding, tf.shape(input=raw_output)[1:3])
outshape = tf.concat([tf.shape(input=raw_output)[0:3], tf.constant([-1])], 0)
self.netcontainer['out_hypcol'] = tf.reshape(full_embedding, outshape)
self.netcontainer['out_visimg'] = visimg
# Deprecated
def setup_optimizer(self):
args = self.args
# Which variables to load. Running means and variances are not trainable,
# thus all_variables() should be restored.
restore_var = [v for v in tf.compat.v1.global_variables()]
all_trainable = [v for v in tf.compat.v1.trainable_variables() if 'beta' not in v.name and 'gamma' not in v.name]
fc_trainable = [v for v in all_trainable if 'fc' in v.name]
conv_trainable = [v for v in all_trainable if 'fc' not in v.name] # lr * 1.0
fc_w_trainable = [v for v in fc_trainable if 'weights' in v.name] # lr * 10.0
fc_b_trainable = [v for v in fc_trainable if 'biases' in v.name] # lr * 20.0
assert(len(all_trainable) == len(fc_trainable) + len(conv_trainable))
assert(len(fc_trainable) == len(fc_w_trainable) + len(fc_b_trainable))
# Define loss and optimisation parameters.
base_lr = tf.constant(args.learning_rate)
step_ph = tf.compat.v1.placeholder(dtype=tf.float32, shape=(), name='ph_step')
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), args.power))
opt_conv = tf.compat.v1.train.MomentumOptimizer(learning_rate, args.momentum)
opt_fc_w = tf.compat.v1.train.MomentumOptimizer(learning_rate * 10.0, args.momentum)
opt_fc_b = tf.compat.v1.train.MomentumOptimizer(learning_rate * 20.0, args.momentum)
# Define a variable to accumulate gradients.
accum_grads = [tf.Variable(tf.zeros_like(v.initialized_value()),
trainable=False) for v in conv_trainable + fc_w_trainable + fc_b_trainable]
# Define an operation to clear the accumulated gradients for next batch.
zero_op = [v.assign(tf.zeros_like(v)) for v in accum_grads]
# Compute gradients.
grads = tf.gradients(ys=self.loss, xs=conv_trainable + fc_w_trainable + fc_b_trainable)
# Accumulate and normalise the gradients.
accum_grads_op = [accum_grads[i].assign_add(tf.scalar_mul(1.0/np.float32(args.grad_update_every), grad)) for i, grad in
enumerate(grads) if grad is not None]
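        # Each of the grad_update_every sub-iterations adds 1/grad_update_every
        # of its gradient to accum_grads, so one apply_gradients call on the
        # accumulated values corresponds to a step on the averaged gradient
        # (an effectively larger batch without holding it all in memory at once).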
grads_conv = accum_grads[:len(conv_trainable)]
grads_fc_w = accum_grads[len(conv_trainable) : (len(conv_trainable) + len(fc_w_trainable))]
grads_fc_b = accum_grads[(len(conv_trainable) + len(fc_w_trainable)):]
# Apply the gradients.
train_op_conv = opt_conv.apply_gradients(zip(grads_conv, conv_trainable))
train_op_fc_w = opt_fc_w.apply_gradients(zip(grads_fc_w, fc_w_trainable))
train_op_fc_b = opt_fc_b.apply_gradients(zip(grads_fc_b, fc_b_trainable))
train_op = tf.group(train_op_conv, train_op_fc_w, train_op_fc_b)
self.train_container = dict()
self.train_container['train_op'] = train_op
self.train_container['acc_grads_op'] = accum_grads_op
self.train_container['zero_op'] = zero_op
self.train_container['restore_var'] = restore_var
self.train_container['step_ph'] = step_ph
# Deprecated
def train(self):
"""Training code.
"""
args = self.args
self.max_iter = args.num_steps
self.checkpoint_dir = args.snapshot_dir
self.imgflist = read_data_list(os.path.join(args.data_dir, 'img'), args.data_list, '.jpg')
self.labelmapflist = read_data_list(os.path.join(args.data_dir, DIR_ANNOTATION), args.data_list, '.png')
## Image, Labelmap loader
h, w = map(int, args.input_size.split(','))
input_size = (h, w)
loader_img = load_single_image(args, input_size)
caller_imgloader = [loader_img['output_img'], loader_img['output_labelmap']]
## Point sampler
pt_sampler = load_batch_samplepts()
caller_sampler = [pt_sampler['out_batchx2d'], pt_sampler['out_weightmat']]
        # L2 weight-decay regularisation loss.
l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables() if 'weights' in v.name]
self.loss = tf.add_n(l2_losses)
# Processed predictions: for visualisation.
pred = tf.cast(tf.image.resize(self.netcontainer['out_visimg'], input_size, method=tf.image.ResizeMethod.BILINEAR), tf.uint8)
# Image summary.
images_summary = tf.compat.v1.py_func(inv_preprocess, [tf.expand_dims(self.netcontainer['input_img'], axis=0), args.save_num_images, IMG_MEAN], tf.uint8)
total_summary = tf.compat.v1.summary.image('images', tf.concat(axis=2, values=[images_summary, pred]),
max_outputs=args.save_num_images) # Concatenate row-wise.
summary_writer = tf.compat.v1.summary.FileWriter(args.snapshot_dir, graph=tf.compat.v1.get_default_graph())
self.setup_optimizer()
self.step = self.train_container['step_ph']
tf.compat.v1.global_variables_initializer().run()
self.load(self.checkpoint_dir)
start_time = time.time()
start_iter = self.step.eval()
nimg = len(self.imgflist)
# Iterate over training steps.
for step in range(0, args.num_steps):
start_time = time.time()
feed_dict = { self.step : step }
loss_value = 0
# Clear the accumulated gradients.
            self.sess.run(self.train_container['zero_op'], feed_dict=feed_dict)
# Image loading
feed_dict_imgloader = {loader_img['input_img_name']: self.imgflist[step%nimg],
loader_img['input_lbm_name']: self.labelmapflist[step%nimg]}
            cur_image, cur_labelmap = self.sess.run(caller_imgloader, feed_dict=feed_dict_imgloader)
if len(np.unique(cur_labelmap)) < NINST:
continue
print('Loaded image: %s' % self.imgflist[step%nimg])
# Accumulate gradients.
for i in range(args.grad_update_every):
# Point sampling
feed_dict_sampler = {pt_sampler['input_labelmap']: cur_labelmap}
                batchx2d, weightmat = self.sess.run(caller_sampler, feed_dict=feed_dict_sampler)
# print('Sampled %d' % i)
feed_dict_backprob = {self.netcontainer['input_img']: cur_image,
self.netcontainer['input_weightmat']: weightmat,
self.netcontainer['input_samplepts']: batchx2d,
self.step : step}
                _, l_val = self.sess.run([self.train_container['acc_grads_op'], self.loss], feed_dict=feed_dict_backprob)
loss_value += l_val
# Normalise the loss.
loss_value /= args.grad_update_every
# Apply gradients.
if step % args.save_pred_every == 0:
print('Summary')
feed_dict_summary = {self.netcontainer['input_img']: cur_image,
self.netcontainer['input_weightmat']: weightmat,
self.netcontainer['input_samplepts']: batchx2d,
self.step : step}
                summary, _ = self.sess.run([total_summary, self.train_container['train_op']], feed_dict=feed_dict_summary)
summary_writer.add_summary(summary, step)
self.save(self.checkpoint_dir, step)
else:
                self.sess.run(self.train_container['train_op'], feed_dict=feed_dict)
duration = time.time() - start_time
print('step {:d} \t loss = {:.5f}, ({:.3f} sec/step)'.format(step, loss_value, duration))
def test(self, img):
feed = {self.netcontainer['input_img']: img}
embedmap = self.sess.run(self.netcontainer['out_hypcol'], feed_dict=feed)
return embedmap
``` |
{
"source": "jkleinwaechter/FeelsLike",
"score": 3
} |
#### File: jkleinwaechter/FeelsLike/dynamo.py
```python
import boto3
import globals
from json import dumps
import datetime
# from flexceptions import FlNotFound, FlProviderFailure
class Record(object):
'''----------------------------------------------------------------------------------------------
    Class used to encapsulate the DynamoDB record. This is used to get a history of how people are
using this skill.
Methods
-------
write()
Commit current record to db.
Setters
-------
    cid(c)
c : string
The customer id as provided in the 'userId' section of the JSON Request object
source(s)
s : string
The location of the Alexa device
destination(d)
d: string
The location requested
intent(i)
i : string
Name of the intent that was invoked
Getters
-------
    cid()
    source()
    destination()
    intent()
----------------------------------------------------------------------------------------------'''
def __init__(self, cid='n/a', intent='n/a'):
self._cid = cid
self._intent = intent
self._source = 'n/a'
self._destination = 'n/a'
# initialize database access
self._ddb = boto3.resource('dynamodb', region_name='us-east-1')
self._table = self._ddb.Table('feelslike')
def write(self):
count = 1
        # If the record already exists, just increment the count and update the time
response = self._table.get_item(Key={'cid': self.cid, 'destination': self._destination})
if 'Item' in response:
if 'count' in response['Item']:
count = response['Item']['count'] + 1
time = datetime.datetime.now().strftime('%a %d-%b-%y %I:%M%p')
self._record = {'cid': self._cid, 'source': self._source, 'destination': self._destination, 'intent': self._intent, 'count': count, 'time': time}
        if globals.debug:
            print(self._record)
        if globals.debug:
            print(response)
        response = self._table.put_item(Item=self._record)
        if globals.debug:
            print('--------------------------DYNAMO-------------------------')
            print(response)
            print(dumps(response, indent=4))
            print('---------------------------------------------------------')
return response
@property
def cid(self):
return self._cid
@cid.setter
def cid(self, c):
self._cid = c
@property
def source(self):
return self._source
@source.setter
def source(self, s):
self._source = s
@property
def destination(self):
return self._destination
@destination.setter
def destination(self, d):
self._destination = d
@property
def intent(self):
return self._intent
@intent.setter
def intent(self, i):
self._intent = i
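# Illustrative usage (not part of the original module):
#   rec = Record(cid=user_id_from_request, intent='FeelsLikeIntent')
#   rec.source = 'Seattle'
#   rec.destination = 'Phoenix'
#   rec.write()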
``` |
{
"source": "jklemm/menu-fullstack-challenge",
"score": 2
} |
#### File: api/resources/clientes_resource.py
```python
import json
import falcon
from api.resources import BaseResource
from core.clientes.exceptions import ClienteNotFoundException, DuplicatedEntityException
from core.clientes.gateway import ClienteGateway
class ClientesResource(BaseResource):
def on_get(self, req, resp, cliente_id=None):
cliente_gateway = ClienteGateway(self.db.session)
if cliente_id:
try:
clientes = cliente_gateway.get_one(int(cliente_id))
content = clientes.as_dict
except ClienteNotFoundException as exc:
resp.status = falcon.HTTP_404
resp.body = json.dumps({"erro": str(exc)})
return resp
else:
clientes = cliente_gateway.get_all()
content = [cliente.as_dict for cliente in clientes]
resp.status = falcon.HTTP_200
resp.body = json.dumps(content)
def on_post(self, req, resp):
cliente_gateway = ClienteGateway(self.db.session)
resp.status = falcon.HTTP_201
raw_json = json.loads(req.bounded_stream.read().decode())
primeiro_nome = raw_json["primeiro_nome"]
ultimo_nome = raw_json["ultimo_nome"]
email = raw_json["email"]
try:
cliente_gateway.create(primeiro_nome, ultimo_nome, email)
except DuplicatedEntityException as exc:
resp.status = falcon.HTTP_412
resp.body = json.dumps({"erro": str(exc)})
return resp
def on_put(self, req, resp, cliente_id=None):
cliente_gateway = ClienteGateway(self.db.session)
if not cliente_id:
resp.status = falcon.HTTP_412
return resp
resp.status = falcon.HTTP_200
raw_json = json.loads(req.bounded_stream.read().decode())
primeiro_nome = raw_json.get("primeiro_nome", None)
ultimo_nome = raw_json.get("ultimo_nome", None)
email = raw_json.get("email", None)
cliente_gateway.update(cliente_id, primeiro_nome, ultimo_nome, email)
```
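A minimal wiring sketch for a resource like this one; the route paths and app construction below are assumptions for illustration, not taken from the repository (which configures its own app and supplies `self.db` through `BaseResource`):
```python
# Hypothetical wiring -- assumes BaseResource provides `self.db`.
import falcon

from api.resources.clientes_resource import ClientesResource

app = falcon.App()  # falcon.API() on Falcon < 3
clientes = ClientesResource()
app.add_route('/clientes', clientes)
app.add_route('/clientes/{cliente_id}', clientes)
```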
#### File: core/clientes/gateway.py
```python
from sqlalchemy.exc import IntegrityError
from core.clientes.exceptions import (
ClienteNotFoundException,
RequiredDataException,
DuplicatedEntityException,
)
from api.database.models import Cliente
class ClienteGateway(object):
def __init__(self, session):
self.session = session
def get_all(self):
return self.session.query(Cliente).all()
def get_one(self, cliente_id: int):
cliente = self.session.query(Cliente).filter_by(id=cliente_id).first()
if cliente:
return cliente
raise ClienteNotFoundException("Cliente ID = {} não encontrado!".format(cliente_id))
def create(self, primeiro_nome: str, ultimo_nome: str, email: str):
if not primeiro_nome or not ultimo_nome or not email:
raise RequiredDataException
cliente = Cliente(primeiro_nome=primeiro_nome, ultimo_nome=ultimo_nome, email=email)
self.session.add(cliente)
try:
self.session.commit()
except IntegrityError:
raise DuplicatedEntityException("Cliente com e-mail '{}' já está cadastrado.".format(email))
return cliente
def update(
self,
cliente_id: int,
primeiro_nome: str = None,
ultimo_nome: str = None,
email: str = None,
):
cliente = self.get_one(cliente_id)
if primeiro_nome and primeiro_nome != cliente.primeiro_nome:
cliente.primeiro_nome = primeiro_nome
if ultimo_nome and ultimo_nome != cliente.ultimo_nome:
cliente.ultimo_nome = ultimo_nome
if email and email != cliente.email:
cliente.email = email
self.session.add(cliente)
self.session.commit()
return cliente
def delete(self, cliente_id):
cliente = self.get_one(cliente_id)
self.session.delete(cliente)
self.session.commit()
def delete_all(self):
self.session.query(Cliente).delete()
self.session.commit()
```
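For context, a rough usage sketch of the gateway with a plain SQLAlchemy session; the engine/session setup is an assumption, and tables are expected to exist already (the project creates them through its DBManager):
```python
# Hypothetical usage -- session setup is an assumption for illustration only.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from core.clientes.gateway import ClienteGateway

session = sessionmaker(bind=create_engine('sqlite:///clientes.db'))()

gateway = ClienteGateway(session)
novo = gateway.create('Ana', 'Silva', 'ana@example.com')
print(gateway.get_one(novo.id).email)
```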
#### File: pedidos/tests/test_pedido_gateway.py
```python
from datetime import datetime
import pytest
from api.config import load_config_file
from api.database.manager import DBManager
from core.pedidos.exceptions import PedidoNotFoundException, RequiredDataException
from core.pedidos.gateway import PedidoGateway
class TestPedidoGatewayTestCase(object):
def setup(self):
configurations = load_config_file()
db_manager = DBManager(configurations.db_test.connection)
db_manager.setup()
self.pedido_gateway = PedidoGateway(db_manager.session)
def teardown(self):
self.pedido_gateway.delete_all()
def _cria_um_pedido(self):
return self.pedido_gateway.create(data=datetime.now(), cliente_id=1, valor=1.0)
class TestPedidoGatewayGetAll(TestPedidoGatewayTestCase):
def test_retorna_vazio_quando_nao_ha_pedidos_cadastrados(self):
pedidos = self.pedido_gateway.get_all()
assert pedidos == []
def test_retorna_o_pedido_gerado(self):
data = datetime.now().replace(microsecond=0)
cliente_id = 1
valor = 1.0
self.pedido_gateway.create(data=data, cliente_id=cliente_id, valor=valor)
pedidos = self.pedido_gateway.get_all()
assert isinstance(pedidos, list)
assert len(pedidos) == 1
assert pedidos[0].data == data
assert pedidos[0].cliente_id == cliente_id
assert pedidos[0].valor == valor
def test_retorna_tres_pedidos(self):
self._cria_um_pedido()
self._cria_um_pedido()
self._cria_um_pedido()
pedidos = self.pedido_gateway.get_all()
assert len(pedidos) == 3
class TestPedidoGatewayGetOne(TestPedidoGatewayTestCase):
def test_retorna_erro_quando_nao_existe_pedido_informado(self):
pedido_id_inexistente = 1337
with pytest.raises(PedidoNotFoundException):
self.pedido_gateway.get_one(pedido_id_inexistente)
def test_retorna_pedido_quando_busca_pelo_id(self):
pedido = self._cria_um_pedido()
pedido_db = self.pedido_gateway.get_one(pedido.id)
assert pedido_db is pedido
class TestPedidoGatewayCreate(TestPedidoGatewayTestCase):
def test_retorna_erro_quando_nao_informa_dados_do_pedido(self):
data = None
cliente_id = 0
valor = 0
with pytest.raises(RequiredDataException):
self.pedido_gateway.create(data, cliente_id, valor)
def test_gera_pedido_quando_informa_dados_corretos(self):
data = datetime.now().replace(microsecond=0)
cliente_id = 1
valor = 123
pedido = self.pedido_gateway.create(data, cliente_id, valor)
pedido_db = self.pedido_gateway.get_one(pedido.id)
assert pedido_db.data == data
assert pedido_db.cliente_id == cliente_id
assert pedido_db.valor == valor
class TestPedidoGatewayUpdate(TestPedidoGatewayTestCase):
def test_retorna_erro_quando_pedido_nao_existe(self):
pedido_id_inexistente = 1337
data = None
cliente_id = 0
valor = 0
with pytest.raises(PedidoNotFoundException):
self.pedido_gateway.update(pedido_id_inexistente, data, cliente_id, valor)
def test_altera_pedido_quando_informa_dados_corretos(self):
data_alterada = datetime.strptime("2020-02-02 02:02:02", "%Y-%m-%d %H:%M:%S")
cliente_id_alterado = 2
valor_alterado = 321
pedido = self._cria_um_pedido()
self.pedido_gateway.update(pedido.id, data_alterada, cliente_id_alterado, valor_alterado)
pedido_alterado = self.pedido_gateway.get_one(pedido.id)
assert pedido_alterado.data == data_alterada
assert pedido_alterado.cliente_id == cliente_id_alterado
assert pedido_alterado.valor == valor_alterado
class TestPedidoGatewayDelete(TestPedidoGatewayTestCase):
def test_retorna_erro_quando_pedido_nao_existe(self):
pedido_id_inexistente = 1337
with pytest.raises(PedidoNotFoundException):
self.pedido_gateway.delete(pedido_id_inexistente)
def test_remove_pedido_com_sucesso(self):
pedido = self._cria_um_pedido()
self.pedido_gateway.delete(pedido.id)
with pytest.raises(PedidoNotFoundException):
self.pedido_gateway.get_one(pedido.id)
class TestPedidoGatewayDeleteAll(TestPedidoGatewayTestCase):
def test_remove_pedidos_com_sucesso(self):
self._cria_um_pedido()
self._cria_um_pedido()
self._cria_um_pedido()
self.pedido_gateway.delete_all()
pedidos = self.pedido_gateway.get_all()
assert len(pedidos) == 0
``` |
{
"source": "jklemm/py-dnd",
"score": 3
} |
#### File: core/entities/default_race_entity.py
```python
from core.structs import AbilityScoreStruct
from core.structs import RaceStruct
class DefaultRaceEntity(object):
def __init__(self):
self.race = RaceStruct()
def get_struct(self):
return self.race
def set_ability_score(self, strength=0, constitution=0, dexterity=0, intelligence=0, wisdom=0, charisma=0):
ability_score = AbilityScoreStruct(strength, constitution, dexterity, intelligence, wisdom, charisma)
self.race.ability_score = ability_score
```
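A short usage sketch; the attribute layout of the returned structs is inferred from the constructor call above and should be treated as an assumption:
```python
# Illustrative only -- struct attribute layout is an assumption.
from core.entities.default_race_entity import DefaultRaceEntity

entity = DefaultRaceEntity()
entity.set_ability_score(strength=2, constitution=1)
race = entity.get_struct()
print(race.ability_score)  # AbilityScoreStruct holding the bonuses set above
```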
#### File: core/tests/test_dice.py
```python
from unittest import TestCase
from core.utils import Dice, DiceRoller
class DiceSidesTests(TestCase):
def setUp(self):
self.dice = Dice()
def test_dice_with_default_side(self):
self.assertEqual(self.dice.sides, 4)
def test_dice_with_none_sides(self):
self.dice.set_sides(None)
self.assertEqual(self.dice.sides, 4)
def test_dice_with_two_sides(self):
self.dice.set_sides(2)
self.assertEqual(self.dice.sides, 4)
def test_dice_with_four_sides(self):
self.dice.set_sides(4)
self.assertEqual(self.dice.sides, 4)
def test_dice_with_six_sides(self):
self.dice.set_sides(6)
self.assertEqual(self.dice.sides, 6)
class RollTheDiceTestCase(TestCase):
def roll_the_dice(self, sides):
return Dice(sides).roll()
def dice_roller_roll_the_dice(self, formula):
return DiceRoller(formula).roll()
def assertLimits(self, value, minimum, maximum):
self.assertGreaterEqual(value, minimum)
self.assertLessEqual(value, maximum)
def assertSideDiceResults(self, sides):
result = self.roll_the_dice(sides)
self.assertLimits(result, 1, sides)
def assertFormulaDiceResults(self, formula, minimum_result, maximum_result):
result = self.dice_roller_roll_the_dice(formula)
self.assertLimits(result, minimum_result, maximum_result)
class RollSimpleDiceTests(RollTheDiceTestCase):
    def test_roll_dice_with_various_sides(self):
self.assertSideDiceResults(4)
self.assertSideDiceResults(6)
self.assertSideDiceResults(8)
self.assertSideDiceResults(10)
self.assertSideDiceResults(12)
self.assertSideDiceResults(20)
class RollComplexDiceTests(RollTheDiceTestCase):
def test_roll_normal_dices(self):
self.assertFormulaDiceResults('d4', 1, 4)
self.assertFormulaDiceResults('d6', 1, 6)
self.assertFormulaDiceResults('d8', 1, 8)
self.assertFormulaDiceResults('d12', 1, 12)
self.assertFormulaDiceResults('d20', 1, 20)
def test_roll_twice_the_dice(self):
self.assertFormulaDiceResults('2d4', 2, 8)
self.assertFormulaDiceResults('2d6', 2, 12)
self.assertFormulaDiceResults('2d8', 2, 16)
self.assertFormulaDiceResults('2d10', 2, 20)
self.assertFormulaDiceResults('2d12', 2, 24)
self.assertFormulaDiceResults('2d20', 2, 40)
def test_roll_three_times_the_dice(self):
self.assertFormulaDiceResults('3d4', 3, 12)
self.assertFormulaDiceResults('3d6', 3, 18)
self.assertFormulaDiceResults('3d8', 3, 24)
self.assertFormulaDiceResults('3d10', 3, 30)
self.assertFormulaDiceResults('3d12', 3, 36)
self.assertFormulaDiceResults('3d20', 3, 60)
def test_roll_four_times_with_bonus(self):
self.assertFormulaDiceResults('4d4+4', 8, 20)
self.assertFormulaDiceResults('4d6+4', 8, 28)
self.assertFormulaDiceResults('4d8+4', 8, 36)
self.assertFormulaDiceResults('4d10+4', 8, 44)
self.assertFormulaDiceResults('4d12+4', 8, 52)
self.assertFormulaDiceResults('4d20+4', 8, 84)
def test_roll_custom_dice(self):
self.assertFormulaDiceResults('10d10+20', 30, 120)
self.assertFormulaDiceResults('10d20+20', 30, 220)
self.assertFormulaDiceResults('10d30+20', 30, 320)
self.assertFormulaDiceResults('10d40+20', 30, 420)
self.assertFormulaDiceResults('10d50+20', 30, 520)
self.assertFormulaDiceResults('10d60+20', 30, 620)
```
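The tests above exercise an `NdS+B` formula syntax (e.g. `4d6+4`). The project's own `DiceRoller` is not shown in this excerpt; a minimal, independent sketch of such a roller could look like this:
```python
# Not the project's implementation -- a standalone sketch of an 'NdS+B' roller.
import random
import re

def roll_formula(formula):
    match = re.fullmatch(r'(\d*)d(\d+)(?:\+(\d+))?', formula)
    if match is None:
        raise ValueError('invalid dice formula: {}'.format(formula))
    count = int(match.group(1) or 1)
    sides = int(match.group(2))
    bonus = int(match.group(3) or 0)
    return sum(random.randint(1, sides) for _ in range(count)) + bonus

print(roll_formula('3d6'))    # between 3 and 18
print(roll_formula('4d6+4'))  # between 8 and 28
```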
#### File: core/usecases/create_character_usecase.py
```python
from core.entities.default_race_entity import DefaultRaceEntity
class CreateCharacterUsecase():
def __init__(self, character_gateway, race_entity=None):
self.character_gateway = character_gateway
self.race_entity = race_entity or DefaultRaceEntity()
def execute(self):
self.character_gateway.race_entity = self.race_entity
return self.character_gateway.create_character()
```
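The usecase only relies on a gateway exposing `race_entity` and `create_character()`; a throwaway stub (hypothetical, for illustration) shows the call flow:
```python
# Hypothetical stub -- the real CharacterGateway is not part of this excerpt.
from core.usecases.create_character_usecase import CreateCharacterUsecase

class StubCharacterGateway(object):
    race_entity = None

    def create_character(self):
        return {'race': self.race_entity.get_struct()}

character = CreateCharacterUsecase(StubCharacterGateway()).execute()
print(character['race'])
```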
#### File: core/utils/dotted_dict.py
```python
class DottedDict(dict):
__slots__ = ()
def __getattr__(self, item):
return self[item]
def __setattr__(self, name, value):
self[name] = value
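# Example (illustrative): attribute access mirrors item access.
#   cfg = DottedDict()
#   cfg.debug = True
#   assert cfg['debug'] is cfg.debug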
``` |
{
"source": "jklemm/pygame",
"score": 4
} |
#### File: jklemm/pygame/game.py
```python
import pygame
from pygame.locals import *
FULLSCREEN = 0
BLACK = (0, 0, 0)
RED = (255, 0, 0)
MAX_WIDTH = 800
MAX_HEIGHT = 600
RESOLUTION = (MAX_WIDTH, MAX_HEIGHT)
def redraw_screen(screen, width, height, x, y):
screen.fill(BLACK)
pygame.draw.rect(screen, RED, (x, y, width, height))
pygame.display.update()
def main():
x = 50
y = 500
width = 40
height = 60
speed = 0.25
is_jumping = False
jump_count = 10
pygame.init()
bestdepth = pygame.display.mode_ok(RESOLUTION, FULLSCREEN, 32)
screen = pygame.display.set_mode(RESOLUTION, FULLSCREEN, bestdepth)
pygame.display.set_caption('Pygame')
clock = pygame.time.Clock()
running = True
while running:
dt = clock.tick(30)
for event in pygame.event.get():
press_close_window = event.type == pygame.QUIT
press_esc_button = (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE)
if press_close_window or press_esc_button:
running = False
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT] and x > 0:
x -= speed * dt
if keys[pygame.K_RIGHT] and (x + width) < MAX_WIDTH:
x += speed * dt
if not is_jumping:
if keys[pygame.K_UP] and y > 0:
y -= speed * dt
if keys[pygame.K_DOWN] and (y + height) < MAX_HEIGHT:
y += speed * dt
if keys[pygame.K_SPACE]:
is_jumping = True
else:
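            # Parabolic jump arc: each frame the vertical offset is
            # 0.5 * jump_count**2, and the sign flips once jump_count goes
            # negative so the square falls back down.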
if jump_count >= -10:
neg = 1
if jump_count < 0:
neg = -1
y -= (jump_count ** 2) * 0.5 * neg
jump_count -= 1
else:
is_jumping = False
jump_count = 10
redraw_screen(screen, width, height, x, y)
pygame.quit()
if __name__ == '__main__':
main()
``` |
{
"source": "jklenzing/pysat",
"score": 3
} |
#### File: pysat/instruments/pysat_testing_xarray.py
```python
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
import pandas as pds
import xarray
import pysat
from pysat.instruments.methods import testing as test
# pysat required parameters
platform = 'pysat'
name = 'testing_xarray'
# dictionary of data 'tags' and corresponding description
tags = {'': 'Regular testing data set'}
# dictionary of satellite IDs, list of corresponding tags
sat_ids = {'': ['']}
_test_dates = {'': {'': pysat.datetime(2009, 1, 1)}}
pandas_format = False
def init(self):
self.new_thing = True
def load(fnames, tag=None, sat_id=None, sim_multi_file_right=False,
sim_multi_file_left=False, malformed_index=False,
**kwargs):
""" Loads the test files
Parameters
----------
fnames : (list)
List of filenames
tag : (str or NoneType)
Instrument tag (accepts '' or a number (i.e., '10'), which specifies
the number of times to include in the test instrument)
sat_id : (str or NoneType)
Instrument satellite ID (accepts '')
sim_multi_file_right : (boolean)
Adjusts date range to be 12 hours in the future or twelve hours beyond
root_date (default=False)
sim_multi_file_left : (boolean)
Adjusts date range to be 12 hours in the past or twelve hours before
root_date (default=False)
malformed_index : (boolean)
If True, time index will be non-unique and non-monotonic.
kwargs : dict
Additional unspecified keywords supplied to pysat.Instrument upon instantiation
are passed here.
Returns
-------
data : (xr.Dataset)
Testing data
meta : (pysat.Meta)
        Metadata
"""
    # create an artificial satellite data set
parts = os.path.split(fnames[0])[-1].split('-')
yr = int(parts[0])
month = int(parts[1])
day = int(parts[2][0:2])
date = pysat.datetime(yr, month, day)
if sim_multi_file_right:
root_date = pysat.datetime(2009, 1, 1, 12)
data_date = date + pds.DateOffset(hours=12)
elif sim_multi_file_left:
root_date = pysat.datetime(2008, 12, 31, 12)
data_date = date - pds.DateOffset(hours=12)
else:
root_date = pysat.datetime(2009, 1, 1)
data_date = date
num = 86400 if sat_id == '' else int(sat_id)
num_array = np.arange(num)
index = pds.date_range(data_date,
data_date+pds.DateOffset(seconds=num-1),
freq='S')
if malformed_index:
index = index[0:num].tolist()
# nonmonotonic
index[0:3], index[3:6] = index[3:6], index[0:3]
# non unique
index[6:9] = [index[6]]*3
data = xarray.Dataset({'uts': (('time'), index)}, coords={'time':index})
    # need to create simple orbits here. Have start of first orbit
    # at 2009-01-01 00:00 UT. 14.84 orbits per day
time_delta = date - root_date
mlt = test.generate_fake_data(time_delta.total_seconds(), num_array,
period=5820, data_range=[0.0, 24.0])
data['mlt'] = (('time'), mlt)
# do slt, 20 second offset from mlt
slt = test.generate_fake_data(time_delta.total_seconds()+20, num_array,
period=5820, data_range=[0.0, 24.0])
data['slt'] = (('time'), slt)
# create a fake longitude, resets every 6240 seconds
# sat moves at 360/5820 deg/s, Earth rotates at 360/86400, takes extra time
# to go around full longitude
longitude = test.generate_fake_data(time_delta.total_seconds(), num_array,
period=6240, data_range=[0.0, 360.0])
data['longitude'] = (('time'), longitude)
# create latitude area for testing polar orbits
angle = test.generate_fake_data(time_delta.total_seconds(),
num_array, period=5820,
data_range=[0.0, 2.0*np.pi])
latitude = 90.0 * np.cos(angle)
data['latitude'] = (('time'), latitude)
# fake orbit number
fake_delta = date - pysat.datetime(2008, 1, 1)
orbit_num = test.generate_fake_data(fake_delta.total_seconds(),
num_array, period=5820,
cyclic=False)
data['orbit_num'] = (('time'), orbit_num)
# create some fake data to support testing of averaging routines
mlt_int = data['mlt'].astype(int)
long_int = (data['longitude'] / 15.).astype(int)
data['dummy1'] = (('time'), mlt_int)
data['dummy2'] = (('time'), long_int)
data['dummy3'] = (('time'), mlt_int + long_int * 1000.)
data['dummy4'] = (('time'), num_array)
data['string_dummy'] = (('time'), ['test'] * len(data.indexes['time']))
data['unicode_dummy'] = (('time'), [u'test'] * len(data.indexes['time']))
data['int8_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int8))
data['int16_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int16))
data['int32_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int32))
data['int64_dummy'] = (('time'), np.array([1] * len(data.indexes['time']),
dtype=np.int64))
return data, meta.copy()
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Produce a fake list of files spanning a year"""
index = pds.date_range(pysat.datetime(2008, 1, 1),
pysat.datetime(2010, 12, 31))
names = [data_path+date.strftime('%Y-%m-%d')+'.nofile' for date in index]
return pysat.Series(names, index=index)
def download(date_array, tag, sat_id, data_path=None, user=None,
password=<PASSWORD>):
pass
meta = pysat.Meta()
meta['uts'] = {'units': 's',
'long_name': 'Universal Time',
'custom': False}
meta['Epoch'] = {'units': 'Milliseconds since 1970-1-1',
'Bin_Location': 0.5,
'notes': 'UTC time at middle of geophysical measurement.',
'desc': 'UTC seconds', }
meta['mlt'] = {'units': 'hours',
'long_name': 'Magnetic Local Time',
'label': 'MLT',
'axis': 'MLT',
'desc': 'Magnetic Local Time',
'value_min': 0.,
'value_max': 24.,
'notes': ('Magnetic Local Time is the solar local time of the '
'field line at the location where the field crosses '
'the magnetic equator. In this case we just simulate '
                         '0-24 with a consistent orbital period and an offset '
'with SLT.'),
'fill': np.nan,
'scale': 'linear'}
meta['slt'] = {'units': 'hours',
'long_name': 'Solar Local Time',
'label': 'SLT',
'axis': 'SLT',
'desc': 'Solar Local Time',
'value_min': 0.,
'value_max': 24.,
'notes': ('Solar Local Time is the local time (zenith angle of '
                         'sun) of the given location. Overhead noon, +/- 90 is'
' 6, 18 SLT .'),
'fill': np.nan,
'scale': 'linear'}
meta['orbit_num'] = {'units': '',
'long_name': 'Orbit Number',
'label': 'Orbit Number',
'axis': 'Orbit Number',
'desc': 'Orbit Number',
'value_min': 0.,
'value_max': 25000.,
'notes': ('Number of orbits since the start of the '
'mission. For this simulation we use the '
'number of 5820 second periods since the '
'start, 2008-01-01.'),
'fill': np.nan,
'scale': 'linear'}
meta['longitude'] = {'units': 'degrees', 'long_name': 'Longitude'}
meta['latitude'] = {'units': 'degrees', 'long_name': 'Latitude'}
meta['dummy1'] = {'units': '', 'long_name': 'dummy1'}
meta['dummy2'] = {'units': '', 'long_name': 'dummy2'}
meta['dummy3'] = {'units': '', 'long_name': 'dummy3'}
meta['dummy4'] = {'units': '', 'long_name': 'dummy4'}
meta['string_dummy'] = {'units': '', 'long_name': 'string_dummy'}
meta['unicode_dummy'] = {'units': '', 'long_name': 'unicode_dummy'}
meta['int8_dummy'] = {'units': '', 'long_name': 'int8_dummy'}
meta['int16_dummy'] = {'units': '', 'long_name': 'int16_dummy'}
meta['int32_dummy'] = {'units': '', 'long_name': 'int32_dummy'}
meta['int64_dummy'] = {'units': '', 'long_name': 'int64_dummy'}
```
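The `generate_fake_data` calls above all follow the same pattern: a value that cycles over `data_range` with a fixed `period` in seconds. The sketch below is only an illustration of that idea, not pysat's actual implementation:
```python
# Illustration of the cyclic fake-data idea used above (not pysat's code).
import numpy as np

def fake_cyclic_data(offset_seconds, uts, period, data_range):
    span = data_range[1] - data_range[0]
    phase = np.mod(offset_seconds + uts, period) / float(period)
    return data_range[0] + span * phase

mlt = fake_cyclic_data(0.0, np.arange(86400), period=5820, data_range=[0.0, 24.0])
print(mlt.min(), mlt.max())  # stays within [0, 24)
```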
#### File: pysat/instruments/superdarn_grdex.py
```python
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import functools
import pandas as pds
import numpy as np
import pysat
platform = 'superdarn'
name = 'grdex'
tags = {'north': '',
'south': ''}
sat_ids = {'': ['north', 'south']}
_test_dates = {'': {'north': pysat.datetime(2009, 1, 1),
'south': pysat.datetime(2009, 1, 1)}}
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
Parameters
----------
self : pysat.Instrument
This object
Returns
--------
Void : (NoneType)
Object modified in place.
"""
# reset the list_remote_files routine to include the data path
# now conveniently included with instrument object
self._list_remote_rtn = \
functools.partial(list_remote_files,
data_path=self.files.data_path,
format_str=self.files.file_format)
# data acknowledgement from SuperDARN
    # copied from the SD Documents area of the VT SuperDARN webpage
# http://vt.superdarn.org/tiki-list_file_gallery.php?galleryId=81
# How to acknowledge use of SuperDARN Data - 2017
print('Authors should acknowledge the use of SuperDARN data. ',
'SuperDARN is a collection of radars funded by national scientific ',
'funding agencies of Australia, Canada, China, France, Italy, ',
'Japan, Norway, South Africa, United Kingdom and the United States ',
'of America.')
return
def list_remote_files(tag, sat_id, data_path=None, format_str=None):
"""Lists remote files available for SuperDARN.
Note
----
This routine currently fakes the list but
produces the desired effect of keeping data current.
Begins with data in 1985. (this needs to be checked)
Parameters
----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
Returns
-------
pandas.Series
Series indexed by date that stores the filename for each date.
"""
    # given the nature of the SuperDARN archive, create a fake list of files
# starting 01 Jan 1970, through today
now = pysat.datetime.now()
now = pysat.datetime(now.year, now.month, now.day)
# create a list of dates with appropriate frequency
index = pds.period_range(pysat.datetime(1985, 1, 1), now, freq='D')
# pre fill in blank strings
remote_files = pds.Series([''] * len(index), index=index)
# pysat compares both dates and filenames when determining
# which files it needs to download
    # so we need to ensure that filenames for dates that overlap
    # are the same, or data that is already present will be re-downloaded
# need to get a list of the current files attached to
# the Instrument object. In this case, the object hasn't
# been passed in.....
# that is ok, we can just call list_files right here
# except we don't have the data path
# the init function above is used to reset the
    # list_remote_files method with one where the
# data path and format_str are set
local_files = list_files(tag, sat_id, data_path, format_str)
    # iterating directly since pandas complains about comparing periods
    # between different indexes
for time, fname in local_files.iteritems():
remote_files.loc[time] = fname
return remote_files
def list_files(tag='north', sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string)
Denotes type of file to load. Accepted types are 'north' and 'south'.
(default='north')
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and tag is not None:
if tag == 'north' or tag == 'south':
hemi_fmt = ''.join(('{year:4d}{month:02d}{day:02d}.', tag,
'.grdex'))
return pysat.Files.from_os(data_path=data_path,
format_str=hemi_fmt)
else:
estr = 'Unrecognized tag name for SuperDARN, north or south.'
raise ValueError(estr)
elif format_str is None:
estr = 'A tag name must be passed to SuperDARN.'
raise ValueError(estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str)
def load(fnames, tag=None, sat_id=None):
import davitpy
if len(fnames) <= 0:
return pysat.DataFrame(None), pysat.Meta(None)
elif len(fnames) == 1:
myPtr = davitpy.pydarn.sdio.sdDataPtr(sTime=pysat.datetime(1980, 1, 1),
fileType='grdex',
eTime=pysat.datetime(2250, 1, 1),
hemi=tag,
fileName=fnames[0])
myPtr.open()
in_list = []
in_dict = {'stid': [],
'channel': [],
'noisemean': [],
'noisesd': [],
'gsct': [],
'nvec': [],
'pmax': [],
'start_time': [],
'end_time': [],
'vemax': [],
'vemin': [],
'pmin': [],
'programid': [],
'wmax': [],
'wmin': [],
'freq': []}
while True:
info = myPtr.readRec()
if info is None:
myPtr.close()
break
drift_frame = pds.DataFrame.from_records(info.vector.__dict__,
nrows=len(info.pmax),
index=info.vector.index)
drift_frame['partial'] = 1
drift_frame.drop('index', axis=1, inplace=True)
drift_frame.index.name = 'index'
sum_vec = 0
for nvec in info.nvec:
in_list.append(drift_frame.iloc[sum_vec:sum_vec+nvec])
sum_vec += nvec
in_dict['stid'].extend(info.stid)
in_dict['channel'].extend(info.channel)
in_dict['noisemean'].extend(info.noisemean)
in_dict['noisesd'].extend(info.noisesd)
in_dict['gsct'].extend(info.gsct)
in_dict['nvec'].extend(info.nvec)
in_dict['pmax'].extend(info.pmax)
in_dict['start_time'].extend([info.sTime]*len(info.pmax))
in_dict['end_time'].extend([info.eTime]*len(info.pmax))
in_dict['vemax'].extend(info.vemax)
in_dict['vemin'].extend(info.vemin)
in_dict['pmin'].extend(info.pmin)
in_dict['programid'].extend(info.programid)
in_dict['wmax'].extend(info.wmax)
in_dict['wmin'].extend(info.wmin)
in_dict['freq'].extend(info.freq)
output = pds.DataFrame(in_dict)
output['vector'] = in_list
output.index = output.start_time
output.drop('start_time', axis=1, inplace=True)
return output, pysat.Meta()
else:
raise ValueError('Only one filename currently supported.')
# def default(ivm):
#
# return
def clean(self):
# remove data when there are no vectors
idx, = np.where(self['nvec'] > 0)
self.data = self.data.iloc[idx]
return
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""
Download SuperDARN data from Virginia Tech organized for loading by pysat.
"""
import warnings
warnings.warn(" ".join(("Downloads for SuperDARN currently not supported,",
"but will be added in a future version.")))
```
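For reference, a typical pysat 2.x usage sketch for a module like this; it assumes pysat's data directory has been configured, local data exist, and davitpy is installed for the load step:
```python
# Usage sketch only -- requires configured data paths and davitpy.
import pysat

sdarn = pysat.Instrument(platform='superdarn', name='grdex', tag='north')
sdarn.load(date=pysat.datetime(2009, 1, 1))
print(sdarn['nvec'].head())
```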
#### File: pysat/instruments/timed_saber.py
```python
from __future__ import print_function
from __future__ import absolute_import
import functools
import pysat
# CDAWeb methods prewritten for pysat
from .methods import nasa_cdaweb as cdw
# the platform and name strings associated with this instrument
# need to be defined at the top level
# these attributes will be copied over to the Instrument object by pysat
# the strings used here should also be used to name this file
# platform_name.py
platform = 'timed'
name = 'saber'
# dictionary of data 'tags' and corresponding description
tags = {'': ''}
# Let pysat know if there are multiple satellite platforms supported
# by these routines
# define a dictionary keyed by satellite ID, each with a list of
# corresponding tags
# sat_ids = {'a':['L1', 'L0'], 'b':['L1', 'L2'], 'c':['L1', 'L3']}
sat_ids = {'': ['']}
# Define good days to download data for when pysat undergoes testing.
# format is outer dictionary has sat_id as the key
# each sat_id has a dictionary of test dates keyed by tag string
# _test_dates = {'a':{'L0':pysat.datetime(2019,1,1),
# 'L1':pysat.datetime(2019,1,2)},
# 'b':{'L1':pysat.datetime(2019,3,1),
# 'L2':pysat.datetime(2019,11,23),}}
_test_dates = {'': {'': pysat.datetime(2019, 1, 1)}}
# Additional information needs to be defined
# to support the CDAWeb list files routine
# We need to define a filename format string for every
# supported combination of sat_id and tag string
# fname1 = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
# fname2 = 'cnofs_vefi_acfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
# supported_tags = {'sat1':{'tag1':fname1},
# 'sat2':{'tag2':fname2}}
# you can use format keywords year, month, day, hour, min, sec,
# version and revision
# see code docstring for latest
fname = ''.join(('timed_l2av207_saber_{year:04d}{month:02d}{day:02d}',
'????_v01.cdf'))
supported_tags = {'': {'': fname}}
# use the CDAWeb methods list files routine
# the command below presets some of the methods inputs, leaving
# those provided by pysat available when invoked
list_files = functools.partial(cdw.list_files,
supported_tags=supported_tags)
# let pysat know that data is spread across more than one file
multi_file_day = True
# Set to False to specify using xarray (not using pandas)
# Set to True if data will be returned via a pandas DataFrame
pandas_format = True
#
# support load routine
#
# use the default CDAWeb method
# no other information needs to be supplied here
# pysatCDF is used to load data
load = cdw.load
#
# support download routine
#
# to use the default CDAWeb method
# we need to provide additional information
# directory location on CDAWeb ftp server
# formatting template for filenames on CDAWeb
# formatting template for files saved to the local disk
# a dictionary needs to be created for each sat_id and tag
# combination along with the file format template
# outer dict keyed by sat_id, inner dict keyed by tag
basic_tag = {'dir': '/pub/data/timed/saber/level2a_v2_07_cdf',
'remote_fname': '{year:4d}/{month:02d}/' + fname,
'local_fname': fname}
supported_tags = {'': {'': basic_tag}}
download = functools.partial(cdw.download, supported_tags, multi_file_day=True)
# support listing files currently on CDAWeb
list_remote_files = functools.partial(cdw.list_remote_files,
supported_tags=supported_tags)
# code should be defined below as needed
def default(self):
"""Default customization function.
This routine is automatically applied to the Instrument object
on every load by the pysat nanokernel (first in queue).
Parameters
----------
self : pysat.Instrument
This object
Returns
--------
Void : (NoneType)
Object modified in place.
"""
return
# code should be defined below as needed
def clean(inst):
"""Routine to return PLATFORM/NAME data cleaned to the specified level
Cleaning level is specified in inst.clean_level and pysat
will accept user input for several strings. The clean_level is
specified at instantiation of the Instrument object.
'clean' All parameters should be good, suitable for statistical and
case studies
    'dusty' All parameters should generally be good though some may
not be great
'dirty' There are data areas that have issues, data should be used
with caution
'none' No cleaning applied, routine not called in this case.
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
-----
"""
return
```
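The module above mostly pre-binds the generic CDAWeb routines with its own filename templates via `functools.partial`; a stripped-down, generic illustration of that pattern (the function and dict below are placeholders, not pysat code):
```python
# Generic illustration of the functools.partial pre-binding pattern.
import functools

def generic_list_files(tag=None, sat_id=None, data_path=None, supported_tags=None):
    template = supported_tags[sat_id][tag]
    return template.format(year=2019, month=1, day=1)

fname = 'timed_l2av207_saber_{year:04d}{month:02d}{day:02d}_v01.cdf'
list_files = functools.partial(generic_list_files,
                               supported_tags={'': {'': fname}})
print(list_files(tag='', sat_id=''))  # pysat supplies tag/sat_id/data_path at call time
```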
#### File: pysat/pysat/model_utils.py
```python
from __future__ import print_function
from __future__ import absolute_import
import datetime as dt
import numpy as np
import pandas as pds
import warnings
def satellite_view_through_model(sat, tie, scoords, tlabels):
"""Interpolate model values onto satellite orbital path.
Parameters
----------
sat : pysat.Instrument object
Instrument object with some form of coordinates
tie : ucar_tiegcm object
Model run loaded as tie_gcm object
scoords : string or list of strings
Variable names reflecting coordinates in sat to interpolate model onto
tlabels : string or list of strings
Variable names from model to interpolate onto sat locations
"""
warnings.warn(' '.join(["This function is deprecated here and will be",
"removed in pysat 3.0.0. Please use",
"pysatModelUtils instead:"
"https://github.com/pysat/pysatModelUtils"]),
DeprecationWarning, stacklevel=2)
# tiegcm is in pressure levels, need in altitude, but on regular
# grid
import scipy.interpolate as interpolate
# create input array using satellite time/position
if isinstance(scoords, str):
scoords = [scoords]
coords = [sat[coord] for coord in scoords]
coords.insert(0, sat.index.values.astype(int))
sat_pts = [inp for inp in zip(*coords)]
interp = {}
if isinstance(tlabels, str):
tlabels = [tlabels]
for label in tlabels:
points = [tie.data.coords[dim].values if dim != 'time' else
tie.data.coords[dim].values.astype(int)
for dim in tie[label].dims]
interp[label] = interpolate.RegularGridInterpolator(points,
tie[label].values,
bounds_error=False,
fill_value=None)
sat[''.join(('model_', label))] = interp[label](sat_pts)
def compare_model_and_inst(pairs=None, inst_name=[], mod_name=[],
methods=['all']):
"""Compare modelled and measured data
Parameters
------------
pairs : xarray.Dataset instance
Dataset containing only the desired observation-model data pairs
inst_name : list of strings
ordered list of instrument measurements to compare to modelled data
mod_name : list of strings
ordered list of modelled data to compare to instrument measurements
methods : list of strings
        statistics to calculate. See Notes for accepted inputs
Returns
----------
stat_dict : dict of dicts
Dictionary where the first layer of keys denotes the instrument data
name and the second layer provides the desired statistics
data_units : dict
Dictionary containing the units for the data
Notes
-----
Statistics are calculated using PyForecastTools (imported as verify).
See notes there for more details.
all - all statistics
all_bias - bias, meanPercentageError, medianLogAccuracy,
symmetricSignedBias
accuracy - returns dict with mean squared error, root mean squared error,
mean absolute error, and median absolute error
    scaledAccuracy - returns dict with normalized root mean squared error, mean
absolute scaled error, mean absolute percentage error,
median absolute percentage error, median symmetric
accuracy
bias - scale-dependent bias as measured by the mean error
meanPercentageError - mean percentage error
medianLogAccuracy - median of the log accuracy ratio
symmetricSignedBias - Symmetric signed bias, as a percentage
meanSquaredError - mean squared error
RMSE - root mean squared error
meanAbsError - mean absolute error
medAbsError - median absolute error
    nRMSE - normalized root mean squared error
scaledError - scaled error (see PyForecastTools for references)
MASE - mean absolute scaled error
forecastError - forecast error (see PyForecastTools for references)
percError - percentage error
absPercError - absolute percentage error
logAccuracy - log accuracy ratio
medSymAccuracy - Scaled measure of accuracy
meanAPE - mean absolute percentage error
"""
import verify # PyForecastTools
from pysat import utils
warnings.warn(' '.join(["This function is deprecated here and will be",
"removed in pysat 3.0.0. Please use",
"pysatModelUtils instead:"
"https://github.com/pysat/pysatModelUtils"]),
DeprecationWarning, stacklevel=2)
method_rout = {"bias": verify.bias, "accuracy": verify.accuracy,
"meanPercentageError": verify.meanPercentageError,
"medianLogAccuracy": verify.medianLogAccuracy,
"symmetricSignedBias": verify.symmetricSignedBias,
"meanSquaredError": verify.meanSquaredError,
"RMSE": verify.RMSE, "meanAbsError": verify.meanAbsError,
"medAbsError": verify.medAbsError, "MASE": verify.MASE,
"scaledAccuracy": verify.scaledAccuracy,
"nRMSE": verify.nRMSE, "scaledError": verify.scaledError,
"forecastError": verify.forecastError,
"percError": verify.percError, "meanAPE": verify.meanAPE,
"absPercError": verify.absPercError,
"logAccuracy": verify.logAccuracy,
"medSymAccuracy": verify.medSymAccuracy}
replace_keys = {'MSE': 'meanSquaredError', 'MAE': 'meanAbsError',
'MdAE': 'medAbsError', 'MAPE': 'meanAPE',
'MdSymAcc': 'medSymAccuracy'}
# Grouped methods for things that don't have convenience functions
grouped_methods = {"all_bias": ["bias", "meanPercentageError",
"medianLogAccuracy",
"symmetricSignedBias"],
"all": list(method_rout.keys())}
# Replace any group method keys with the grouped methods
for gg in [(i, mm) for i, mm in enumerate(methods)
if mm in list(grouped_methods.keys())]:
# Extend the methods list to include all the grouped methods
methods.extend(grouped_methods[gg[1]])
# Remove the grouped method key
methods.pop(gg[0])
# Ensure there are no duplicate methods
methods = list(set(methods))
# Test the input
if pairs is None:
raise ValueError('must provide Dataset of paired observations')
if len(inst_name) != len(mod_name):
raise ValueError('must provide equal number of instrument and model ' +
'data names for comparison')
if not np.all([iname in pairs.data_vars.keys() for iname in inst_name]):
raise ValueError('unknown instrument data value supplied')
if not np.all([iname in pairs.data_vars.keys() for iname in mod_name]):
raise ValueError('unknown model data value supplied')
if not np.all([mm in list(method_rout.keys()) for mm in methods]):
known_methods = list(method_rout.keys())
known_methods.extend(list(grouped_methods.keys()))
unknown_methods = [mm for mm in methods
if mm not in list(method_rout.keys())]
raise ValueError('unknown statistical method(s) requested:\n' +
'{:}\nuse only:\n{:}'.format(unknown_methods,
known_methods))
# Initialize the output
stat_dict = {iname: dict() for iname in inst_name}
data_units = {iname: pairs.data_vars[iname].units for iname in inst_name}
# Cycle through all of the data types
for i, iname in enumerate(inst_name):
# Determine whether the model data needs to be scaled
iscale = utils.scale_units(pairs.data_vars[iname].units,
pairs.data_vars[mod_name[i]].units)
mod_scaled = pairs.data_vars[mod_name[i]].values.flatten() * iscale
# Flatten both data sets, since accuracy routines require 1D arrays
inst_dat = pairs.data_vars[iname].values.flatten()
# Ensure no NaN are used in statistics
inum = np.where(np.isfinite(mod_scaled) & np.isfinite(inst_dat))[0]
if inum.shape[0] < 2:
# Not all data types can use all statistics. Print warnings
# instead of stopping processing. Only valid statistics
# will be included in output
print("{:s} can't calculate stats for {:d} finite samples".format( \
iname, inum.shape[0]))
stat_dict
else:
# Calculate all of the desired statistics
for mm in methods:
try:
stat_dict[iname][mm] = method_rout[mm](mod_scaled[inum],
inst_dat[inum])
# Convenience functions add layers to the output, remove
# these layers
if hasattr(stat_dict[iname][mm], "keys"):
for nn in stat_dict[iname][mm].keys():
new = replace_keys[nn] if nn in replace_keys.keys()\
else nn
stat_dict[iname][new] = stat_dict[iname][mm][nn]
del stat_dict[iname][mm]
except ValueError as verr:
# Not all data types can use all statistics. Print warnings
# instead of stopping processing. Only valid statistics
# will be included in output
print("{:s} can't use {:s}: {:}".format(iname, mm, verr))
except NotImplementedError:
# Not all data types can use all statistics. Print warnings
# instead of stopping processing. Only valid statistics
# will be included in output
print("{:s} can't implement {:s}".format(iname, mm))
return stat_dict, data_units
def collect_inst_model_pairs(start=None, stop=None, tinc=None, inst=None,
user=None, password=<PASSWORD>, model_files=None,
model_load_rout=None, inst_lon_name=None,
mod_lon_name=None, inst_name=[], mod_name=[],
mod_datetime_name=None, mod_time_name=None,
mod_units=[], sel_name=None, method='linear',
model_label='model', inst_clean_rout=None,
comp_clean='clean'):
"""Pair instrument and model data, applying data cleaning after finding the
times and locations where the instrument and model align
Parameters
----------
start : dt.datetime
Starting datetime
stop : dt.datetime
Ending datetime
tinc : dt.timedelta
        Time increment for model files
inst : pysat.Instrument instance
instrument object for which modelled data will be extracted
user : string
User name (needed for some data downloads)
password : string
Password (needed for some data downloads)
model_files : string
string format that will construct the desired model filename from a
datetime object
model_load_rout : routine
Routine to load model data into an xarray using filename and datetime
as input
inst_lon_name : string
variable name for instrument longitude
mod_lon_name : string
variable name for model longitude
inst_name : list of strings
        list of names of the data series to use for determining instrument
location
mod_name : list of strings
        list of names of the data series to use for determining model locations
in the same order as inst_name. These must make up a regular grid.
mod_datetime_name : string
Name of the data series in the model Dataset containing datetime info
mod_time_name : string
Name of the time coordinate in the model Dataset
mod_units : list of strings
units for each of the mod_name location attributes. Currently
supports: rad/radian(s), deg/degree(s), h/hr(s)/hour(s), m, km, and cm
sel_name : list of strings or NoneType
list of names of modelled data indices to append to instrument object,
or None to append all modelled data (default=None)
method : string
Interpolation method. Supported are 'linear', 'nearest', and
'splinef2d'. The last is only supported for 2D data and is not
recommended here. (default='linear')
model_label : string
name of model, used to identify interpolated data values in instrument
(default="model")
inst_clean_rout : routine
Routine to clean the instrument data
comp_clean : string
Clean level for the comparison data ('clean', 'dusty', 'dirty', 'none')
(default='clean')
Returns
-------
matched_inst : pysat.Instrument instance
instrument object and paired modelled data
"""
from os import path
import pysat
warnings.warn(' '.join(["This function is deprecated here and will be",
"removed in pysat 3.0.0. Please use",
"pysatModelUtils instead:"
"https://github.com/pysat/pysatModelUtils"]),
DeprecationWarning, stacklevel=2)
matched_inst = None
# Test the input
if start is None or stop is None:
raise ValueError('Must provide start and end time for comparison')
if inst is None:
raise ValueError('Must provide a pysat instrument object')
if model_files is None:
raise ValueError('Must provide list of modelled data')
if model_load_rout is None:
raise ValueError('Need routine to load modelled data')
if mod_datetime_name is None:
raise ValueError('Need time coordinate name for model datasets')
if mod_time_name is None:
raise ValueError('Need time coordinate name for model datasets')
if len(inst_name) == 0:
estr = 'Must provide instrument location attribute names as a list'
raise ValueError(estr)
if len(inst_name) != len(mod_name):
estr = 'Must provide the same number of instrument and model '
estr += 'location attribute names as a list'
raise ValueError(estr)
if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
if inst_clean_rout is None:
raise ValueError('Need routine to clean the instrument data')
# Download the instrument data, if needed
    # Could use some improvement to avoid re-downloading times that are
    # already present locally
if (stop - start).days != len(inst.files[start:stop]):
inst.download(start=start, stop=stop, user=user, password=password)
# Cycle through the times, loading the model and instrument data as needed
istart = start
while start < stop:
mod_file = start.strftime(model_files)
if path.isfile(mod_file):
try:
mdata = model_load_rout(mod_file, start)
lon_high = float(mdata.coords[mod_lon_name].max())
lon_low = float(mdata.coords[mod_lon_name].min())
except Exception as err:
print("unable to load {:s}: {:}".format(mod_file, err))
mdata = None
else:
mdata = None
if mdata is not None:
# Load the instrument data, if needed
if inst.empty or inst.index[-1] < istart:
inst.custom.add(pysat.utils.coords.update_longitude, 'modify',
low=lon_low, lon_name=inst_lon_name,
high=lon_high)
inst.load(date=istart)
if not inst.empty and inst.index[0] >= istart:
added_names = extract_modelled_observations(inst=inst, \
model=mdata, \
inst_name=inst_name, \
mod_name=mod_name, \
mod_datetime_name=mod_datetime_name, \
mod_time_name=mod_time_name, \
mod_units=mod_units, \
sel_name=sel_name, \
method=method, \
model_label=model_label)
if len(added_names) > 0:
# Clean the instrument data
inst.clean_level = comp_clean
inst_clean_rout(inst)
im = list()
for aname in added_names:
# Determine the number of good points
if inst.pandas_format:
imnew = np.where(np.isfinite(inst[aname]))
else:
imnew = np.where(np.isfinite(inst[aname].values))
# Some data types are higher dimensions than others,
# make sure we end up choosing a high dimension one
                        # so that we don't accidentally throw away paired data
if len(im) == 0 or len(im[0]) < len(imnew[0]):
im = imnew
# If the data is 1D, save it as a list instead of a tuple
if len(im) == 1:
im = im[0]
else:
im = {kk: np.unique(im[i])
for i, kk in enumerate(inst.data.coords.keys())}
# Save the clean, matched data
if matched_inst is None:
matched_inst = pysat.Instrument
matched_inst.meta = inst.meta
matched_inst.data = inst[im]
else:
idata = inst[im]
matched_inst.data = \
inst.concat_data([matched_inst.data, idata])
# Reset the clean flag
inst.clean_level = 'none'
# Cycle the times
if tinc.total_seconds() <= 86400.0:
start += tinc
if start + tinc > istart + dt.timedelta(days=1):
istart += dt.timedelta(days=1)
else:
if start + tinc >= istart + dt.timedelta(days=1):
istart += dt.timedelta(days=1)
if istart >= start + tinc:
start += tinc
# Recast as xarray and add units
if matched_inst is not None:
if inst.pandas_format:
matched_inst.data = matched_inst.data.to_xarray()
for im in inst.meta.data.units.keys():
if im in matched_inst.data.data_vars.keys():
matched_inst.data.data_vars[im].attrs['units'] = \
inst.meta.data.units[im]
return matched_inst
def extract_modelled_observations(inst=None, model=None, inst_name=[],
mod_name=[], mod_datetime_name=None,
mod_time_name=None, mod_units=[],
sel_name=None, method='linear',
model_label='model'):
"""Extracts instrument-aligned data from a modelled data set
Parameters
----------
inst : pysat.Instrument instance
instrument object for which modelled data will be extracted
model : xarray Dataset
modelled data set
inst_name : list of strings
        list of names of the data series to use for determining instrument
location
mod_name : list of strings
        list of names of the data series to use for determining model locations
in the same order as inst_name. These must make up a regular grid.
mod_datetime_name : string
Name of the data series in the model Dataset containing datetime info
mod_time_name : string
Name of the time coordinate in the model Dataset
mod_units : list of strings
units for each of the mod_name location attributes. Currently
supports: rad/radian(s), deg/degree(s), h/hr(s)/hour(s), m, km, and cm
sel_name : list of strings or NoneType
list of names of modelled data indices to append to instrument object,
or None to append all modelled data (default=None)
method : string
Interpolation method. Supported are 'linear', 'nearest', and
'splinef2d'. The last is only supported for 2D data and is not
recommended here. (default='linear')
model_label : string
name of model, used to identify interpolated data values in instrument
(default="model")
Returns
-------
added_names : list of strings
list of names of modelled data added to the instrument
Notes
--------
For best results, select clean instrument data after alignment with model
"""
from scipy import interpolate
from pysat import utils
warnings.warn(' '.join(["This function is deprecated here and will be",
"removed in pysat 3.0.0. Please use",
"pysatModelUtils instead:"
"https://github.com/pysat/pysatModelUtils"]),
DeprecationWarning, stacklevel=2)
# Test input
if inst is None:
raise ValueError('Must provide a pysat instrument object')
if model is None:
raise ValueError('Must provide modelled data')
if mod_datetime_name is None:
raise ValueError('Need datetime key for model datasets')
if mod_time_name is None:
raise ValueError('Need time coordinate name for model datasets')
if len(inst_name) == 0:
estr = 'Must provide instrument location attribute names as a list'
raise ValueError(estr)
if len(inst_name) != len(mod_name):
estr = 'Must provide the same number of instrument and model '
estr += 'location attribute names as a list'
raise ValueError(estr)
if len(mod_name) != len(mod_units):
raise ValueError('Must provide units for each model location ' +
'attribute')
inst_scale = np.ones(shape=len(inst_name), dtype=float)
for i, ii in enumerate(inst_name):
if ii not in list(inst.data.keys()):
raise ValueError('Unknown instrument location index ' +
'{:}'.format(ii))
inst_scale[i] = utils.scale_units(mod_units[i],
inst.meta.data.units[ii])
# Determine which data to interpolate and initialize the interpolated
# output
if sel_name is None:
sel_name = list(model.data_vars.keys())
for mi in mod_name:
if mi in sel_name:
sel_name.pop(sel_name.index(mi))
# Determine the model time resolution
tm_sec = (np.array(model.data_vars[mod_datetime_name][1:]) -
np.array(model.data_vars[mod_datetime_name][:-1])).min()
tm_sec /= np.timedelta64(1, 's')
ti_sec = (inst.index[1:] - inst.index[:-1]).min().total_seconds()
min_del = tm_sec if tm_sec < ti_sec else ti_sec
# Determine which instrument observations are within the model time
# resolution of a model run
mind = list()
iind = list()
for i, tt in enumerate(np.array(model.data_vars[mod_datetime_name])):
del_sec = abs(tt - inst.index).total_seconds()
if del_sec.min() <= min_del:
iind.append(del_sec.argmin())
mind.append(i)
# Determine the model coordinates closest to the satellite track
interp_data = dict()
interp_shape = inst.index.shape if inst.pandas_format else \
inst.data.data_vars.items()[0][1].shape
inst_coord = {kk: getattr(inst.data, inst_name[i]).values * inst_scale[i]
for i, kk in enumerate(mod_name)}
for i, ii in enumerate(iind):
# Cycle through each model data type, since it may not depend on
# all the dimensions
for mdat in sel_name:
# Determine the dimension values
dims = list(model.data_vars[mdat].dims)
ndim = model.data_vars[mdat].data.shape
indices = {mod_time_name: mind[i]}
# Construct the data needed for interpolation
values = model[indices][mdat].data
points = [model.coords[kk].data for kk in dims if kk in mod_name]
get_coords = True if len(points) > 0 else False
idims = 0
while get_coords:
if inst.pandas_format:
# This data iterates only by time
xout = ii
xi = [inst_coord[kk][ii] for kk in dims if kk in mod_name]
get_coords = False
else:
# This data may have additional dimensions
if idims == 0:
# Determine the number of dimensions
idims = len(inst.data.coords)
idim_names = inst.data.coords.keys()[1:]
                        # Find relevant dimensions for cycling and slicing
ind_dims = [k for k, kk in enumerate(inst_name)
if kk in idim_names]
imod_dims = [k for k in ind_dims
if mod_name[k] in dims]
ind_dims = [inst.data.coords.keys().index(inst_name[k])
for k in imod_dims]
# Set the number of cycles
icycles = 0
ncycles = sum([len(inst.data.coords[inst_name[k]])
for k in imod_dims])
cinds = np.zeros(shape=len(imod_dims), dtype=int)
# Get the instrument coordinate for this cycle
if icycles < ncycles or icycles == 0:
ss = [ii if k == 0 else 0 for k in range(idims)]
se = [ii + 1 if k == 0 else
len(inst.data.coords[idim_names[k-1]])
for k in range(idims)]
xout = [cinds[ind_dims.index(k)] if k in ind_dims
else slice(ss[k], se[k]) for k in range(idims)]
xind = [cinds[ind_dims.index(k)] if k in ind_dims
else ss[k] for k in range(idims)]
xout = tuple(xout)
xind = tuple(xind)
xi = list()
for kk in dims:
if kk in mod_name:
# This is the next instrument coordinate
k = mod_name.index(kk)
if k in imod_dims:
# This is an xarray coordinate
xi.append(inst_coord[kk][cinds[k]])
else:
# This is an xarray variable
xi.append(inst_coord[kk][xind])
# Cycle the indices
if len(cinds) > 0:
k = 0
cinds[k] += 1
while cinds[k] > \
inst.data.coords.dims[inst_name[imod_dims[k]]]:
k += 1
if k < len(cinds):
cinds[k-1] = 0
cinds[k] += 1
else:
break
icycles += 1
# If we have cycled through all the coordinates for this
# time, move onto the next time
if icycles >= ncycles:
get_coords = False
# Interpolate the desired value
try:
yi = interpolate.interpn(points, values, xi, method=method)
except ValueError as verr:
if str(verr).find("requested xi is out of bounds") > 0:
# This is acceptable, pad the interpolated data with
# NaN
print("Warning: {:} for ".format(verr) +
"{:s} data at {:}".format(mdat, xi))
yi = [np.nan]
else:
raise ValueError(verr)
# Save the output
attr_name = "{:s}_{:s}".format(model_label, mdat)
if attr_name not in interp_data.keys():
interp_data[attr_name] = np.full(shape=interp_shape,
fill_value=np.nan)
interp_data[attr_name][xout] = yi[0]
# Test and ensure the instrument data doesn't already have the interpolated
# data. This should not happen
if np.any([mdat in inst.data.keys() for mdat in interp_data.keys()]):
raise ValueError("instrument object already contains model data")
# Update the instrument object and attach units to the metadata
for mdat in interp_data.keys():
attr_name = mdat.split("{:s}_".format(model_label))[-1]
inst.meta[mdat] = {inst.units_label: model.data_vars[attr_name].units}
if inst.pandas_format:
inst[mdat] = pds.Series(interp_data[mdat], index=inst.index)
else:
inst.data = inst.data.assign(interp_key=(inst.data.coords.keys(),
interp_data[mdat]))
inst.data.rename({"interp_key": mdat}, inplace=True)
return interp_data.keys()
```
#### File: pysat/utils/stats.py
```python
import numpy as np
import warnings
def median1D(self, bin_params, bin_label, data_label):
"""Calculates the median for a series of binned data.
** Will be removed in a future version now that ssnl.avgs has a
similar function
Parameters
----------
bin_params : array_like
Input array defining the bins in which the median is calculated
bin_label : string
Name of data parameter which the bins cover
data_label : string
Name of data parameter to take the median of in each bin
Returns
-------
medians : array_like
The median data value in each bin
"""
warnings.warn(' '.join(["utils.stats.median1D is deprecated and will be",
"removed in pysat 3.0.0. Please use",
"ssnl.avg.median1D instead"]),
DeprecationWarning, stacklevel=2)
bins = np.arange(bin_params[0], bin_params[1] + bin_params[2],
bin_params[2])
medians = 0. * bins[0:-1]
ind = np.digitize(self.data[bin_label], bins)
for i in range(bins.size-1):
index, = np.where(ind == (i + 1))
if len(index) > 0:
idx = self.data.index[index.astype(int)]
medians[i] = self.data.loc[idx, data_label].median()
return medians
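# Minimal usage sketch (hypothetical column names, assumes a pandas-backed Instrument):
# median1D(inst, (0., 24., 1.), 'mlt', 'dens') bins 'dens' by 'mlt' into 24 one-hour
# bins and returns the 24 bin medians.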
def nan_circmean(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular mean routine
Parameters
-----------
samples : array_like
Input array
high: float or int
Upper boundary for the circular mean range (default=2 pi)
low : float or int
Lower boundary for the circular mean range (default=0)
axis : int or NoneType
Axis along which the circular mean is computed. The default is to
compute the mean of the flattened array
Returns
--------
circmean : float
Circular mean
"""
warnings.warn(' '.join(["utils.stats.nan_circmean is deprecated and will",
"be removed in a future version. This function is",
"part of the scipy 1.4.0 milestones and will be",
"migrated there."]),
DeprecationWarning, stacklevel=2)
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Sum the sine and cosine components of the angles; the arctangent of the two
# sums gives the direction of the mean resultant vector
ssum = np.sin(ang).sum(axis=axis)
csum = np.cos(ang).sum(axis=axis)
res = np.arctan2(ssum, csum)
# Bring the range of the result between 0 and 2 pi
mask = res < 0.0
if mask.ndim > 0:
res[mask] += 2.0 * np.pi
elif mask:
res += 2.0 * np.pi
# Scale the mean angle back onto the original [low, high) range
circmean = res * (high - low) / (2.0 * np.pi) + low
return circmean
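# Example with assumed inputs: nan_circmean([0.1, 2.0*np.pi - 0.1, np.nan]) gives ~0.0,
# whereas np.nanmean of the same samples gives ~pi; averaging the sine and cosine
# components is what makes the result wrap-aware.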
def nan_circstd(samples, high=2.0*np.pi, low=0.0, axis=None):
"""NaN insensitive version of scipy's circular standard deviation routine
Parameters
-----------
samples : array_like
Input array
high: float or int
Upper boundary for circular standard deviation range (default=2 pi)
low : float or int
Lower boundary for circular standard deviation range (default=0)
axis : int or NoneType
Axis along which standard deviations are computed. The default is to
compute the standard deviation of the flattened array
Returns
--------
circstd : float
Circular standard deviation
"""
warnings.warn(' '.join(["utils.stats.nan_circstd is deprecated and will",
"be removed in a future version. This function is",
"part of the scipy 1.4.0 milestones and will be",
"migrated there."]),
DeprecationWarning, stacklevel=2)
samples = np.asarray(samples)
samples = samples[~np.isnan(samples)]
if samples.size == 0:
return np.nan
# Ensure the samples are in radians
ang = (samples - low) * 2.0 * np.pi / (high - low)
# Calculate the means of the sine and cosine, as well as the length
# of their unit vector
smean = np.sin(ang).mean(axis=axis)
cmean = np.cos(ang).mean(axis=axis)
rmean = np.sqrt(smean**2 + cmean**2)
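# rmean is the mean resultant vector length R in [0, 1]; R close to 1 means the angles
# are tightly clustered, and the standard deviation below follows sqrt(-2*ln(R)).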
# Calculate the circular standard deviation
circstd = (high - low) * np.sqrt(-2.0 * np.log(rmean)) / (2.0 * np.pi)
return circstd
``` |
{
"source": "JKlesmith/Bioinformatics",
"score": 3
} |
#### File: JKlesmith/Bioinformatics/CodonSwap.py
```python
import argparse
import os
#Set the author information
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "0.2, Build: 20151006"
__maintainer__ = "<NAME>"
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
#Get commandline arguments
parser = argparse.ArgumentParser(description='CodonSwap '+__version__+' for swapping codons to synonymous mutations')
parser.add_argument('-w', dest='wildtype', action='store', nargs='?', const=1, default='./WTSeq', help='File with the wild-type DNA sequence. Default = ./WTSeq')
args = parser.parse_args()
if os.path.isfile(args.wildtype):
with open(args.wildtype, 'r') as infile: #Open the file with the wild-type DNA sequence
WTSeq = infile.readline() #Read the first line of the WT sequence file
else:
print "Missing the wild-type DNA sequence file. Flag: -w or a file named WTSeq in the ./ directory. ...exit"
quit()
AA_Table = '*ACDEFGHIKLMNPQRSTVWY'
Codon_Table = {'TTT':'F', 'TCT':'S', 'TAT':'Y', 'TGT':'C',
'TTC':'F', 'TCC':'S', 'TAC':'Y', 'TGC':'C',
'TTA':'L', 'TCA':'S', 'TAA':'*', 'TGA':'*',
'TTG':'L', 'TCG':'S', 'TAG':'*', 'TGG':'W',
'CTT':'L', 'CCT':'P', 'CAT':'H', 'CGT':'R',
'CTC':'L', 'CCC':'P', 'CAC':'H', 'CGC':'R',
'CTA':'L', 'CCA':'P', 'CAA':'Q', 'CGA':'R',
'CTG':'L', 'CCG':'P', 'CAG':'Q', 'CGG':'R',
'ATT':'I', 'ACT':'T', 'AAT':'N', 'AGT':'S',
'ATC':'I', 'ACC':'T', 'AAC':'N', 'AGC':'S',
'ATA':'I', 'ACA':'T', 'AAA':'K', 'AGA':'R',
'ATG':'M', 'ACG':'T', 'AAG':'K', 'AGG':'R',
'GTT':'V', 'GCT':'A', 'GAT':'D', 'GGT':'G',
'GTC':'V', 'GCC':'A', 'GAC':'D', 'GGC':'G',
'GTA':'V', 'GCA':'A', 'GAA':'E', 'GGA':'G',
'GTG':'V', 'GCG':'A', 'GAG':'E', 'GGG':'G'}
EColi_Table = {'TTT':'TTC', 'TCT':'AGC', 'TAT':'TAC', 'TGT':'TGC',
'TTC':'TTT', 'TCC':'AGC', 'TAC':'TAT', 'TGC':'TGT',
'TTA':'CTG', 'TCA':'AGC', 'TAA':'TAA', 'TGA':'TAA',
'TTG':'CTG', 'TCG':'AGC', 'TAG':'TGA', 'TGG':'TGG',
'CTT':'CTG', 'CCT':'CCG', 'CAT':'CAC', 'CGT':'CGC',
'CTC':'CTG', 'CCC':'CCG', 'CAC':'CAT', 'CGC':'CGT',
'CTA':'CTG', 'CCA':'CCG', 'CAA':'CAG', 'CGA':'CGT',
'CTG':'CTG', 'CCG':'CCA', 'CAG':'CAA', 'CGG':'CGC',
'ATT':'ATC', 'ACT':'ACC', 'AAT':'AAC', 'AGT':'AGC',
'ATC':'ATT', 'ACC':'ACG', 'AAC':'AAT', 'AGC':'TCT',
'ATA':'ATT', 'ACA':'ACC', 'AAA':'AAA', 'AGA':'CGT',
'ATG':'ATG', 'ACG':'ACC', 'AAG':'AAA', 'AGG':'CGC',
'GTT':'GTG', 'GCT':'GCG', 'GAT':'GAC', 'GGT':'GGC',
'GTC':'GTA', 'GCC':'GCG', 'GAC':'GAT', 'GGC':'GGT',
'GTA':'GTC', 'GCA':'GCG', 'GAA':'GAG', 'GGA':'GGC',
'GTG':'GTT', 'GCG':'GCC', 'GAG':'GAA', 'GGG':'GGT',}
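# Each entry maps a codon to a synonymous codon preferred in E. coli, e.g. 'TTA' (Leu)
# is swapped to 'CTG' (Leu); stop codons are only exchanged for other stop codons.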
def main():
#Write out preamble
print "CodonSwap - Swap codons to synonymous codons (optimized for E.coli codon usage)"
print "Author: "+__author__
print "Contact: "+__email__[0]+", "+__email__[1]+", "+__email__[2]
print __copyright__
print "Version: "+__version__
print "License: "+__license__
print "Credits: "+__credits__[0]+", "+__credits__[1]
print ""
print "Please cite:"
print "Github [user: JKlesmith] (www.github.com)"
print ""
WTASeq = ""
NewDNASeq = ""
NewAASeq = ""
for i in xrange(0,(len(WTSeq)/3)):
WTASeq = WTASeq + str(Codon_Table[WTSeq[i*3:i*3+3]])
NewDNASeq = NewDNASeq + str(EColi_Table[WTSeq[i*3:i*3+3]])
NewAASeq = NewAASeq + str(Codon_Table[EColi_Table[WTSeq[i*3:i*3+3]]])
print "Wild-type DNA sequence"
print WTSeq.rstrip('\r\n')
print "Codon swapped DNA sequence"
print NewDNASeq
print "Wild-type amino acid sequence"
print WTASeq
print "Codon swapped amino acid sequence"
print NewAASeq
if __name__ == '__main__':
main()
``` |
{
"source": "JKlesmith/PythonScripts",
"score": 2
} |
#### File: JKlesmith/PythonScripts/FACSEntropy.py
```python
from __future__ import division
from subprocess import check_output
from math import log, pow
import StringIO
import argparse
import time
import os
#Set the author information
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, <NAME>"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "1.3, Build: 20150819"
__maintainer__ = "<NAME>"
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
#Get commandline arguments
parser = argparse.ArgumentParser(description='FACS Shannon Entropy '+__version__)
parser.add_argument('-s', dest='startresidue', action='store', required=True, help='What is the start residue? ie: 0, 40, 80')
parser.add_argument('-l', dest='length', action='store', required=True, help='Length of your tile? ie: 40, 80')
parser.add_argument('-d', dest='stddev', action='store', help='Standard Deviation? (FACS) ie: 0.6')
parser.add_argument('-c', dest='percentcollected', action='store', help='Percent Collected? (FACS) ie: 0.05')
parser.add_argument('-t', dest='sigthreshold', action='store', nargs='?', const=1, default=5, help='Unselected counts for significance. Default = 5')
parser.add_argument('-p', dest='path', action='store', required=True, help='What is the path to the enrich output directory? ie: ./tile/data/output/')
parser.add_argument('-w', dest='wildtype', action='store', nargs='?', const=1, default='./WTSeq', help='File with the wild-type amino acid sequence. Default = ./WTSeq')
parser.add_argument('-y', dest='ewtenrichment', action='store', help='Manual Ewt enrichment value')
parser.add_argument('-z', dest='eiscalar', action='store', help='Manual Ei enrichment scalar')
args = parser.parse_args()
#Verify inputs
if args.stddev == None:
print "Missing SD. Flag: -d"
quit()
if args.percentcollected == None:
print "Missing percent collected. Flag: -c"
quit()
if args.startresidue == None:
print "Missing start residue. Flag: -s"
quit()
if args.length == None:
print "Missing tile length. Flag: -l"
quit()
if args.ewtenrichment != None and args.eiscalar != None:
#This section is only true if we want to provide our own WT enrichment and a scalar to add to Ei
OverrideEwtEi = True
ManualEwt = float(args.ewtenrichment)
EiScalar = float(args.eiscalar)
else:
OverrideEwtEi = False
#Global Variables
if os.path.isfile(args.wildtype):
with open(args.wildtype, 'r') as infile: #Open the file with the wild-type protein sequence
WTSeq = infile.readline() #Read the first line of the WT sequence file
else:
print "Wild-type sequence file not found...exit"
quit()
StartResidue = int(args.startresidue) #Starting residue for your tile
TileLen = int(args.length) #Length of your tile
Path = args.path #What is the path to the output directory
SignificantThreshold = int(args.sigthreshold) #Number of counts in the unselected library and selected library to be significant
AA_Table = 'ACDEFGHIKLMNPQRSTVWY'
Mutations = {} #Mutations matrix
Ewt = None #Initialize the variable for the wildtype enrichment
SD = float(args.stddev) #Standard Deviation
PC = float(args.percentcollected) #Percent collected
THEOENRICHMENT = -log(PC, 2) #Theoretical maximum enrichment
def Build_Matrix():
#Populate mutation array with None data
for j in xrange(0,TileLen):
for i in enumerate(AA_Table):
try:
#Mutations[ResID][MutID[1]][0 = RawLog2, 1 = Unused, 2 = UnselectedCounts, 3 = SelectedCounts, 4=p-value, 5=WT]
Mutations[j][i[1]] = [None, None, None, None, None, False]
except KeyError:
Mutations[j] = {}
Mutations[j][i[1]] = [None, None, None, None, None, False]
return Mutations
def Get_WT_Ewt():
global Ewt
#Extract NA-NA WT Ewt log2
awk = ""
if os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc'):
awk = check_output(["awk", '{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc'])
elif os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc'):
awk = check_output(["awk", '{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc'])
else:
print "Selected protein ratios file not found...exit"
quit()
#Loop through the output
for line in StringIO.StringIO(awk):
split = line.split(" ")
location = str(split[0])
identity = str(split[1])
if location == "NA" and identity == "NA":
Ewt = float(split[2].rstrip('\n'))
print "Wild-type log2 (Ewt): "+str(Ewt)
return Ewt
def Get_Mut_Ei():
#Extract Mut Ei log2
awk = ""
if os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
awk = check_output(["awk", 'FNR>1{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_B_PRO_qc_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
elif os.path.isfile(Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
awk = check_output(["awk", 'FNR>1{ print $5,$6,$8 }', Path+'ratios_sel_example_F_N_include_filtered_R1_PRO_qc_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
else:
print "Selected protein ratios .m1 file not found...exit"
quit()
#Loop through the output
for line in StringIO.StringIO(awk):
split = line.split(" ")
location = int(split[0])
identity = str(split[1])
#Skip stop codons
if identity == "*":
continue
#Check to see if we're above the tile length and go to next
if location >= TileLen:
continue
Ei = float(split[2].rstrip('\n'))
#Check to see if the enrichment is greater or equal than the theoretical
if OverrideEwtEi == False: #Apply no scalar to the Ei
if Ei >= THEOENRICHMENT:
Mutations[location][identity][0] = (THEOENRICHMENT - 0.001)
else:
Mutations[location][identity][0] = Ei
elif OverrideEwtEi == True: #Apply a scalar to the Ei
if Ei >= (THEOENRICHMENT + EiScalar):
Mutations[location][identity][0] = ((THEOENRICHMENT + EiScalar) - 0.001)
else:
Mutations[location][identity][0] = (Ei + EiScalar)
return Mutations
def Get_Unsel_Counts():
#Get the unselected counts for a variant
awk = ""
if os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
elif os.path.isfile(Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
awk = check_output(["awk", 'FNR>1{ print $5,$6,$9 }', Path+'counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
else:
print "Unselected protein counts .m1 file not found...exit"
quit()
#Loop through the output
for line in StringIO.StringIO(awk):
split = line.split(" ")
location = int(split[0])
identity = str(split[1])
#Skip stop codons
if identity == "*":
continue
#Check to see if we're above the tile length and go to next
if location >= TileLen:
continue
counts = int(split[2].rstrip('\n'))
Mutations[location][identity][2] = counts #Set the unselected counts
return Mutations
def AssignWT():
#Assign the WT residues
for j in xrange(0,TileLen):
for i in enumerate(AA_Table):
if i[1] == WTSeq[StartResidue+j]:
Mutations[j][i[1]][5] = True
return Mutations
def NumMutinCol(ID):
#Returns the number of sig mutants at a residue location
#Initialize our variable
NUMSIGMUTS = 1 #Start at one to account for WT
#Loop through the mut types
for i in enumerate(AA_Table):
if Mutations[ID][i[1]][2] >= SignificantThreshold:
NUMSIGMUTS = NUMSIGMUTS + 1
return NUMSIGMUTS
def Shannon():
#Check to see if the wild-type enrichment is set
if Ewt == None:
print "Error: Wild-Type enrichment is not set...quit"
quit()
print ""
print "Shannon Entropy"
print "Residue Number,Shannon Entropy,Number of Mutations Counted+WT"
#Check to see if the wild-type enrichment is set
if Ewt == None:
print "Error: Wild-Type enrichment is not set...quit"
quit()
#Check for a case where a significant variant fell out of the population
for j in xrange(0,TileLen):
for i in enumerate(AA_Table):
if Mutations[j][i[1]][0] == None and Mutations[j][i[1]][2] >= SignificantThreshold and Mutations[j][i[1]][3] == None:
Mutations[j][i[1]][0] = log((1/Mutations[j][i[1]][2]), 2) #Calculate the raw log2 for this variant and report it as less than this value
#Calculate p-values
for j in xrange(0,TileLen):
#First calculate the column sum
pcol = 0
for i in enumerate(AA_Table):
if Mutations[j][i[1]][5] == False: #Check to see if it's Wild-Type
if Mutations[j][i[1]][2] >= SignificantThreshold: #Check to see if the count is above the counting threshold
pcol = pcol + pow(2, float(Mutations[j][i[1]][0]))
else:
pcol = pcol + pow(2, float(Ewt))
#Then calculate the individual p-value and store the shannon entropy
for i in enumerate(AA_Table):
if Mutations[j][i[1]][5] == False: #Check to see if it's Wild-Type
if Mutations[j][i[1]][2] >= SignificantThreshold: #Check to see if the count is above the counting threshold
Mutations[j][i[1]][4] = (pow(2, float(Mutations[j][i[1]][0]))/pcol)
else:
Mutations[j][i[1]][4] = (pow(2, float(Ewt))/pcol)
#Calculate the residue shannon entropy
for j in xrange(0,TileLen):
SE = 0
for i in enumerate(AA_Table):
if Mutations[j][i[1]][2] >= SignificantThreshold: #Check to see if the count is above the counting threshold
SE = SE + -1*Mutations[j][i[1]][4]*log(Mutations[j][i[1]][4])
#Normalize our Shannon Entropy
try:
SE = (SE*log(20))/log(NumMutinCol(j))
except ZeroDivisionError:
print "Your tile length is possibly too long or there is no mutations besides WT at position "+str(j)
#Output the entropy values
print str(StartResidue+j)+","+str(SE)+","+str(NumMutinCol(j))
return
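# Note on the normalization above: the raw entropy H = -sum(p_i * ln(p_i)) is rescaled
# by ln(20)/ln(N), where N is the number of significant mutations plus wild type, so
# positions sampled at different depths are comparable on the full 20-amino-acid scale.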
def main():
global Ewt
#Write out preamble
print "FACS Shannon Entropy"
print "Author: "+__author__
print "Contact: "+__email__[0]+", "+__email__[1]+", "+__email__[2]
print __copyright__
print "Version: "+__version__
print "License: "+__license__
print "Credits: "+__credits__[0]+", "+__credits__[1]+", "+__credits__[2]
print ""
print "Cite:"
print "Github [user: JKlesmith] (www.github.com)"
print ""
print "Run parameters:"
print time.strftime("%H:%M:%S")
print time.strftime("%m/%d/%Y")
print "SD (-d): "+args.stddev
print "Percent Collected (-c): "+args.percentcollected
print "Theoretical max enrichment based off of percent collected: "+str(THEOENRICHMENT)
print "Start residue (-s): "+str(StartResidue)
print "Tile length (-l): "+str(TileLen)
print "Significant count threshold (-t): "+str(SignificantThreshold)
print "Wild-type sequence file (-w): "+args.wildtype
print "Enrich output directory (-p): "+Path
#Make the internal matrix
Build_Matrix()
#Assign the WT residues
AssignWT()
#Get the counts
Get_Unsel_Counts()
#Get the log2 data
if OverrideEwtEi == True:
#Set the manual Ewt enrichment
Ewt = ManualEwt
print "Manually set Ewt (-y): "+str(Ewt)
print "Ei scalar transform (-z): "+str(EiScalar)
else:
Get_WT_Ewt()
Get_Mut_Ei()
#Print out a csv
Shannon()
if __name__ == '__main__':
main()
```
#### File: JKlesmith/PythonScripts/QuickStats.py
```python
from __future__ import division
from subprocess import check_output
from math import log
import StringIO
import argparse
import time
import os
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, <NAME>"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "1.4X, Build: 201507X"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
#Build Notes:
#1.3 - 20150616 - Fixed a counting bug at the end of the tile in CodonSubs so the comparison is strictly less than rather than less than or equal to
#Get commandline arguments
parser = argparse.ArgumentParser(description='Quick Enrich Stats - Note: you must pre-normalize the data using QuickNormalize.py.')
parser.add_argument('-f', dest='file', action='store', help='File of your already normalized dataset')
parser.add_argument('-p', dest='path', action='store', help='What is the path to the enrich tile directory? ie: ./tile/')
parser.add_argument('-l', dest='tilelength', action='store', help='Tile length override')
parser.add_argument('-s', dest='tilestart', action='store', help='Tile start override')
args = parser.parse_args()
#Verify inputs
if args.file == None:
print "No normalized file given"
quit()
if args.path == None:
print "No enrich path given"
quit()
#Global vars
AA_Table = '*ACDEFGHIKLMNPQRSTVWY'
Mutations = {}
NumResi = 0 #Tile length
NormData = ""
StartResidue = 0
def Build_Matrix():
#Populate Mutation Dictionary with None Data
for j in xrange(0+StartResidue,NumResi+StartResidue):
for i in enumerate(AA_Table):
try:
#Mutations[ResID][MutID[1]][0 = NormLog2, 1 = Unselected, 2 = Selected]
Mutations[j][i[1]] = [None, None, None]
except KeyError:
Mutations[j] = {}
Mutations[j][i[1]] = [None, None, None]
return Mutations
def ImportNormData():
global NumResi
global NormData
global StartResidue
lines = 0
normdata = ""
#Import the previously normalized data
with open(args.file) as infile:
copy = False
for line in infile:
if line.strip() == "Location,Mutation,Normalized_ER,Unselected_Reads,Selected_Reads,RawLog2":
copy = True
elif line.strip() == "Normalized Heatmap":
copy = False
elif line.startswith("Tile Length: "):
if args.tilelength != None:
NumResi = int(args.tilelength)
else:
NumResi = int(line.strip()[13:])
print "Tile length: "+str(NumResi)
elif line.startswith("Start residue (-s): "):
split = line.split(" ")
if args.tilestart != None:
StartResidue = int(args.tilestart)
else:
StartResidue = int(split[3]) #Set the start residue
elif copy:
NormData = NormData + line
lines = lines + 1
#NumResi = int(lines / 21) #Set the tile length
return normdata
def PopulateMutArrays():
#Loop through the output
for line in StringIO.StringIO(NormData):
split = line.split(",")
location = int(split[0])
identity = str(split[1])
#Ignore if our location is above our number of residues
if location > (NumResi + StartResidue - 1):
print "Above Tile Length Reject: "+str(location)+"-"+str(identity)
continue
#Ignore if our location is below our number of residues
if location < StartResidue:
print "Below Tile Start Reject "+str(location)+"-"+str(identity)
continue
Mutations[location][identity][0] = split[2]
Mutations[location][identity][1] = split[3]
Mutations[location][identity][2] = split[4].rstrip('\n')
return Mutations
def DNAReads():
reads = {} #Initialize the variable for the number of reads 0=unsel, 1=sel
SC = 0
UC = 0
selectedcounts = ""
unselectedcounts = ""
if os.path.isfile(args.path+'data/output/counts_sel_example_F_N_include_filtered_B_DNA_qc'):
selectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_sel_example_F_N_include_filtered_B_DNA_qc'])
elif os.path.isfile(args.path+'data/output/counts_sel_example_F_N_include_filtered_R1_DNA_qc'):
selectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_sel_example_F_N_include_filtered_R1_DNA_qc'])
else:
print "Can't find selected DNA counts"
quit()
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'):
unselectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'):
unselectedcounts = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'])
else:
print "Can't find unselected DNA counts"
quit()
#Loop through the output
for line in StringIO.StringIO(selectedcounts):
split = line.split(" ")
SC = SC + int(split[0].rstrip('\n'))
for line in StringIO.StringIO(unselectedcounts):
split = line.split(" ")
UC = UC + int(split[0].rstrip('\n'))
reads[0] = str(UC) #Set the unselected reads
reads[1] = str(SC) #Set the selected reads
return reads
def MutationCounts():
muts = {}
NM00 = 0
NM10 = 0
NM15 = 0
NM30 = 0
NM50 = 0
NM100 = 0
FiveThreshold = 0
Retained = 0
for j in xrange(0+StartResidue,NumResi+StartResidue):
for i in enumerate(AA_Table):
if Mutations[j][i[1]][0] != "NS":
if float(Mutations[j][i[1]][0]) > 0.00:
NM00 += 1
if float(Mutations[j][i[1]][0]) > 0.10:
NM10 += 1
if float(Mutations[j][i[1]][0]) > 0.15:
NM15 += 1
if float(Mutations[j][i[1]][0]) > 0.30:
NM30 += 1
if float(Mutations[j][i[1]][0]) > 0.50:
NM50 += 1
if float(Mutations[j][i[1]][0]) > 1.00:
NM100 += 1
if Mutations[j][i[1]][1] != "None":
if int(Mutations[j][i[1]][1]) >= 5:
FiveThreshold += 1
if Mutations[j][i[1]][2] != "None":
Retained += 1
muts[0] = NM00
muts[1] = NM10
muts[2] = NM15
muts[3] = NM30
muts[4] = NM50
muts[5] = NM100
muts[6] = FiveThreshold
muts[7] = Retained
return muts
def Nonsynonymous():
reads = {}
Total = 0
Single = 0
WT = 0
ALL = ""
M1 = ""
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc'):
ALL = check_output(["awk", 'FNR>1{ print $1,$9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc'):
ALL = check_output(["awk", 'FNR>1{ print $1,$9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc'])
else:
print "Unsel protein counts not found"
quit()
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'):
M1 = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_PRO_qc.m1'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'):
M1 = check_output(["awk", 'FNR>1{ print $9 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_PRO_qc.m1'])
else:
print "Unsel protein counts.m1 not found"
quit()
#Loop through the output
for line in StringIO.StringIO(ALL):
split = line.split(" ")
if split[0] == "NA-NA":
WT = int(split[1])
Total = Total + int(split[1].rstrip('\n'))
for line in StringIO.StringIO(M1):
split = line.split(" ")
Single = Single + int(split[0].rstrip('\n'))
reads[0] = WT #Wild-type
reads[1] = Single #.m1
reads[2] = (Total - Single - WT) #all - .m1 - WT
return reads
def CodonSubs():
codons = {}
One = 0
Two = 0
Three = 0
#Get the start of translation
TranslateStart = 0
TranslateEnd = 0
with open(args.path+'input/example_local_config') as infile:
for line in infile:
if line.startswith("<translate_start>"):
TSLen = len(line)
TranslateStart = int(line[17:TSLen-20])
TranslateEnd = TranslateStart+(3*NumResi)
ALL = ""
M1 = ""
M2 = ""
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'):
ALL = check_output(["awk", 'FNR>1{ print $4,$5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'):
ALL = check_output(["awk", 'FNR>1{ print $4,$5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc'])
else:
print "Counts unsel DNA not found."
quit()
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m1'):
M1 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m1'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m1'):
M1 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m1'])
else:
print "Counts unsel DNA.m1 not found."
quit()
if os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m2'):
M2 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_B_DNA_qc.m2'])
elif os.path.isfile(args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m2'):
M2 = check_output(["awk", 'FNR>1{ print $5 }', args.path+'data/output/counts_unsel_example_F_N_include_filtered_R1_DNA_qc.m2'])
else:
print "Counts unsel DNA.m2 not found."
quit()
#Check for single base mutations
for line in StringIO.StringIO(M1):
split = line.split(" ")
if int(split[0]) >= TranslateStart and int(split[0]) < TranslateEnd: #Check to see that the base is in our tile
One = One + 1
#Check for double base mutations
for line in StringIO.StringIO(M2):
split2 = line.split(" ")
location = split2[0].split(",") #Get the individual mutation locations
if int(location[0]) >= TranslateStart and int(location[0]) < TranslateEnd: #Check to see that the base is in our tile
if int(location[1]) >= TranslateStart and int(location[1]) < TranslateEnd: #Check to see that the base is in our tile
codon1 = int((int(location[0]) - int(TranslateStart))/3)
codon2 = int((int(location[1]) - int(TranslateStart))/3)
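# Integer-dividing each base offset from the translation start by 3 gives its codon
# index; a multi-base change only counts as a codon substitution when every changed
# base falls in the same codon.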
if codon1 == codon2:
Two = Two + 1
#Check for triple base mutations
for line in StringIO.StringIO(ALL):
split3 = line.split(" ")
if split3[0] == "3": #Test to see that there are three mutations
location = split3[1].split(",") #Get the individual mutation locations
if int(location[0]) >= TranslateStart and int(location[0]) < TranslateEnd: #Check to see that the base is in our tile
if int(location[1]) >= TranslateStart and int(location[1]) < TranslateEnd: #Check to see that the base is in our tile
if int(location[2]) >= TranslateStart and int(location[2]) < TranslateEnd: #Check to see that the base is in our tile
codon1 = int((int(location[0]) - int(TranslateStart))/3)
codon2 = int((int(location[1]) - int(TranslateStart))/3)
codon3 = int((int(location[2]) - int(TranslateStart))/3)
if codon1 == codon2 and codon2 == codon3:
Three = Three + 1
codons[0] = One #1-base sub
codons[1] = Two #2-base sub
codons[2] = Three #3-base sub
return codons
def RunStats():
print "Stat run parameters:"
print time.strftime("%H:%M:%S")
print time.strftime("%m/%d/%Y")
print "Nomalized file: "+args.file
print "Data path: "+args.path
print "Tile length: "+str(NumResi)
print "Tile start: "+str(StartResidue)
if args.tilelength != None:
print "Custom tile length passed on the command line"
if args.tilestart != None:
print "Custom tile start passed on the command line"
reads = DNAReads()
print "Unselected DNA sequences (reads) from Enrich: "+reads[0]
print "Selected DNA sequences (reads) from Enrich: "+reads[1]
mutations = MutationCounts()
print "Number of mutations above 0.00: "+str(mutations[0])
print "Number of mutations above 0.10: "+str(mutations[1])
print "Number of mutations above 0.15: "+str(mutations[2])
print "Number of mutations above 0.30: "+str(mutations[3])
print "Number of mutations above 0.50: "+str(mutations[4])
print "Number of mutations above 1.00: "+str(mutations[5])
print "Number unselected mutants above threshold of 5: "+str(mutations[6])
print "Number of mutations retained in the selected population (not given a 1 if significant in unsel): "+str(mutations[7])
codons = CodonSubs()
print "Percent of possible codon subsititions observed in the unselected population:"
print "1-base substitution (#codons*9): {0:.1f}".format((codons[0]/(9*NumResi)*100))+"% "+str(codons[0])+"/"+str(9*NumResi)
print "2-base substitutions (#codons*27): {0:.1f}".format((codons[1]/(27*NumResi)*100))+"% "+str(codons[1])+"/"+str(27*NumResi)
print "3-base substitutions (#codons*27): {0:.1f}".format((codons[2]/(27*NumResi)*100))+"% "+str(codons[2])+"/"+str(27*NumResi)
print "Total base substitutions: "+str(codons[0]+codons[1]+codons[2])+"/"+str(63*NumResi)
nonsynonymous = Nonsynonymous()
print "Percent of unselected reads with: "
print "No nonsynonymous mutations: {0:.1f}".format((nonsynonymous[0]/int(reads[0]))*100)+"% "+str(nonsynonymous[0])+"/"+reads[0]
print "One nonsynonymous mutation: {0:.1f}".format((nonsynonymous[1]/int(reads[0]))*100)+"% "+str(nonsynonymous[1])+"/"+reads[0]
print "Multiple nonsynonymous mutations: {0:.1f}".format((nonsynonymous[2]/int(reads[0]))*100)+"% "+str(nonsynonymous[2])+"/"+reads[0]
print "Coverage of possible single nonsynonymous amino acid mutations: {0:.1f}".format((mutations[6]/(NumResi*20))*100)+"% "+str(mutations[6])+"/"+str(NumResi*20)
return
def main():
#Write out preamble
print "QuickStats"
print "Author: "+__author__
print "Contact: "+__email__
print __copyright__
print "License: "+__license__
print "Credits: "+__credits__[0]+", "+__credits__[1]
print ""
print "Please cite:"
print "Github [user: JKlesmith] (www.github.com)"
print "<NAME>, <NAME>, <NAME>, Whitehead TA. 2015. Comprehensive sequence-flux mapping of metabolic pathways in living cells."
print "Kowalsky CA, <NAME>, <NAME>, <NAME>, <NAME>, Whitehead TA. 2015. High-Resolution Sequence-Function Mapping of Full-Length Proteins. PLoS ONE 10(3):e0118193. doi:10.1371/journal.pone.0118193."
print ""
#Print out run stats
ImportNormData()
Build_Matrix()
PopulateMutArrays()
RunStats()
if __name__ == '__main__':
main()
``` |
{
"source": "jkleve/Optimization-Algoirthms",
"score": 3
} |
#### File: Optimization-Algoirthms/functions/ackley_function.py
```python
from numpy import sqrt, cos, exp, pi
# Ackley Function
def objective_function(params):
x1 = params[0]
x2 = params[1]
a = 20
b = 0.2
c = 2*pi
d = len(params) # number of dimensions
return ( -1.0*a*exp(-1.0*b*sqrt((1.0/d)*(x1**2 + x2**2))) - exp((1.0/d)*(cos(c*x1) + cos(c*x2))) + a + exp(1) )
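# The global minimum is at the origin: objective_function([0.0, 0.0]) returns 0.0
# (up to floating-point rounding); the surface is highly multimodal away from it.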
```
#### File: Optimization-Algoirthms/genetic_algorithm/genetic_algorithm.py
```python
import argparse # parsing command line arguments
import importlib # dynamically importing modules
import random # randint
import time # delay & timing
from math import log # used in mutation
import sys # to exit and append to path
sys.path.append('../utils')
sys.path.append('../functions')
import oa_utils # optimization algorithm utils
from timer import Timer
from plot_utils import PlotUtils # plotting each iteration if plot is True
class Organism:
"""One organsim to be used with genetic algorithm. Keeps
track of the following attributes:
Attributes:
id: A number that specifies an id
pos: An array of floats defining the organism's position in space.
func: A function to call to calculate this organism's fitness
"""
def __init__(self, id, pos, func):
self.id = id
self.pos = pos
self.func = func
self.fitness = self.get_fval()
def __str__(self):
x_str = "["
for x in self.pos:
x_str += "%6.3f " % x
x_str += "]"
return "(id: %d, fitness: %7.4f, X: %s)" % \
(self.id, self.fitness, x_str)
# TODO: make this a class method with a pos parameter?
def get_fval(self):
return self.func(self.pos)
class GA(Timer, object):
"""A genetic algorithm class that contains methods for handling
the population over generations/iterations
Attributes:
There are not attributes for this class. All settings/attributes
are read in from ga_settings.py which should be located in the same
directory as this file
NOTE: The GA methods assume the population array is sorted
"""
def __init__(self, settings, function): # TODO add settings parameter
super(self.__class__, self).__init__()
# read in settings
num_dims = settings['number_of_dimensions']
population_size = settings['population_size']
bounds = settings['bounds']
# check to make sure num_dims and number of bounds provided match
if len(bounds) != num_dims:
raise ValueError("Number of dimensions doesn't match number of bounds provided")
# set instance variables
self.settings = settings
self.function = function
# initialize population
self.population = GA.__gen_population(bounds, population_size, function)
self.total_organisms = len(self.population)
self.best_x = self.population[0]
self.num_generations = 1
# stopping criteria variables
self.func_val_improvement = 0
self.num_iter_since_improvement = 0
if settings['plot']:
try:
self.plotutils = PlotUtils(num_dims, bounds, function)
self.__plot_state()
except ValueError:
print("Can not plot more than 2 dimensions")
settings['plot'] = False
if settings['print_iterations']:
self.__display_state()
if settings['step_through']:
oa_utils.pause()
# def __del__(self):
# del(self.plotutils)
#
@staticmethod
def __gen_organism(id, bounds, function):
# use gen_random_numbers to get a list of positions within the bounds
return Organism(id, oa_utils.gen_random_numbers(bounds), function)
@staticmethod
def __gen_population(bounds, size, function):
b = bounds
f = function
# generate a list of organisms
p = [GA.__gen_organism(i+1, b, f) for i in range(0, size)]
return GA.__sort_population(p)
@staticmethod
def __sort_population(p):
return sorted(p, key=lambda o: o.fitness)
###########################
### GA steps and loop ###
###########################
'''
Three possible ways of doing this:
1. have a setting that says we kill off the last 20% of the population
2. the further you are down the array, the higher your probability of dying
3. kill off the worst based on their distance from the best
TODO: write a test for this; a simple 10-organism population with a 0.5 cutoff will do
'''
@staticmethod
def __selection(population, cutoff, print_action=False):
size = len(population)
# population is sorted ascending by fitness, so the last entry is the worst
max_f = population[size-1].fitness
min_f = population[0].fitness
# denominator in probability of surviving
den = (max_f - min_f)
# if den == 0:
# print("Every organism has same objective function value.")
for (i, organism) in enumerate(population):
f = organism.fitness
# check for division by zero
if den == 0:
normalized_f = 0
else: # get normalized value
normalized_f = float(f - min_f) / den
if normalized_f > cutoff:
# delete the organism from the population
del population[i]
if print_action:
print("Selection: Deleting organism %s" % str(organism))
return population
@staticmethod
def __get_parent_index(cdf_value, arr):
norm_sum = 0
for i, o in enumerate(arr):
norm_sum += o['probability']
if norm_sum >= cdf_value:
return i
return -1
@staticmethod
def __mate_parents(id, parent1, parent2, function):
n = len(parent1.pos)
# randomly choose split position
split = random.randint(0, n-1)
# split parent positions
pos1 = parent1.pos[0:split] + parent2.pos[split:]
pos2 = parent2.pos[0:split] + parent1.pos[split:]
# get id numbers
id1 = id + 1
id2 = id + 2
# return the two newly created organisms
return (Organism(id1, pos1, function), Organism(id2, pos2, function))
"""
population: population
size: size that the population should be after crossover
NOTE: population must be sorted. crossover will return an unsorted
array of the new population.
"""
@staticmethod
def __crossover(id, population, size, function, print_action=False):
new_population = []
length = len(population)
max_f = population[length-1].fitness
min_f = population[0].fitness
den = max_f - min_f
# if size is odd
if size % 2 == 1:
raise ValueError("Populations with an odd size hasn't been implemented. Talk to Jesse")
# get inversed normalized values of fitness
# normalized value of 1 is the best. 0 is the worst
probabilities = []
normalized_sum = 0.0
for o in population:
if den == 0:
normalized_f = 1
else:
normalized_f = (max_f - o.fitness)/den
normalized_sum += normalized_f
probabilities.append({'normalized_f': normalized_f})
# calculate weight of each normalized value
for i, p in enumerate(probabilities):
probabilities[i]['probability'] = probabilities[i]['normalized_f']/normalized_sum
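# Roulette-wheel parent selection for a minimization problem: fitness is inverted and
# normalized so organisms with lower objective values receive a larger share of the
# cumulative probability sampled below.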
# generate new population
while len(new_population) < size:
# get cdf input values
cdf1 = random.random()
cdf2 = random.random()
# get index of parent from output of cdf
i = GA.__get_parent_index(cdf1, probabilities)
j = GA.__get_parent_index(cdf2, probabilities)
# mate parents
child1, child2 = GA.__mate_parents(id, population[i], population[j], function)
id += 2
# append children to new_population
new_population.extend((child1, child2))
if print_action:
for organism in new_population:
print("Crossover: New oganism %s" % str(organism))
return new_population
@staticmethod
def __mutation(population, bounds, rate, max_mutation_amount, print_action=False):
for organism in population:
if random.random() < rate:
new_pos = []
# for each dimension
for i in range(0, len(bounds)):
# take some percentage of the max mutation amount
x = random.uniform(0.01, 1.00)
delta_pos = (-1.0*log(1-x))*max_mutation_amount
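# -log(1 - x) with x ~ U(0.01, 1) draws an (approximately) exponentially distributed
# step size, so small perturbations are common and large jumps are rare.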
# should we go positive or negative
if random.randint(0,1) == 1: delta_pos = -1.0*delta_pos
new_dim_pos = organism.pos[i] + delta_pos
# cap where we can go if we are beyond the bounds of the design space
if new_dim_pos < bounds[i][0]:
new_dim_pos = bounds[i][0]
elif new_dim_pos > bounds[i][1]:
new_dim_pos = bounds[i][1]
new_pos.append(new_dim_pos)
if print_action:
new_pos_str = "["
for x in new_pos:
new_pos_str += "%6.3f " % x
new_pos_str += "]"
print("Mutation: Moving organism %s to %s" % \
(str(organism), new_pos_str))
organism.pos = new_pos
organism.fitness = organism.get_fval()
return population
def __display_state(self):
print("The best organism in generation %d is %s" \
% (self.num_generations, str(self.get_best_x())))
def __plot_state(self):
pts = [(organism.pos[0], organism.pos[1]) for organism in self.population]
self.plotutils.plot(pts)
def __str__(self):
return "Iteration %d Best Fitness: %8.4f by organism %s" % \
(self.num_generations, self.get_best_f(), str(self.get_best_x()))
####################################
# These are the only methods that #
# should be called outside of this #
# class #
####################################
def get_best_x(self):
return self.best_x
def get_best_f(self):
return self.best_x.fitness
def do_loop(self):
population = self.population
population = GA.__selection(population, \
self.settings['selection_cutoff'], \
self.settings['print_actions'])
population = GA.__crossover(self.total_organisms, \
population, \
self.settings['population_size'], \
self.function, \
self.settings['print_actions'])
self.total_organisms += len(population)
population = GA.__mutation(population, \
self.settings['bounds'], \
self.settings['mutation_rate'], \
self.settings['max_mutation_amount'], \
self.settings['print_actions'])
self.population = GA.__sort_population(population)
self.num_generations += 1
if self.population[0].fitness < self.best_x.fitness:
# add on the improvement in function value
self.func_val_improvement += (self.best_x.fitness - self.population[0].fitness)
self.best_x = self.population[0]
if self.settings['plot']:
self.__plot_state()
if self.settings['print_iterations']:
self.__display_state()
if self.settings['step_through']:
oa_utils.pause()
def run(self):
# iterate over generations
while self.settings['num_iterations'] > self.num_generations:
self.do_loop()
# check if we've improved our function value
if self.func_val_improvement > self.settings['stopping_criteria']:
self.func_val_improvement = 0
self.num_iter_since_improvement = 0
else:
self.num_iter_since_improvement += 1
# check if we haven't improved at all in num of stopping criteria steps
if self.num_iter_since_improvement > self.settings['num_iter_stop_criteria']:
if self.settings['print_actions'] or self.settings['print_iterations']:
print("Stopping criteria met after %d number of iterations" % self.num_generations)
break
# pause for a bit if setting is set
time.sleep(self.settings['time_delay'])
if self.num_generations > self.settings['num_iterations']:
if self.settings['print_actions'] or self.settings['print_iterations']:
print("Maximum number of iterations hit (%d)" % self.num_generations)
@staticmethod
def get_name():
return "Genetic Algorithm"
########################################################################################
# MAIN #
########################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Accept an optional settings file')
parser.add_argument('--settings', '-s', nargs=1, type=str, \
metavar='<file>', help='specify settings file to use')
parser.add_argument('--function', '-f', nargs=1, type=str, \
metavar='<file>', help='specify objective function file to use')
parser.add_argument('-v', action='store_true', help='print info when method is doing an action')
parser.add_argument('--time', '-t', action='store_true', help='turn timing on for the algorithm')
parser.add_argument('--plot', '-p', action='store_true', help='plot each iteration')
args = parser.parse_args()
function_module = None
settings_module = None
# get objective function
if args.function:
function_module = importlib.import_module(args.function[0])
else:
function_module = importlib.import_module('ackley_function')
function = function_module.objective_function
# get settings
if args.settings:
settings_module = importlib.import_module(args.settings[0])
else:
settings_module = importlib.import_module('ga_settings')
settings = settings_module.settings
# if -v is set change the setting
if args.v:
settings['print_actions'] = True
settings['print_iterations'] = True
# check for a couple more command line arguments
if args.time: settings['time'] = True
if args.plot: settings['plot'] = True
# --- END OF ARG PARSING --- #
# print a empty line
print("")
# time initialization
if settings['time']:
start_time = time.time()
# create algorithm instance
ga = GA(settings, function)
if settings['time']:
print(" --- Initialized in %s seconds --- " % (time.time() - start_time))
if settings['time_delay'] > 0.0 or settings['plot'] \
or settings['print_actions'] or settings['print_iterations'] or settings['step_through']:
print("\n --- WARNING: You are timing with either time_delay, plot, print_actions,")
print(" print_iterations, or step_through enabled. --- \n")
oa_utils.pause()
ga.start_timer()
#start_time = time.time()
ga.run()
if settings['time']:
ga.stop_timer()
print(" --- Ran for %s seconds --- " % (ga.get_time()))
#print(" --- Ran for %s seconds --- " % (time.time() - start_time))
# print out some data
print("")
print(str(ga))
sys.exit()
```
#### File: Optimization-Algoirthms/particle_swarm_optimization/particle_swarm_optimization.py
```python
import argparse # parsing command line arguments
import importlib # dynamically importing modules
import random # randint
import time # delay & timing
from math import sqrt
from operator import add, attrgetter
import copy
import sys # to exit and append to path
sys.path.append('../utils')
sys.path.append('../functions')
import oa_utils # optimization algorithm utils
from timer import Timer
from plot_utils import PlotUtils # plotting each iteration if plot is True
"""http://www.swarmintelligence.org/tutorials.php"""
class Particle:
"""One particle to be used with particle swarm optimization. Keeps
track of the following attributes:
Attributes:
id: A number that specifies an id
pos: An array of floats defining the particle's position in space.
func: A function to call to calculate this particle's fitness
"""
def __init__(self, id, pos, func):
self.id = id
self.pos = pos
self.func = func
self.velocity = [0 for b in pos]
self.fval = self.get_fval()
self.pbest = pos
def __str__(self):
x_str = "["
for x in self.pos:
x_str += "%6.3f " % x
x_str += "]"
return "(id: %d, fval: %7.4f, X: %s)" % \
(self.id, self.fval, x_str)
def __repr__(self):
return "<Particle(%d)>" % self.id
def __cmp__(self, other):
return cmp(self.fval, other.get_fval())
# TODO: make this a class method with a pos parameter?
def get_fval(self):
return self.func(self.pos)
def get_velocity(self):
return self.velocity
class PSO(Timer, object):
"""A particle swarm class that contains methods for handling
the population over iterations
Attributes:
There are no attributes for this class. All settings/attributes
are read in from pso_settings.py which should be located in the same
directory as this file
"""
def __init__(self, settings, function): # TODO add settings parameter
super(self.__class__, self).__init__()
# read in settings
num_dims = settings['number_of_dimensions']
population_size = settings['population_size']
bounds = settings['bounds']
if settings['velocity_type'] == 'constriction':
phi = max(settings['cp'] + settings['cg'], 4.0)
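# Clerc-Kennedy constriction coefficient: k = 2 / |2 - phi - sqrt(phi^2 - 4*phi)| with
# phi = cp + cg >= 4, which damps velocities so the swarm converges without clamping.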
self.k = 2.0/abs(2.0 - phi - sqrt(phi*phi - 4.0*phi))
else:
self.k = 1
# check to make sure num_dims and number of bounds provided match
if len(bounds) != num_dims:
raise ValueError("Number of dimensions doesn't match number of bounds provided")
# set instance variables
self.settings = settings
self.function = function
# initialize population
self.population = PSO.__gen_population(bounds, population_size, function)
self.total_population = population_size
self.best_x = PSO.__get_best_particle(self.population)
self.num_iterations = 1
if settings['plot']:
try:
self.plotutils = PlotUtils(num_dims, bounds, function)
self.__plot_state()
except ValueError:
print("Can not plot more than 2 dimensions")
settings['plot'] = False
if settings['print_iterations']:
self.__display_state()
if settings['step_through']:
oa_utils.pause()
@staticmethod
def __gen_particle(id, bounds, function):
# use gen_random_numbers to get a list of positions within the bounds
return Particle(id, oa_utils.gen_random_numbers(bounds), function)
@staticmethod
def __gen_population(bounds, size, function):
b = bounds
f = function
# generate a list of organisms
p = [PSO.__gen_particle(i+1, b, f) for i in range(0, size)]
return p
###########################
### PSO steps and loop ###
###########################
@staticmethod
def __update_velocity(population, velocity_type, print_actions, gbest, cp, cg, k, w):
for p in population:
if (velocity_type == 'normal'):
p.velocity = PSO.__get_velocity(1, cp, cg, gbest, p, 1)
elif (velocity_type == 'inertia'):
p.velocity = PSO.__get_velocity(k, cp, cg, gbest, p, w)
elif (velocity_type == 'constriction'):
p.velocity = PSO.__get_velocity(k, cp, cg, gbest, p, 1)
return population
@staticmethod
def __get_velocity(k, c1, c2, gbest, p, w):
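# Canonical PSO velocity update applied per dimension:
# v' = k * (w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x))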
velocity_array = []
for i, v in enumerate(p.velocity):
velocity_array.append(k*(w*v + c1*random.random()*(p.pbest[i] - p.pos[i]) + c2*random.random()*(gbest[i] - p.pos[i])))
return velocity_array
@staticmethod
def __update_position(population): # TODO put bounds on what position can be updated to
for p in population:
p.pos = list(map(add, p.pos, p.velocity))
p.fval = p.get_fval()
return population
@staticmethod
def __get_best_particle(population):
return copy.deepcopy( min(population, key=attrgetter('fval')) )
def __display_state(self):
print("The best organism in generation %d is %s" \
% (self.num_generations, str(self.get_best_x())))
def __plot_state(self):
pts = [(organism.pos[0], organism.pos[1]) for organism in self.population]
self.plotutils.plot(pts)
def __str__(self):
return "Best Fitness: %8.4f by particle %s" % \
(self.get_best_f(), str(self.get_best_x()))
####################################
# These are the only methods that #
# should be called outside of this #
# class #
####################################
def get_best_x(self):
return self.best_x
def get_best_f(self):
return self.best_x.fval
def do_loop(self):
population = self.population
population = PSO.__update_velocity(population, \
self.settings['velocity_type'], \
self.settings['print_actions'], \
self.get_best_x().pos, \
self.settings['cp'], \
self.settings['cg'], \
self.k, \
self.settings['weight'])
if self.settings['cg_plus']:
self.settings['cg'] += 0.1
phi = max(self.settings['cp'] + self.settings['cg'], 4.0)
self.k = 2.0/abs(2.0 - phi - sqrt(phi*phi - 4.0*phi))
population = PSO.__update_position(population)
self.num_iterations += 1
self.population = population
current_best = PSO.__get_best_particle(self.population)
if current_best.get_fval() < self.best_x.get_fval():
self.best_x = current_best
if self.settings['plot']:
self.__plot_state()
if self.settings['print_iterations']:
self.__display_state()
if self.settings['step_through']:
oa_utils.pause()
def run(self):
# iterate over generations
while self.settings['num_iterations'] > self.num_iterations:
self.do_loop()
time.sleep(self.settings['time_delay'])
@staticmethod
def get_name():
return "Particle Swarm"
########################################################################################
# MAIN #
########################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Accept an optional settings file')
parser.add_argument('--settings', '-s', nargs=1, type=str, \
metavar='<file>', help='specify settings file to use')
parser.add_argument('--function', '-f', nargs=1, type=str, \
metavar='<file>', help='specify objective function file to use')
parser.add_argument('-v', action='store_true', help='print info when method is doing an action')
parser.add_argument('--time', '-t', action='store_true', help='turn timing on for the algorithm')
parser.add_argument('--plot', '-p', action='store_true', help='plot each iteration')
args = parser.parse_args()
function_module = None
settings_module = None
# get objective function
if args.function:
function_module = importlib.import_module(args.function[0])
else:
function_module = importlib.import_module('ackley_function')
function = function_module.objective_function
# get settings
if args.settings:
settings_module = importlib.import_module(args.settings[0])
else:
settings_module = importlib.import_module('pso_settings')
settings = settings_module.settings
# if -v is set change the setting
if args.v:
settings['print_actions'] = True
settings['print_iterations'] = True
# check for a couple more command line arguments
if args.time: settings['time'] = True
if args.plot: settings['plot'] = True
# --- END OF ARG PARSING --- #
# print a empty line
print("")
# time initialization
if settings['time']:
start_time = time.time()
# create algorithm instance
pso = PSO(settings, function)
if settings['time']:
print(" --- Initialized in %s seconds --- " % (time.time() - start_time))
if settings['time_delay'] > 0.0 or settings['plot'] \
or settings['print_actions'] or settings['print_iterations'] or settings['step_through']:
print("\n --- WARNING: You are timing with either time_delay, plot, print_actions,")
print(" print_iterations, or step_through enabled. --- \n")
oa_utils.pause()
pso.start_timer()
# iterate over generations
pso.run()
if settings['time']:
pso.stop_timer()
print(" --- Ran for %s seconds --- " % (pso.get_time()))
# print out some data
print("")
print(str(pso))
sys.exit()
``` |
{
"source": "jkleve/Presidential-Prediction",
"score": 3
} |
#### File: Presidential-Prediction/state_by_state/handle_csv_files.py
```python
import os
import sys
TEST = 0
def move_zips(year):
source = "tmp/"
dest = "zips/"
ending = "_" + str(year) + ".zip"
files = os.listdir(source)
for f in files:
beginning = f.split('.')[0]
if TEST:
print("mv " + source + f + " " + dest + beginning + ending)
else:
os.system("mv " + source + f + " " + dest + beginning + ending)
def unzip_files(year):
source = "tmp/"
dest_dir = "csvs_" + str(year) + "/"
files = os.listdir(source)
s = ""
for f in files:
if TEST:
print("unzip " + source + f + " -d " + dest_dir)
else:
os.system("unzip " + source + f + " -d " + dest_dir)
if __name__ == "__main__":
year = 2008
unzip_files(year)
move_zips(year)
```
#### File: Presidential-Prediction/state_by_state/numpy_utils.py
```python
import numpy as np
################################################
#
# Add 2 np arrays row wise
#
################################################
def add_row_to_array(a, b):
if a.shape == (0,0):
return b
if a.shape[1] != b.shape[1]:
print("Number of columns doesn't match. %d vs %d" % (a.shape[1], b.shape[1]))
print("Can't add row to array")
return np.concatenate((a,b))
################################################
#
# Add 2 np arrays column wise
#
################################################
def add_column_to_array(a, b):
if a.shape == (0,0):
return b
if a.shape[0] != b.shape[0]:
print("Number of columns doesn't match. %d vs %d" % (a.shape[0], b.shape[0]))
print("Can't add row to array")
return np.column_stack((a,b))
################################################
#
# Split one ndarray into two
# start: start row of ndarray to extract
# stop: stop row of ndarray to extract
#
################################################
def split_into_two_ndarrays_by_rows(d, start, stop):
train = None
test = None
if stop > d.shape[0]:
print("Contraining end of test data")
stop = d.shape[0]
beginning = d[0:start]
end = d[stop+1:]
if len(beginning) > 0:
if len(end) > 0:
train = (np.concatenate((beginning,end)))
else:
train = beginning
else:
train = end
test = d[start:stop+1]
return (train, test)
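# Example with assumed shapes: for a (10, 3) array with start=2 and stop=4 this returns
# a (7, 3) training array and a (3, 3) test array holding rows 2 through 4 inclusive.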
################################################
#
# Split one ndarray into two
# start: start column of ndarray to extract
# stop: stop column of ndarray to extract
# TODO nothing implemented besides the extracted array
# TODO no error checking on ndarray size
################################################
def split_into_two_ndarrays_by_column(d, start, stop):
return (d[:,start:stop], np.zeros(shape=(0,0)))
``` |