{
"source": "joelfelipe338/Jogo-Uno-em-Python",
"score": 3
}
#### File: joelfelipe338/Jogo-Uno-em-Python/jogador.py
```python
class Jogador():
def __init__(self):
self.cartas = []
self.nome = ""
def adicionar(self, cartas):
self.cartas += cartas
def retirar(self,indexs):
for i in range(len(indexs)):
self.cartas[indexs[i]] = "X"
self.cartas = [i for i in self.cartas if i != "X"]
def verCartas(self):
return self.cartas
def setNome(self, nome):
self.nome = nome
```
#### File: joelfelipe338/Jogo-Uno-em-Python/mesa.py
```python
class Mesa:
def __init__(self, carta):
(self.cor,self.num) = carta[0].split("-")
self.soma = "0"
self.corEsp = ""
self.corAbrv = ""
def jogada(self, cartas, primeira, ultima):
(cor, num) = primeira.split("-")
if num == "+2" or num == "+4":
for i in cartas:
(corAux, numAux) = i.split("-")
if(numAux != "+2" and numAux != "+4"):
return False
else:
for i in cartas:
(corAux, numAux) = i.split("-")
if(numAux != num):
return False
if(self.num == "COR"):
if(cor != self.corAbrv):
self.corAbrv = ""
self.corEsp = ""
return False
if(self.num == "+4" or (self.num == "+2" and int(self.soma) > 0)):
if(self.num == "+4"):
if(cor != self.corAbrv or (num != "+2" and num != "+4")):
self.corAbrv = ""
self.corEsp = ""
return False
elif(self.num == "+2" and (num != "+2" and num != "+4")):
return False
if(num == "COR"):
print("Escolha a cor: ")
print("[1] Azul")
print("[2] Amarelo")
print("[3] Verde")
print("[4] Vermelho")
op = input("")
if op == "1":
self.corEsp = "Azul"
self.corAbrv = "AZ"
elif op == "2":
self.corEsp = "Amarelo"
self.corAbrv = "AM"
elif op == "3":
self.corEsp = "Verde"
self.corAbrv = "VR"
elif op == "4":
self.corEsp = "Vermelho"
self.corAbrv = "VM"
else:
self.corEsp = ""
(self.cor, self.num) = ultima.split("-")
return True
elif(num == "+4" or num == "+2"):
if(num == "+4"):
print("Escolha a cor: ")
print("[1] Azul")
print("[2] Amarelo")
print("[3] Verde")
print("[4] Vermelho")
op = input("")
if op == "1":
self.corEsp = "Azul"
self.corAbrv = "AZ"
elif op == "2":
self.corEsp = "Amarelo"
self.corAbrv = "AM"
elif op == "3":
self.corEsp = "Verde"
self.corAbrv = "VR"
elif op == "4":
self.corEsp = "Vermelho"
self.corAbrv = "VM"
else:
self.corEsp = ""
tot = int(self.soma)
for i in cartas:
(corAux, numAux) = i.split("-")
tot += int(numAux)
self.soma = str(tot)
(self.cor, self.num) = ultima.split("-")
return True
elif(num == "<>"):
(self.cor, self.num) = ultima.split("-")
return True
elif(num == "@"):
(self.cor, self.num) = ultima.split("-")
return True
else:
if(cor == self.cor or num == self.num or cor == self.corAbrv):
(self.cor, self.num) = ultima.split("-")
return True
else:
return False
def verCentro(self):
return (self.cor, self.num)
def zerar(self):
self.soma = "0"
self.corAbrv = ""
self.corEsp = ""
```
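To make the interplay between the two classes concrete, here is a minimal, hypothetical usage sketch. The card strings, the starting card and the player name are invented for illustration; they only follow the `COR-NUM` string format and the color abbreviations (`AZ`, `AM`, `VR`, `VM`) that `Mesa.jogada()` expects.
```python
from jogador import Jogador
from mesa import Mesa

# hypothetical hand and table card in the "COLOR-VALUE" format used above
jogador = Jogador()
jogador.setNome("Alice")
jogador.adicionar(["AZ-5", "VM-5", "AM-7"])

mesa = Mesa(["AZ-3"])  # table starts with a blue 3
escolhidas = [0, 1]    # indices of the cards the player wants to play
cartas = [jogador.verCartas()[i] for i in escolhidas]

# jogada() validates the move against the card currently on the table;
# the first and last selected cards are passed separately
if mesa.jogada(cartas, cartas[0], cartas[-1]):
    jogador.retirar(escolhidas)

print(mesa.verCentro())  # ('VM', '5') if the move was accepted
```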
{
"source": "joelfelipe338/redesNerurais",
"score": 3
}
#### File: joelfelipe338/redesNerurais/adaline.py
```python
from matriz import Matriz
from random import random
class Adaline:
def __init__(self,neuronios):
self.neuronios = neuronios
self.pesos = Matriz(1, neuronios)
self.bias = random()
self.taxa = 0.0025
self.precisao = 0.0000001
def treino(self, entrada, saida):
entrada = Matriz.matrizLinha(entrada)
pesos_t = Matriz.transposta(self.pesos)
soma = Matriz.mult(entrada, pesos_t)
soma.data[0][0] -= self.bias
erro = saida - soma.data[0][0]
delta = erro * self.taxa
# delta rule: accumulate the bias update (the bias is subtracted in the activation)
self.bias += delta * -1
delta = Matriz.mult_escalar(entrada,delta)
self.pesos = Matriz.soma(self.pesos,delta)
def EQM(self,padroes,entrada,saida):
eqm = 0
x = 0
pesos_t = Matriz.transposta(self.pesos)
for i in entrada:
i = Matriz.matrizLinha(i)
u = Matriz.mult(i,pesos_t)
u = u.data[0][0] - self.bias
erro = saida[x] - u
x += 1
eqm += erro**2
eqm = eqm/padroes
return eqm
def testeErro(self,eqm_ant,eqm_atual):
mod = eqm_atual - eqm_ant
if(mod < 0):
mod = mod * -1
if(mod <= self.precisao):
return True
return False
def predict(self,entrada):
entrada = Matriz.matrizLinha(entrada)
pesos_t = Matriz.transposta(self.pesos)
soma = Matriz.mult(entrada,pesos_t)
soma.data[0][0] -= self.bias
return Hebb(soma.data[0][0])
def Hebb(x):
if(x < 0):
return -1
else:
return 1
```
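A rough sketch of how this class might be driven from a training loop, assuming the `Matriz` helper from the same repository behaves like a small matrix wrapper. The toy data, epoch limit and stopping logic are illustrative only and are not taken from the repository.
```python
from adaline import Adaline

# toy, made-up data: 3 inputs per pattern, bipolar targets
entradas = [[0.1, 0.4, 0.7], [0.3, 0.7, 0.2], [0.6, 0.9, 0.8], [0.5, 0.7, 0.1]]
saidas = [1, -1, 1, -1]

rede = Adaline(3)
eqm_ant = rede.EQM(len(entradas), entradas, saidas)
for epoca in range(10000):
    for x, y in zip(entradas, saidas):
        rede.treino(x, y)
    eqm_atual = rede.EQM(len(entradas), entradas, saidas)
    # stop once the change in mean squared error drops below the precision
    if rede.testeErro(eqm_ant, eqm_atual):
        break
    eqm_ant = eqm_atual

print(rede.predict([0.2, 0.5, 0.6]))  # -1 or 1 via the step function Hebb()
```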
{
"source": "joel-fmjr/Projeto-POO",
"score": 3
}
#### File: code/entities/player.py
```python
import pygame
from weapon import Weapon
from utils import import_folder, input
from . import Entity
from read_json import settings
from visual import GetParticle
class Player(Entity):
'''
Class that holds the sprites (the part responsible for the player's movement images) and their information.
Configures the player's movement and directions.
Handles the player's teleport and its settings.
Tracks the player's status: moving or idle.
Configures the attacks.
Handles the player's animation and the weapon setup.
'''
def __init__(self, pos, groups, obstacle_sprites, portal_sprites, point_sprites, attackable_sprites, default_image_path, status, hitbox_inflation):
super().__init__(groups, default_image_path, pos, status, hitbox_inflation)
# graphics setup
self.__import_player_assets()
# attacking
self.attacking = False
self.attack_cooldown = 300
self.attack_time = None
self.current_attack = None
self.visible_sprites = groups[0]
# teleporting
self.teleporting = False
self.teleport_cooldown = 300
self.teleport_time = None
self.teleport_sound = pygame.mixer.Sound(
'lib/audio/portal/portal.wav')
self.teleport_sound.set_volume(0.5)
# attributes
self.stats = {'health': 100, 'energy': 60,
'attack': 30, 'speed': 6}
self.health = self.stats['health']
self.energy = self.stats['energy']
self.attack = self.stats['attack']
self.speed = self.stats['speed']
# sprite groups
self.point_sprites = point_sprites
self.attackable_sprites = attackable_sprites
self.obstacle_sprites = obstacle_sprites
self.portal_sprites = portal_sprites
# damage timer
self.hurt_time = None
self.invicible_duration = 500
# particles
self.animation_player = GetParticle()
def __import_player_assets(self):
''' Loads all sprites for all of the player's states. '''
character_path = 'lib/graphics/player/'
self.animations = {'up': [], 'down': [], 'left': [], 'right': [],
'right_idle': [], 'left_idle': [], 'up_idle': [], 'down_idle': []}
for animation in self.animations.keys():
full_path = character_path + animation
self.animations[animation] = import_folder(full_path)
def __move(self, speed):
'''Handles the player's movement.
:param speed: int.
'''
if self.attacking or self.teleporting:
self.direction.x = 0
self.direction.y = 0
if self.direction.magnitude() != 0:
self.direction = self.direction.normalize()
self.hitbox.x += self.direction.x * speed
self.collision('horizontal')
self.hitbox.y += self.direction.y * speed
self.collision('vertical')
self.rect.center = self.hitbox.center
self.teleport()
def __get_status(self):
''' Checks the player's status and the direction in which it stopped.'''
# idle status
if self.direction.x == 0 and self.direction.y == 0:
if not 'idle' in self.status:
self.status = self.status + '_idle'
def cooldowns(self):
''' Handles the cooldown between actions. '''
current_time = pygame.time.get_ticks()
if self.attacking:
if current_time - self.attack_time >= self.attack_cooldown:
self.attacking = False
self.__destroy_attack()
if not self.vulnerable:
if current_time - self.hurt_time >= self.invicible_duration:
self.vulnerable = True
if self.teleporting:
if current_time - self.teleport_time >= self.teleport_cooldown:
self.teleporting = False
def animate(self):
''' Animates the player sprite. '''
animation = self.animations[self.status]
# loop over the frame index
self.set_frame_index(self.get_frame_index() +
self.get_animation_speed())
if self.get_frame_index() >= len(animation):
self.set_frame_index(0)
# set the image
self.image = animation[int(self.get_frame_index())]
self.rect = self.image.get_rect(center=self.hitbox.center)
# blink while invulnerable
if not self.vulnerable:
alpha = self.wave_value()
self.image.set_alpha(alpha)
else:
self.image.set_alpha(255)
def __recover(self):
''' Regenerates the player's health and energy.'''
if self.energy <= self.stats['energy']:
self.energy += 0.1
else:
self.energy = self.stats['energy']
if self.health <= self.stats['health']:
self.health += 0.01
else:
self.health = self.stats['health']
def teleport(self):
''' Checks the teleport area and whether the player has entered it, with the information needed to perform the teleport. '''
for sprite in self.portal_sprites:
centralize = pygame.math.Vector2(24, 24)
if sprite.hitbox.colliderect(self.hitbox):
self.teleport_sound.play()
self.teleport_time = pygame.time.get_ticks()
self.teleporting = True
if 3400 < sprite.rect.topleft[0] < 3500:
self.hitbox.center = (1776, 624)
elif 1700 < sprite.rect.topleft[0] < 1800:
self.hitbox.center = (3456, 624)
elif 2200 < sprite.rect.topleft[0] < 2300:
self.hitbox.center = (3936, 3888)
else:
self.teleporting = False
break
self.hitbox.center += centralize
self.status = 'down_idle'
def create_attack(self):
''' Creates the projectile sprite.'''
if self.energy >= 10:
self.current_attack = Weapon(self, [self.visible_sprites])
self.current_attack.shot_play()
self.energy -= 10
# direction the shot will travel in
facing = self.status.split('_')[0]
if facing == 'right':
direction = pygame.math.Vector2(1, 0)
elif facing == 'left':
direction = pygame.math.Vector2(-1, 0)
elif facing == 'up':
direction = pygame.math.Vector2(0, -1)
else:
direction = pygame.math.Vector2(0, 1)
for i in range(1, 10):
# horizontal
if direction.x:
offset_x = (direction.x * i) * \
settings['general_settings']['tilesize']
shot_x = self.rect.centerx + offset_x
shot_y = self.rect.centery
self.animation_player.create_particles('pointing', (shot_x, shot_y), [
self.visible_sprites, self.point_sprites])
# vertical
else:
offset_y = (direction.y * i) * \
settings['general_settings']['tilesize']
shot_x = self.rect.centerx
shot_y = self.rect.centery + offset_y
self.animation_player.create_particles(
'pointing', (shot_x, shot_y), [self.visible_sprites, self.point_sprites])
for point_sprite in self.point_sprites:
hit = pygame.sprite.spritecollide(
point_sprite, self.obstacle_sprites, False)
hit_damage = pygame.sprite.spritecollide(
point_sprite, self.attackable_sprites, False)
if hit:
for target_sprite in hit:
position = target_sprite.rect.center
self.animation_player.create_particles(
'gun', position, [self.visible_sprites])
if hit_damage:
for target_sprite in hit_damage:
target_sprite.get_damage(self)
position = target_sprite.rect.center
self.animation_player.create_particles(
'gun', position, [self.visible_sprites])
if hit or hit_damage:
self.current_attack.hit_play()
break
def __destroy_attack(self):
''' Destroys the projectile sprite at the end of the attack. '''
if self.current_attack:
self.current_attack.kill()
self.current_attack = None
def update(self):
'''Updates the player's graphics and logic.'''
input(self)
self.__get_status()
self.animate()
self.cooldowns()
self.__move(self.speed)
self.__recover()
```
#### File: code/visual/get_particle.py
```python
from .particle_effect import ParticleEffect
from utils import import_folder
class GetParticle:
''' Class responsible for the player-related animations, covering the interaction between enemy and player. '''
def __init__(self):
self.frames = {
# enemies
'error': import_folder('lib/graphics/animation/claw'),
'alteration': import_folder('lib/graphics/animation/alteration'),
# player
'gun': import_folder('lib/graphics/animation/gun'),
'pointing': import_folder('lib/graphics/animation/pointing'),
# enemy death
'client': import_folder('lib/graphics/animation/death_client'),
'bug': import_folder('lib/graphics/animation/death_bug')
}
def create_particles(self, animation_type, pos, groups):
''' Creates the effect particles. '''
animation_frames = self.frames[animation_type]
ParticleEffect(pos, animation_frames, groups)
```
{
"source": "JoElfner/multisim",
"score": 2
}
#### File: multisim/multisim/flownet.py
```python
from collections import OrderedDict as odict
class FlowNet:
def __init__(self, SimEnv_instance, part, sub_net=False):
# depending on primary or sub/secondary net:
if not sub_net:
# if primary net:
self.net_type = 'primary flow net'
# save parent_pump:
self.parent_pump = part
elif sub_net:
# if sub_net:
self.net_type = 'secondary flow net'
# save parent_part:
self.parent_part = part
# general stuff for both types:
# get memoryview to massflow array of parent part by slicing:
self.dm_parent = SimEnv_instance.parts[part].dm[:]
# from parent part to child part ordered topology dict which determines
# the order to solve for the massflows. the key is the part/port to
# solve, the value contains a tuple of the following setup:
# (memory view to massflow to solve for,
# operation id depicting the calculation method,
# memory view(s) to source massflow array cell(s)).
self.dm_topo = odict()
```
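The layout of the `dm_topo` entries described in the comment above is easiest to picture with a small, self-contained sketch. The arrays, the dict key and the `apply_routine` helper are purely illustrative; they only mimic the `(target view, operation id, source views)` tuple layout, with operation ids 0 ("pass on value") and -1 ("negative of sum") as used by the parts below, not the actual multisim solver.
```python
import numpy as np
from collections import OrderedDict

# massflow arrays of two hypothetical parts (numpy slices act as memory views)
dm_pump = np.array([0.5])   # parent pump massflow [kg/s], already known
dm_pipe = np.zeros(1)       # connected pipe massflow, still to be solved

# one routine per port to solve: (target view, operation id, source view(s))
dm_topo = OrderedDict()
dm_topo['pipe;in'] = (dm_pipe[0:1], 0, dm_pump[0:1])

def apply_routine(routine):
    target, op_id, *sources = routine
    if op_id == 0:     # pass value on unchanged
        target[:] = sources[0]
    elif op_id == -1:  # negative of the sum of the sources
        target[:] = -sum(s[0] for s in sources)

for routine in dm_topo.values():  # solve in topological order
    apply_routine(routine)
print(dm_pipe)  # -> [0.5], the pump massflow passed on to the pipe
```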
#### File: multisim/parts/mixingvalve.py
```python
import numpy as np
import pandas as pd
from ..simenv import SimEnv
from .. import precomp_funs as _pf
class MixingValve(SimEnv):
"""
type: MixingValve class.
The MixingValve **mixes or separates** a flow. The flow on the 2-end-side
is mixed/separated by the factors n1 and n2, with **n1 + n2 = 1**,
**n1 >= 0** and **n2 >= 0**.
When mixing, the temperatures and mass flows of the respective streams are
combined by the rule *dm_out = dm_in1 + dm_in2*.
When separating, one stream is split into two streams with the
same temperature and the massflows *dm_out1 = n1*dm_in* and *dm_out2 = n2*dm_in*.
The resulting flow of mixing/separating is calculated after each timestep
and intermediate step depending on the given control algorithm and the
measured values in the specified measuring port.
The MixingValve class does not contain a differential method as it only
passes the values of the part connected to its 'in'-port(s) to its
'out'-port(s) and the values of the part connected to its 'out'-port(s) to
its 'in'-port(s), applying only the mixing/separating. Thus it is
not involved in solving the equations using the specified solver algorithm.
Parameters:
-----------
name: string
Name of the part.
mix_or_sep: string, default: 'mix'
Specifies if the MixingValve is supposed to mix or separate streams.
It can be set to 'mix' for mixing or 'sep' for separating. When 'mix'
is set, there are two inlet ports 'in1' and 'in2' and one outlet port
'out' which have to be connected. When 'sep' is set, there is one inlet
port 'in1' and two outlet ports 'out1' and 'out2' which have to be
connected.
"""
def __init__(self, name, master_cls, mix_or_split='mix', **kwargs):
self._models = master_cls
self.constr_type = 'Valve_3w' # define construction type
base_err = ( # define leading base error message
'While adding {0} `{1}` to the simulation '
'environment, the following error occurred:\n'
).format(self.constr_type, str(name))
arg_err = ( # define leading error for missing/incorrect argument
'Missing argument or incorrect type/value: {0}\n\n'
)
self._base_err = base_err # save to self to access it in methods
self._arg_err = arg_err # save to self to access it in methods
self.name = name
self._unit = '[%]' # unit of the actuator
self.part_id = self._models.num_parts - 1
self.kind = mix_or_split
# save smallest possible float number for avoiding 0-division:
self._tiny = self._models._tiny
# even though this part is not using numeric solving, number of
# gridpoints are specified anyways:
self.num_gp = 3
# preallocate grids:
self.T = np.zeros(3, dtype=np.float64)
self._T_init = np.zeros_like(self.T) # init temp for resetting env.
# preallocate T ports array (here only used for dimension checking)
self._T_port = np.zeros_like(self.T)
self.dm = np.zeros(3)
# self.U = np.zeros(3)
# preallocate grids for port connection parameters
# cross section area of wall of connected pipe, fluid cross section
# area of, gridspacing and lambda of wall of connected pipe
self._A_wll_conn_p = np.zeros_like(self._T_port)
self._A_fld_conn_p = np.zeros_like(self._T_port)
self._port_gsp = np.full_like(self._T_port, self._tiny)
self._lam_wll_conn_p = np.full_like(self._T_port, self._tiny)
self._lam_port_fld = np.full_like(self._T_port, self._tiny)
# port_definition (first, second and last array element):
self.port_num = 3
# Index to own value array to get values of own ports, meaning if I
# index a FLATTENED self.T.flat with self._port_own_idx, I need to
# get values according to the order given in self.port_names.
# That is, this must yield the value of the cell in self.T, which is
# belonging to the port 'in':
# self.T.flat[self._port_own_idx[self.port_names.index('in')]]
self._port_own_idx = np.array(
(0, 1, self.T.shape[0] - 1), dtype=np.int32
)
self._port_own_idx_2D = self._port_own_idx # save for compatibility
"""port_array"""
self.port_ids = np.array((), dtype=np.int32)
# set to read-only to avoid manipulation, same for port_name by using
# tuple:
# self._port_own_idx.flags.writeable = False
# preallocate port values to avoid allocating in loop:
self._port_vals = np.zeros(self.port_num)
# preallocate list to mark ports which have already been solved in
# topology (to enable creating subnets)
self._solved_ports = list()
# port setup depending on mixer or separator valve
# mixing or separating factors for each port are saved in the dict
# port_factors, with the factor 1 being a tuple (can't be changed!):
if mix_or_split == 'mix':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('in', 'in', 'out'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port in1 # port in2
) # port out
elif mix_or_split == 'split':
self.port_names = tuple(('A', 'B', 'AB'))
# set massflow characteristics for ports: in means that an
# inflowing massflow has a positive sign, out means that an
# outflowing massflow is pos.
self.dm_char = tuple(('out', 'out', 'in'))
self.pf_arr = np.array(
[0.5, 0.5, 1], dtype=np.float64 # port out1 # port out2
) # port in
else:
err_str = 'mix_or_split has to be set to \'mix\' or \'split\'!'
raise ValueError(err_str)
# make dict for easy lookup of portfactors with memory views:
self.port_factors = dict(
{
'A': self.pf_arr[0:1],
'B': self.pf_arr[1:2],
'AB': self.pf_arr[2:3],
}
)
# construct partname+portname to get fast access to own ports:
dummy_var = list(self.port_names)
for i in range(self.port_num):
dummy_var[i] = self.name + ';' + dummy_var[i]
self._own_ports = tuple(dummy_var)
# preallocate result grids with one row. An estimate of total rows will
# be preallocated before simulation start in initialize_sim. massflow
# grid is preallocated in set_initial_cond:
self.res = np.zeros((1, self.port_num))
self.res_dm = np.zeros((2, self.port_num))
# set if type has to be solved numeric:
self.solve_numeric = False
# if port arrays shall be collapsed to amount of ports to improve speed
self.collapse_arrays = False
self._collapsed = False # bool checker if already collapsed
# determine if part is treated as hydraulic compensator
self.hydr_comp = False
# if part can be a parent part of a primary flow net:
self._flow_net_parent = False
# add each flow channel of part to hydr_comps (will be removed once its
# massflow solving method is completely integrated in flow_net.
# remaining parts except real hydr comps will be used to generate an
# error):
self._models._hydr_comps.add(self.name)
# if the topology construction method has to stop when it reaches the
# part to solve more ports from other sides before completely solving
# the massflow of it. This will be set to false as soon as only one
# port to solve is remaining:
self.break_topology = False
# count how many ports are still open to be solved by topology. If
# break topology is True, this is used to set it to False if 1 is
# reached.
self._cnt_open_prts = self.port_num # not required here
self._port_heatcond = True # if heatcond. over ports is enabled
# determine if part has the capability to affect massflow (dm) by
# diverting flow through ports or adding flow through ports:
self.affect_dm = True
# if the massflow (dm) has the same value in all cells of the part
# (respectively in each flow channel for parts with multiple flows):
self.dm_invariant = False
# if the part has multiple separated flow channels which do NOT mix
# (like a heat exchanger for example):
self.multiple_flows = False
# bool checker if flows were updated in update_flownet to avoid
# processing flows in get_diff each time (array for referencing):
self._process_flows = np.array([True])
# if the part CAN BE controlled by the control algorithm:
self.is_actuator = True
self._actuator_CV = self.pf_arr[:] # set array to be controlled
self._actuator_CV_name = 'port_opening'
# if the part HAS TO BE controlled by the control algorithm:
self.control_req = True
# if the part needs a special control algorithm (for parts with 2 or
# more controllable inlets/outlets/...):
self.actuator_special = True
# initialize bool if control specified:
self.ctrl_defined = False
# if the parts get_diff method is solved with memory views entirely and
# thus has arrays which are extended by +2 (+1 at each end):
self.enlarged_memview = False
# if the part has a special plot method which is defined within the
# part's class:
self.plot_special = True
# save initialization status:
self.initialized = False
# save memory address of T
self._memadd_T = self.T.__array_interface__['data'][0]
# preallocate massflow grid:
if self.dm_invariant:
# if there is the same massflow everywhere in the part
self.dm = np.zeros(1)
else:
self.dm = np.zeros(self.port_num)
# and also preallocate grid for massflow through ports:
if not self.hydr_comp:
# if part is no hydraulic compensator, dm ports grid is simply a
# memory view to massflow grid
self._dm_port = self.dm[:]
self._dm_io = self.dm[:]
else:
# if part is a hydraulic compensator, dm ports is separate from dm
self._dm_port = np.zeros_like(self.T)
self._dm_io = np.zeros_like(self.T)
# save all kind of info stuff to dicts:
# topology info:
self.info_topology = dict()
# IMPORTANT: THIS VARIABLE **MUST NOT BE INHERITED BY SUB-CLASSES**!!
# If sub-classes are inherited from this part, this bool checker AND
# the following variables MUST BE OVERWRITTEN!
# is the diff function fully njitted AND are all input-variables
# stored in a container?
self._diff_fully_njit = False
# self._diff_njit = pipe1D_diff # handle to njitted diff function
# input args are created in simenv _create_diff_inputs method
def init_part(self, *, port_A_init, **kwargs):
"""
Initialize 3-way valve with specifications, material and initial
conditions. Initial condition for a 3-way valve is the relative port
opening of port A in values from 0...1.
"""
# get material properties and pipe specifications:
self._get_specs_n_props(**kwargs)
# gridspacing is saved in an array of length port_num to save the
# gridspacing of connected parts for heat flux calculation. this array
# is pre-filled with an estimate of 1.1 times the DN outer diameter but
# will be overwritten and filled by get_port_connections() method with
# connected part values, if any numeric parts are connected.
# therefore get the info topology key:
if 'in' in self.info_topology:
key = 'in'
else:
key = 'all_ports'
self.grid_spacing = np.full_like(
self._T_port, self.info_topology[key]['pipe_specs']['d_o'] * 1.1
)
# delete used kwargs:
del kwargs['material']
del kwargs['pipe_specs']
# assert in and out values
err_str = (
'Only values `0 <= port_A_init <= 1` are allowed as '
'initial values for mixing or splitting valves!'
)
assert 0 <= port_A_init <= 1, err_str
# set starting values to port factors:
if self.kind == 'mix':
self.pf_arr[0] = port_A_init
self.pf_arr[1] = 1 - port_A_init
else:
# self.pf_arr[1] = port_A_init
# self.pf_arr[2] = 1 - port_A_init
""" TODO: change to same idx mix split"""
self.pf_arr[0] = port_A_init
self.pf_arr[1] = 1 - port_A_init
self._pf_init = port_A_init # backup for resetting
# # if set to steady state:
# if kwargs:
# if 'set_steadystate' in kwargs:
# assert (type(kwargs['set_steadystate']) ==
# bool), ('\'set_steadystate\' can only be True or '
# 'False!')
# self.ctrl_defined = kwargs['set_steadystate']
# if valve has to be controlled (default) and thus is NOT set to
# static, it needs a lower and upper limit for the values to set:
if 'no_control' not in kwargs or (
'no_control' in kwargs and kwargs['no_control'] is False
):
err_str = (
self._base_err
+ self._arg_err.format('lower_limit, upper_limit')
+ 'The part was set to be an actuator and need a control with '
'`no_control=False`, thus `lower_limit` and `upper_limit` '
'in {0} have to be passed to clip the controller action on '
'the actuator to the limits.\n'
'The limits have to be given as integer or float values with '
'`lower_limit < upper_limit`.'
).format(self._unit)
assert 'lower_limit' in kwargs and 'upper_limit' in kwargs, err_str
self._lims = np.array( # set limits to array
[kwargs['lower_limit'], kwargs['upper_limit']],
dtype=np.float64,
)
self._llim = self._lims[0] # also save to single floats
self._ulim = self._lims[1] # also save to single floats
assert 0 <= self._lims[0] < self._lims[1] <= 1, (
err_str + ' For Valve_3w limits are additionally restricted '
'to `0 <= lower_limit < upper_limit <= 1`.'
)
# if part does not need control (static or given values):
elif 'no_control' in kwargs and kwargs['no_control'] is True:
# if part is static:
if 'const_val' in kwargs:
# check for correct type:
err_str = (
'If valve ' + self.name + ' is set to static with '
'`const_val=array`, array has to be a 1D numpy '
'array with 2 values! To set array values over '
'a predefined timespan, use `val_given=time_array` '
'instead!'
)
assert type(kwargs['const_val']) == np.ndarray and kwargs[
'const_val'
].shape == (2,), err_str
self.pf_arr[0:2] = kwargs['const_val']
raise ValueError('with const val reset to init not working')
# delete used kwargs to enable checking at the end:
del kwargs['const_val']
elif 'val_given' in kwargs:
# check for correct type:
err_str = (
'If valve ' + self.name + ' is set with predefined '
'values over a timespan, `val_given=time_array` '
'has to be given! `time_array` has to be a Pandas '
'Series with the index column filled with '
'timestamps which have to outlast the simulation '
'timeframe! The valve setting to set has to be '
'given in the first column (index 0) for branch A '
'and in the second column (index 1) for branch B. '
'To set a constant valve opening, use `const_val` '
'instead!'
)
err_str = (
'A check for pandas series needs to be here,'
'also checking the timestamp! The check for the '
'correct duration of the timestamp needs to be '
'done during sim init!'
)
assert (
type(kwargs['val_given']) == pd.core.series.Series
), err_str
raise TypeError('Timeindex etc. not yet defined!!!')
# delete used kwargs to enable checking at the end:
del kwargs['val_given']
self.val_given = True
self.control_req = False
self.ctrl_defined = True
else:
err_str = (
'If `no_control=True` is defined for valve '
+ self.name
+ ', the valve opening has either to be'
' given with `const_val` as a constant opening or '
'with `val_given` as time dependent Panda Series!'
)
assert (
'const_val' not in kwargs and 'val_given' not in kwargs
), err_str
else:
err_str = (
'An error during the initialization of '
+ self.name
+ ' occurred! Please check the spelling and type of all '
'arguments passed to the parts `set_initial_cond()`!'
)
# construct list of differential input argument names IN THE CORRECT
# ORDER!!!
# regex to remove strings: [a-zA-Z_]*[ ]*=self.
self._input_arg_names_sorted = [
'ports_all',
'_port_link_idx',
'_dm_io',
'T',
]
# update init status:
self.initialized = True
def _reset_to_init_cond(self):
# set starting values to port factors:
if self.kind == 'mix':
self.pf_arr[0] = self._pf_init
self.pf_arr[1] = 1 - self._pf_init
else:
# self.pf_arr[1] = port_A_init
# self.pf_arr[2] = 1 - port_A_init
""" TODO: change to same idx mix split"""
self.pf_arr[0] = self._pf_init
self.pf_arr[1] = 1 - self._pf_init
def _get_flow_routine(
self, port, parent_port=None, subnet=False, **kwargs
):
"""
Returns the massflow calculation routine for the port of the current
part to the topology construction. The massflow calculation routine has
to look like:
routine = (memory_view_to_target_port,
operation_id,
memory_view_to_port1, memory_view_to_port2, ...)
with target_port being the port which has to be calculated and port1
and port2 being the other/source ports which **don't** have to be
calculated with this routine! These source ports **must be given**
when the routine is called.
Parameters:
-----------
port : string
Port name of the port which shall be calculated (target port).
"""
# get topology connection conditions (target index, source part/port
# identifiers, source index and algebraic sign for passed massflow):
trgt_idx, src_part, src_port, src_idx, alg_sign = self._get_topo_cond(
port, parent_port
)
# 3wValve, no ports solved yet
if self._cnt_open_prts == 3:
# The following connection requirement(s) have to be checked:
# 1: all ports (A, B and AB) of a mixing valve MUST NOT be on
# the pressure side of a pump.
# 2: entry ports (A and B) of a mixing valve MUST NOT be on the
# suction side of a pump. This means a mixing valve can only
# be solved coming from port AB.
# 3: all ports (A, B and AB) of a splitting valve MUST NOT be
# on the suction side of a pump.
# 4: exit ports (A and B) of a splitting valve MUST NOT be on
# the pressure side of a pump. This means a splitting valve
# can only be solved coming from port AB.
# 5: two parts of the non numeric solving kind MUST NOT be
# connected directly. At least one numeric part has to be in
# between.
# check connection requirement(s):
# prepare error strings:
err_str1 = (
'Part ' + self.name + ' is directly connected to '
'the pressure side of a pump. Mixing valves may '
'only be connected to the suction side of a pump '
'with port AB!'
)
err_str2 = (
'Part ' + self.name + ' is connected to the '
'suction side of a pump with port A or B. '
'Mixing valves may only be connected to the '
'suction side of a pump with port AB!'
)
err_str3 = (
'Part ' + self.name + ' is directly connected to the '
'suction side of a pump. Splitting valves may only be '
'connected to the pressure side of a pump with port '
'AB!'
)
err_str4 = (
'Part ' + self.name + ' is connected to the '
'pressure side of a pump with port A or B. '
'Splitting valves may only be connected to the '
'pressure side of a pump with port AB!'
)
if self.kind == 'mix':
# assert condition 1:
assert kwargs['pump_side'] != 'pressure', err_str1
# assert condition 2:
assert port == 'AB', err_str2
else:
# assert condition 3:
assert kwargs['pump_side'] != 'suction', err_str3
# assert condition 4:
assert port == 'AB', err_str4
# assert condition 5:
err_str5 = (
'Non numeric Part ' + self.name + ' is connected to '
'non numeric part ' + src_part + '. Two non '
'numeric parts must not be connected directly! '
'Insert a numeric part in between to set up a '
'correct topology!'
)
assert self._models.parts[src_part].solve_numeric, err_str5
# if valve is getting the massflow from another part (then port
# AB is solved as the first port), it can simply be copied
# from it: operation id 0 (positive) or - 1 (negative)
if alg_sign == 'positive':
operation_id = 0
else:
operation_id = -1
# add operation instructions to tuple (memory view to target
# massflow array cell, operation id and memory view source port's
# massflow array cells)
op_routine = tuple()
# construct memory view to target massflow array cell and append to
# op routine tuple
op_routine += (self._dm_io.reshape(-1)[trgt_idx],)
# add operation id:
op_routine += (operation_id,)
# add memory view to source massflow array cell:
op_routine += (
self._models.parts[src_part]._dm_io.reshape(-1)[src_idx],
)
else:
# get massflow calculation routine for the case that port
# A or B needs to be solved using the massflow from port AB
# and valve opening (stored in port factors array).
# operation id of a 3w valve for this case is ALWAYS 3, since
# AB must be given and A or B can be calculated by multiplying
# the respective port opening factor with AB. no negative
# of product needed, since AB positive massflow sign is
# contrary to A and B
operation_id = 3
# get source port index and create memory view to it:
src1_idx_start = self._port_own_idx[self.port_names.index('AB')]
src1_idx = slice(src1_idx_start, src1_idx_start + 1)
# second source "port" index is the index to the port factor
# array cell of port:
src2_idx_start = self.port_names.index(port)
src2_idx = slice(src2_idx_start, src2_idx_start + 1)
# add operation instructions to tuple (memory view to target
# massflow array cell, operation id, memory view to the
# source port's massflow array cell and memory view to the
# TARGET PORT'S port factor array cell):
op_routine = (
self._dm_io.reshape(-1)[trgt_idx],
operation_id,
self._dm_io.reshape(-1)[src1_idx],
self.pf_arr[src2_idx],
)
# update solved ports list and counter stop break:
self._solved_ports.append(port)
self._cnt_open_prts = self.port_num - len(self._solved_ports)
# this stays always False for 3w Valve!
self.break_topology = False
# remove part from hydr_comps if completely solved:
if self._cnt_open_prts == 0:
self._models._hydr_comps.remove(self.name)
# save topology parameters to dict for easy information lookup:
net = 'Subnet' if subnet else 'Flownet'
operation_routine = (
'Negative of sum'
if operation_id == -1
else 'Sum'
if operation_id == 1
else 'Pass on value'
if operation_id == 0
else ('Multiplication ' 'with port factor')
if operation_id == 3
else 'Error'
)
src_part = src_part if src_part is not None else self.name
source_ports = (
tuple(('AB', 'pf_arr[' + port + ']'))
if operation_id == 3
else src_port
if operation_id == 0
else tuple(set(self.port_names) - set(port))
)
# add port dict for current port and fill it:
if port not in self.info_topology:
self.info_topology[port] = dict()
self.info_topology[port].update(
{
'Net': net,
'Massflow': self._dm_io.reshape(-1)[trgt_idx],
'Calculation routine': operation_routine,
'Source part': src_part,
'Source port(s)': source_ports,
'Connected part': (
self._models.port_links[self.name + ';' + port].split(';')[
0
]
),
'Connected port': (
self._models.port_links[self.name + ';' + port].split(';')[
1
]
),
'Parent pump/part': kwargs['parent_pump'],
'Pump side': kwargs['pump_side'],
}
)
return op_routine
def _process_cv(self, ctrl_inst):
# 3w_valve_direct!
# n1 value (port A) with clipping to ]llim,ulim[:
self.pf_arr[0] = (
self._llim
if ctrl_inst.cv < self._llim
else self._ulim
if ctrl_inst.cv > self._ulim
else ctrl_inst.cv
)
# n2 value (port B):
self.pf_arr[1] = 1 - self.pf_arr[0]
def solve(self, timestep):
"""
Mixing Valve solve method:
--------------------------
The mass flow averaged mean of the values of the other parts ports
connected to the 'in1' and 'in2' ports is passed to the 'out'
port, taking the arithmetic mean of the in-ports temperatures to
get approximate material properties at the out port. the value of
the 'out' port is passed to 'in1' and 'in2' unchanged.
This is approximately correct, as the temperature values affect the
heat conduction and in-/ or outflowing massflow of connected parts
while the valve part itself is approximated as infinitely small
containing no mass.
Splitting Valve solve method:
-----------------------------
The arithmetic mean of the values of the other parts ports
connected to the 'out1' and 'out2' ports is passed to the 'in'
port, while the value of the 'in' port is passed to 'out1' and
'out2' unchanged.
This is approximately correct, as the temperature values affect the
heat conduction and in-/ or outflowing massflow of connected parts
while the valve part itself is approximated as infinitely small
containing no mass.
"""
# save massflow to results grid:
self.res_dm[self.stepnum] = self._dm_io
# get kind and then call numba jitted function (calling numba function
# which also does the selecting is SLOWER!)
if self.kind == 'mix':
# numba compiled function to solve mixing (includes getting ports)
_pf.solve_mix(
self._models.ports_all,
self._port_link_idx,
self._dm_io,
self.T,
)
else:
# numba compiled function to solve splitting(incl. getting ports)
_pf.solve_split(
self._models.ports_all, self._port_link_idx, self.T
)
# copy results to results grid:
self.res[self.stepnum] = self.T
def draw_part(self, axis, timestep, draw):
"""
Draws the current part in the plot environment, using vector
transformation to rotate the part drawing.
"""
# get and calculate all the information if not drawing (save all to a
# hidden dict):
if not draw:
# create hidden plot dict:
__pt = dict()
# get part start position from plot info dict:
__pt['pos_start'] = self.info_plot['path'][0]['start_coordinates']
# assert that orientation is in info dict and correct type:
orient_list = ['left', 'middle', 'right']
err_str = (
'For plotting of 3w-valves the orientation of the '
'valve must be given! Please pass the orientation as '
'`orientation=\'string\'` to the 3w-valve\'s '
'`set_plot_shape()`-method with string being one of '
'the following: ' + str(orient_list)
)
assert 'orientation' in self.info_plot, err_str
assert self.info_plot['orientation'] in orient_list, err_str
# get orientation:
__pt['orient'] = self.info_plot['orientation']
# get direction vector from info dict:
__pt['vec_dir'] = self.info_plot['path'][0]['vector']
# get part rotation angle from the drawing direction vector (vector
# from part start to part end in drawing):
__pt['rot_angle'] = self._models._angle_to_x_axis(__pt['vec_dir'])
# get length of part:
__pt['vec_len'] = np.sqrt(
(__pt['vec_dir'] * __pt['vec_dir']).sum()
)
# construct all drawing vectors for zero-rotation and one port on
# the left side, one port on the bottom side and one port on the
# right side (standard orientation 'left'). all vectors start from
# the center of the part which is given
# by the end position of vertex_coords.
# construct left port (upper vertice and lower vertice):
__pt['vec_l_u'] = np.array([-1, 0.5]) * __pt['vec_len']
__pt['vec_l_l'] = np.array([-1, -0.5]) * __pt['vec_len']
# construct right port (upper vertice and lower vertice):
__pt['vec_r_u'] = np.array([1, 0.5]) * __pt['vec_len']
__pt['vec_r_l'] = np.array([1, -0.5]) * __pt['vec_len']
# construct middle port (left vertice and right vertice):
__pt['vec_m_l'] = np.array([-0.5, -1]) * __pt['vec_len']
__pt['vec_m_r'] = np.array([0.5, -1]) * __pt['vec_len']
# get rotation angle due to orientation (to x unit vector (1 0)):
if __pt['orient'] == 'left':
# standard rotation
__pt['orient_angle'] = 0
elif __pt['orient'] == 'right':
# flipped standard rotation
__pt['orient_angle'] = 180 / 180 * np.pi
elif __pt['orient'] == 'middle':
# middle port on the left
__pt['orient_angle'] = -90 / 180 * np.pi
# get total rotation angle:
__pt['rot_angle'] += __pt['orient_angle']
# rotate all vectors:
__pt['vec_l_u'] = self._models._rotate_vector(
__pt['vec_l_u'], __pt['rot_angle']
)
__pt['vec_l_l'] = self._models._rotate_vector(
__pt['vec_l_l'], __pt['rot_angle']
)
__pt['vec_r_u'] = self._models._rotate_vector(
__pt['vec_r_u'], __pt['rot_angle']
)
__pt['vec_r_l'] = self._models._rotate_vector(
__pt['vec_r_l'], __pt['rot_angle']
)
__pt['vec_m_l'] = self._models._rotate_vector(
__pt['vec_m_l'], __pt['rot_angle']
)
__pt['vec_m_r'] = self._models._rotate_vector(
__pt['vec_m_r'], __pt['rot_angle']
)
# construct all points:
__pt['pos_center'] = __pt['pos_start'] + __pt['vec_dir']
__pt['pos_l_u'] = __pt['pos_center'] + __pt['vec_l_u']
__pt['pos_l_l'] = __pt['pos_center'] + __pt['vec_l_l']
__pt['pos_r_u'] = __pt['pos_center'] + __pt['vec_r_u']
__pt['pos_r_l'] = __pt['pos_center'] + __pt['vec_r_l']
__pt['pos_m_l'] = __pt['pos_center'] + __pt['vec_m_l']
__pt['pos_m_r'] = __pt['pos_center'] + __pt['vec_m_r']
# construct x- and y-grid for lines (from center to l_u to l_l to
# r_u to r_l to center to m_l to m_r to center):
__pt['x_grid'] = np.array(
[
__pt['pos_center'][0],
__pt['pos_l_u'][0],
__pt['pos_l_l'][0],
__pt['pos_r_u'][0],
__pt['pos_r_l'][0],
__pt['pos_center'][0],
__pt['pos_m_l'][0],
__pt['pos_m_r'][0],
__pt['pos_center'][0],
]
)
__pt['y_grid'] = np.array(
[
__pt['pos_center'][1],
__pt['pos_l_u'][1],
__pt['pos_l_l'][1],
__pt['pos_r_u'][1],
__pt['pos_r_l'][1],
__pt['pos_center'][1],
__pt['pos_m_l'][1],
__pt['pos_m_r'][1],
__pt['pos_center'][1],
]
)
# replace port coordinates since they are wrong for more complex
# parts:
if __pt['orient'] == 'left':
# get middle and right port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_m_l'] + __pt['vec_m_r']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_r_u'] + __pt['vec_r_l']) / 2
)
elif __pt['orient'] == 'middle':
# get left and right port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_l_u'] + __pt['vec_l_l']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_r_u'] + __pt['vec_r_l']) / 2
)
elif __pt['orient'] == 'right':
# get left and middle port coordinates:
__pt['p1_coords'] = (
__pt['pos_center']
+ (__pt['vec_l_u'] + __pt['vec_l_l']) / 2
)
__pt['p2_coords'] = (
__pt['pos_center']
+ (__pt['vec_m_l'] + __pt['vec_m_r']) / 2
)
# get the free ports (the ports where the position is not coming
# from):
free_ports = list(self.port_names)
free_ports.remove(self.info_plot['auto_connection']['own_port'])
# now get the free ports depending on invert status:
if 'invert' not in self.info_plot or not self.info_plot['invert']:
p1 = free_ports[0]
p2 = free_ports[1]
elif self.info_plot['invert']:
p1 = free_ports[1]
p2 = free_ports[0]
# set them to the ports:
self.info_plot[p1]['coordinates'] = __pt['p1_coords']
self.info_plot[p2]['coordinates'] = __pt['p2_coords']
# get the connected part;ports:
# p1_conn_p = self._models.port_links[self.name + ';' + free_ports[0]]
# p2_conn_p = self._models.port_links[self.name + ';' + free_ports[1]]
# # split them up:
# p1_conn_part, p1_conn_port = p1_conn_p.split(';')
# p2_conn_part, p2_conn_port = p2_conn_p.split(';')
# # now run their set plot shape with that new information again:
# NetPlotter.set_plot_shape(p1_conn_part, p1_conn_port,
# self._models.parts[p1_conn_part].
# info_plot['vertex_coordinates'],
# linewidth=self._models.parts[p1_conn_part].
# info_plot['path_linewidth'])
# NetPlotter.set_plot_shape(p2_conn_part, p2_conn_port,
# self._models.parts[p2_conn_part].
# info_plot['vertex_coordinates'],
# linewidth=self._models.parts[p2_conn_part].
# info_plot['path_linewidth'])
# get annotation text properties:
# get offset vector depending on rotation of pump to deal with
# none-quadratic form of textbox to avoid overlapping. only in the
# range of +/-45° of pos. and neg. x-axis an offset vec length of
# -20 is allowed, else -30:
offset = (
20
if (
0 <= __pt['rot_angle'] <= 45 / 180 * np.pi
or 135 / 180 * np.pi
<= __pt['rot_angle']
<= 225 / 180 * np.pi
or __pt['rot_angle'] >= 315 / 180 * np.pi
)
else 30
)
# get text offset from bottom point of pump by vector rotation:
__pt['txt_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
__pt['txtA_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
__pt['txtB_offset'] = tuple(
self._models._rotate_vector(
np.array([0, offset]), __pt['rot_angle']
)
)
# finally save hidden dict to self:
self.__pt = __pt
# only draw if true:
if draw:
# add lines to plot
axis.plot(
self.__pt['x_grid'],
self.__pt['y_grid'],
color=[0, 0, 0],
linewidth=self.info_plot['path_linewidth'],
zorder=5,
)
# construct name and massflow strings for ports A and B:
txt = self.name
txtA = (
'A'
+ r'\n$\dot{m} = $'
+ str(self.res_dm[timestep, 0])
+ r'$\,$kg/s'
)
txtB = (
'B'
+ r'\n$\dot{m} = $'
+ str(self.res_dm[timestep, 1])
+ r'$\,$kg/s'
)
axis.annotate(
txt,
xy=(self.__pt['pos_center']),
xytext=self.__pt['txt_offset'],
textcoords='offset points',
ha='center',
va='center',
)
axis.annotate(
txtA,
xy=(self.info_plot['A']['coordinates']),
xytext=self.__pt['txtA_offset'],
textcoords='offset points',
ha='center',
va='center',
)
axis.annotate(
txtB,
xy=(self.info_plot['B']['coordinates']),
xytext=self.__pt['txtB_offset'],
textcoords='offset points',
ha='center',
va='center',
)
# construct name and massflow string:
# txt = (self.name + '\n$\dot{m} = $' + str(self.res_dm[timestep][0])
# + 'kg/s')
# get offset vector depending on rotation of pump to deal with
# none-quadratic form of textbox to avoid overlapping. only in the
# range of +/-45° of pos. and neg. x-axis an offset vec length of -20
# is allowed, else -30:
# offset = (-20 if (0 <= rot_angle <= 45/180*np.pi
# or 135/180*np.pi <= rot_angle <= 225/180*np.pi
# or rot_angle >= 315/180*np.pi) else -30)
# # get text offset from bottom point of pump by vector rotation:
# txt_offset = tuple(self._models._rotate_vector(np.array([0, offset]),
# rot_angle))
# axis.annotate(txt, xy=(pos_bot),
# xytext=txt_offset, textcoords='offset points',
# ha='center', va='center')
```
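As a quick numeric illustration of the port-factor logic (operation id 3 in `_get_flow_routine` above): once port AB is solved, A and B follow directly from the opening factors. This is a hand calculation against the rules stated in the class docstring, not a call into the simulation environment; the numbers are made up.
```python
import numpy as np

pf_arr = np.array([0.3, 0.7, 1.0])  # port factors for A, B, AB (A + B = 1)
dm_AB = 0.8                         # massflow already solved at port AB [kg/s]

# operation id 3: multiply the solved AB massflow with the target port's factor
dm_A = pf_arr[0] * dm_AB            # -> 0.24 kg/s
dm_B = pf_arr[1] * dm_AB            # -> 0.56 kg/s

# mass balance of the valve: dm_AB = dm_A + dm_B
assert abs(dm_A + dm_B - dm_AB) < 1e-12
```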
#### File: parts/part_modules/suppliers.py
```python
import numpy as np
from ... import all_parts as msp
from ... import utility_functions as ut
def chp_with_fghex(
simenv,
databases,
number_rf_ports,
chp_ff_connection,
chp_ntrf_connection,
chp_htrfA_connection,
chp_htrfB_connection,
segment_name='1',
chp_kwds=dict(
power_electrical=20e3,
eta_el=0.32733,
p2h_ratio=0.516796,
modulation_range=(0.5, 1),
theta_ff=83.5,
heat_spread='range',
heated_cells=(1, 5),
no_control=False,
lower_limit=0.0,
upper_limit=1.0,
fluegas_flow=70 / 61.1,
pipe_specs={'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN80'}},
),
chp_ctrl=dict(
ctrld_part='tes',
on_sensor_port=15,
setpoint=70.0,
off_val=75.0,
chp_mode='heat',
),
fluegashex_theta_out_water=55.0,
ht_rf_theta_mix=65.0,
adjust_pipes=True,
Tamb=25,
ctrl_chp_pump_by_ts=False,
ctrl_hex_by_ts=False,
ctrl_htrf_by_ts=False,
fluegas_hex_kwds=dict(hex_regression_pipeline='pass sklearn pipe'),
calculations_df=None,
**kwds
):
"""
Add a CHP plant with a condensing flue gas heat exchanger to simenv.
Parameters
----------
simenv : TYPE
DESCRIPTION.
databases : TYPE
DESCRIPTION.
chp_kwds : TYPE, optional
DESCRIPTION. The default is dict( power_electrical=20e3, modulation_range=(.5, 1), heat_spread='range', heated_cells=(0, 4), no_control=False, lower_limit=0., upper_limit=1.).
fluegas_hex_kwds : TYPE, optional
DESCRIPTION. The default is dict(hex_regression_pipeline=5).
**kwds : TYPE
DESCRIPTION.
Returns
-------
None.
"""
assert isinstance(number_rf_ports, (int, float))
assert chp_ctrl['chp_mode'] in ('heat', 'el', 'el_mod')
if chp_ctrl['chp_mode'] == 'el_mod':
assert (
'tes_cap_mpred' in chp_ctrl
and 'opt_profiles' in chp_ctrl
and 'ctrl_profiles' in chp_ctrl
)
# extract TES caps for modelpredictive control optimization
tes_cap_min, tes_cap_max = chp_ctrl['tes_cap_mpred']
# calculate pump flow limits with a maximum allowed temp exceedance of 3K:
chp_ff_pump_ulim = (
chp_kwds['power_electrical']
/ chp_kwds['p2h_ratio']
/ ((chp_kwds['theta_ff'] - chp_ctrl['off_val']) * 4180)
)
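# e.g. with the default kwargs above: 20e3 W / 0.516796 ~ 38.7e3 W thermal,
# spread 83.5 - 75.0 = 8.5 K, so the upper limit is roughly
# 38.7e3 / (8.5 * 4180) ~ 1.09 kg/s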
ps_dn25 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN25'}}
ps_dn32 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN32'}}
ps_dn40 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN40'}}
# ps_dn50 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN50'}}
# ps_dn65 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN65'}}
ps_dn80 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN80'}}
# scale all values by the CHP power ratio (to the validation chp th power
# of 38.7kW) and by the temperature spread compared to vld ref temp spread
chp_power_factor = (
chp_kwds['power_electrical']
/ chp_kwds['p2h_ratio']
/ 38.7e3
* (82.5 - 73.0)
/ (chp_kwds['theta_ff'] - chp_ctrl['off_val'])
)
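# e.g. with the default kwargs: (20e3 / 0.516796) / 38.7e3 ~ 1.0 and a spread
# ratio of 9.5 K / 8.5 K ~ 1.12, so the reference pipe specs are scaled by ~1.12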
pspecs = { # get ref pipe specs and mult factor for each part
'pwp_ff': (ps_dn40, chp_power_factor),
'p3v_rf': (ps_dn40, chp_power_factor),
'p_rf_lt': (ps_dn25, chp_power_factor),
'p3v_htrfA': (ps_dn32, chp_power_factor),
'p_htrfB': (ps_dn32, chp_power_factor),
'p_htrf': (ps_dn40, chp_power_factor),
'p_rf': (ps_dn40, chp_power_factor),
'hex_fg': (ps_dn40, chp_power_factor),
}
if 'pipe_specs' in chp_kwds:
pspecs['chp'] = (chp_kwds['pipe_specs'], chp_power_factor)
del chp_kwds['pipe_specs']
else:
pspecs['chp'] = (ps_dn80, chp_power_factor)
if adjust_pipes: # adjust pipes by A_i multiplier
pspecs = ut.adjust_pipes(pspecs)
else: # take pspecs without multiplier
pspecs = {k: v[0] for k, v in pspecs.items()}
# make some basic chp power calculations:
# core chp pth und eta
chp_pth_core = chp_kwds['power_electrical'] / chp_kwds['p2h_ratio']
chp_eta_pth = chp_kwds['eta_el'] / chp_kwds['p2h_ratio']
# gas power lower and higher heating value
chp_pgas_lhv = chp_kwds['power_electrical'] / chp_kwds['eta_el']
chp_pgas_hhv = chp_pgas_lhv * 1.108
# chp_pps = ps_dn80
if number_rf_ports != 0.5: # with fg hex
simenv.add_part(
msp.CHPPlant,
name='CHP{0}'.format(segment_name),
length=2.5,
grid_points=6,
**chp_kwds,
s_ins=0.2,
lambda_ins=0.035,
T_init=71.0,
T_amb=Tamb,
material='carbon_steel',
pipe_specs=pspecs['chp'],
connect_fluegas_hex=True,
fg_hex_name='hex_chp_fg_{0}'.format(segment_name),
store_results=True,
)
# also calculate an estimated total power assuming that the FG HEX can
# extract 50% of the condensation enthalpy.
chp_pth_est = (
chp_pth_core
# get cond. enth. as a fraction relative to the thermal chp power
# fraction
+ ((chp_pgas_hhv * chp_eta_pth) - chp_pth_core) * 0.5
)
else: # without fg hex
simenv.add_part(
msp.CHPPlant,
name='CHP{0}'.format(segment_name),
length=2.5,
grid_points=6,
**chp_kwds,
s_ins=0.2,
lambda_ins=0.035,
T_init=71.0,
T_amb=Tamb,
material='carbon_steel',
pipe_specs=pspecs['chp'],
connect_fluegas_hex=False,
store_results=True,
)
# here the estimated thermal power is simply the thermal power...
chp_pth_est = chp_pth_core
# ff pipe with pump
simenv.add_part(
msp.PipeWithPump,
name='pwp_chp_ff_{0}'.format(segment_name),
length=4,
grid_points=15,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['pwp_ff'],
T_init=np.full(15, chp_kwds['theta_ff']),
T_amb=Tamb,
start_massflow=0.0,
lower_limit=0.0,
upper_limit=chp_ff_pump_ulim,
# max flow to cool 40kW at 7K spread
maximum_flow=chp_ff_pump_ulim,
store_results=(0, 1, 7, 14),
)
simenv.connect_ports(
'CHP{0}'.format(segment_name),
'out',
'pwp_chp_ff_{0}'.format(segment_name),
'in',
)
# design system depending on number of rf ports
if number_rf_ports == 3: # if three rf ports
# RF from HEX to CHP with branch to incorporate HT return flow. Branch is
# controlled 3W valve to emulate flow control valve over HEX.
# Only for validation with ang data, else fast ctrl:
# Controller should be P-Controller with setpoint 52-55 degree
# (check what s best) Kp should be low, so that it always lags behind
# (as the thermostate valve does).
simenv.add_part(
msp.PipeWith3wValve,
name='p3v_chp_rf_{0}'.format(segment_name),
length=2,
grid_points=7,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p3v_rf'],
T_init=np.hstack((np.full(2, 38.0), np.full(5, 52.5))),
T_amb=Tamb,
valve_location=2,
ctrl_required=True,
start_portA_opening=0.15,
lower_limit=0.01,
upper_limit=0.9,
store_results=(0, 1, 2, 3, 6),
)
simenv.connect_ports(
'p3v_chp_rf_{0}'.format(segment_name),
'AB',
'CHP{0}'.format(segment_name),
'in',
)
# RF from outer system to HEX:
simenv.add_part(
msp.Pipe,
name='p_chp_rf_lt_{0}'.format(segment_name),
length=7,
grid_points=30,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_rf_lt'],
T_init=26.0,
T_amb=Tamb,
store_results=(0, 1, 15, 28, 29),
)
# RF from TES HT port A (lower HT port) to mix with the RF from the HEX.
# Valve mixes flow with HT RF port B. P-Controller with low Kp suits the
# thermostatic valve best. Check for best mixing temperature
simenv.add_part(
msp.PipeWith3wValve,
name='p3v_chp_rf_htA_{0}'.format(segment_name),
length=7,
grid_points=20,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p3v_htrfA'],
T_init=np.hstack(
# see measurement data for the initial values
(
np.linspace(49.7, 51.7, 6), # ht rf A from 49.7 to 51.7
np.linspace(51.7, 54.2, 14),
)
), # after the mixing point, then up to 54.2
T_amb=Tamb,
valve_location=6,
ctrl_required=True,
start_portA_opening=0.3,
lower_limit=0.0,
upper_limit=1.0,
store_results=(0, 1, 5, 6, 7, 18, 19),
)
simenv.connect_ports(
'p3v_chp_rf_htA_{0}'.format(segment_name),
'AB',
'p3v_chp_rf_{0}'.format(segment_name),
'B',
)
# add pipe from TES HT RF B (upper/hot ht rf) to mix with HT RF A
simenv.add_part( # Connects to TES and htrf_A
msp.Pipe,
name='p_chp_rf_htB_{0}'.format(segment_name),
length=2,
grid_points=8,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_htrfB'],
T_init=51.5,
T_amb=Tamb,
store_results=(0, 1, 4, 6, 7),
)
simenv.connect_ports(
'p_chp_rf_htB_{0}'.format(segment_name),
'out',
'p3v_chp_rf_htA_{0}'.format(segment_name),
'B',
)
# add flue gas hex:
simenv.add_part(
msp.HEXCondPoly,
name='hex_chp_fg_{0}'.format(segment_name),
material='carbon_steel',
pipe_specs=pspecs['hex_fg'],
T_init=50,
T_amb=Tamb,
**fluegas_hex_kwds,
fluegas_flow_range=(0.5, 1.05),
water_flow_range=(0.0, 1.05),
store_results=True,
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'out',
'hex_chp_fg_{0}'.format(segment_name),
'water_in',
)
simenv.connect_ports(
'hex_chp_fg_{0}'.format(segment_name),
'water_out',
'p3v_chp_rf_{0}'.format(segment_name),
'A',
)
# boundary conditions for flue gas hex:
simenv.add_open_port(
name='BC_fg_in_{0}'.format(segment_name),
constant=True,
temperature=110,
)
simenv.add_open_port(
name='BC_fg_out_{0}'.format(segment_name),
constant=True,
temperature=50,
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_in_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_in',
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_out_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_out',
)
# connect sub-system to outer system:
simenv.connect_ports(
'pwp_chp_ff_{0}'.format(segment_name),
'out',
chp_ff_connection[0],
chp_ff_connection[1],
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'in',
chp_ntrf_connection[0],
chp_ntrf_connection[1],
)
simenv.connect_ports( # HT RF A (lower ht rf)
'p3v_chp_rf_htA_{0}'.format(segment_name),
'A',
chp_htrfA_connection[0],
chp_htrfA_connection[1],
)
simenv.connect_ports( # HT RF A (upper ht rf)
'p_chp_rf_htB_{0}'.format(segment_name),
'in',
chp_htrfB_connection[0],
chp_htrfB_connection[1],
)
elif number_rf_ports == 2:
# RF from HEX to CHP with branch to incorporate HT return flow.
# Branch is controlled 3W valve to emulate flow control valve over HEX.
simenv.add_part(
msp.PipeWith3wValve,
name='p3v_chp_rf_{0}'.format(segment_name),
length=2,
grid_points=7,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p3v_rf'],
T_init=np.hstack((np.full(2, 38.0), np.full(5, 52.5))),
T_amb=Tamb,
valve_location=2,
ctrl_required=True,
start_portA_opening=0.15,
lower_limit=0.01,
upper_limit=0.9,
store_results=(0, 1, 2, 3, 6),
)
simenv.connect_ports(
'p3v_chp_rf_{0}'.format(segment_name),
'AB',
'CHP{0}'.format(segment_name),
'in',
)
# RF from outer system to HEX:
simenv.add_part(
msp.Pipe,
name='p_chp_rf_lt_{0}'.format(segment_name),
length=7,
grid_points=30,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_rf_lt'],
T_init=26.0,
T_amb=Tamb,
store_results=(0, 1, 15, 28, 29),
)
# RF from TES HT port (ca. where lower/cold ht rf A is in 3-port
# config.)to mix with the RF from the HEX.
simenv.add_part(
msp.Pipe,
name='p_chp_rf_ht_{0}'.format(segment_name),
length=7,
grid_points=30,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_htrf'],
T_init=np.full(30, 56.0),
T_amb=Tamb,
store_results=(0, 1, 15, 28, 29),
)
# connect to flue gas hex return flow
simenv.connect_ports(
'p_chp_rf_ht_{0}'.format(segment_name),
'out',
'p3v_chp_rf_{0}'.format(segment_name),
'B',
)
# add flue gas hex:
simenv.add_part(
msp.HEXCondPoly,
name='hex_chp_fg_{0}'.format(segment_name),
material='carbon_steel',
pipe_specs=pspecs['hex_fg'],
T_init=50,
T_amb=Tamb,
**fluegas_hex_kwds,
fluegas_flow_range=(0.5, 1.05),
water_flow_range=(0.0, 1.05),
store_results=True,
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'out',
'hex_chp_fg_{0}'.format(segment_name),
'water_in',
)
simenv.connect_ports(
'hex_chp_fg_{0}'.format(segment_name),
'water_out',
'p3v_chp_rf_{0}'.format(segment_name),
'A',
)
# boundary conditions for flue gas hex:
simenv.add_open_port(
name='BC_fg_in_{0}'.format(segment_name),
constant=True,
temperature=110,
)
simenv.add_open_port(
name='BC_fg_out_{0}'.format(segment_name),
constant=True,
temperature=50,
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_in_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_in',
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_out_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_out',
)
# connect sub-system to outer system:
simenv.connect_ports(
'pwp_chp_ff_{0}'.format(segment_name),
'out',
chp_ff_connection[0],
chp_ff_connection[1],
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'in',
chp_ntrf_connection[0],
chp_ntrf_connection[1],
)
simenv.connect_ports( # HT RF A (lower ht rf)
'p_chp_rf_ht_{0}'.format(segment_name),
'in',
chp_htrfA_connection[0],
chp_htrfA_connection[1],
)
elif number_rf_ports == 1:
# RF from HEX to CHP. NO branch, since only one rf port chosen!
simenv.add_part(
msp.Pipe,
name='p_chp_rf_{0}'.format(segment_name),
length=2,
grid_points=6,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_rf'],
T_init=np.full(6, 45.0),
T_amb=Tamb,
store_results=(0, 1, 4, 5),
)
simenv.connect_ports(
'p_chp_rf_{0}'.format(segment_name),
'out',
'CHP{0}'.format(segment_name),
'in',
)
# RF from outer system to HEX:
simenv.add_part(
msp.Pipe,
name='p_chp_rf_lt_{0}'.format(segment_name),
length=7,
grid_points=21,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_rf'],
T_init=26.0,
T_amb=Tamb,
store_results=(0, 1, 10, 19, 20),
)
# add flue gas hex:
simenv.add_part(
msp.HEXCondPoly,
name='hex_chp_fg_{0}'.format(segment_name),
material='carbon_steel',
pipe_specs=pspecs['hex_fg'],
T_init=50,
T_amb=Tamb,
**fluegas_hex_kwds,
fluegas_flow_range=(0.5, 1.05),
water_flow_range=(0.0, 1.05),
store_results=True,
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'out',
'hex_chp_fg_{0}'.format(segment_name),
'water_in',
)
simenv.connect_ports(
'hex_chp_fg_{0}'.format(segment_name),
'water_out',
'p_chp_rf_{0}'.format(segment_name),
'in',
)
# boundary conditions for flue gas hex:
simenv.add_open_port(
name='BC_fg_in_{0}'.format(segment_name),
constant=True,
temperature=110,
)
simenv.add_open_port(
name='BC_fg_out_{0}'.format(segment_name),
constant=True,
temperature=50,
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_in_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_in',
)
simenv.connect_ports(
'BoundaryCondition',
'BC_fg_out_{0}'.format(segment_name),
'hex_chp_fg_{0}'.format(segment_name),
'fluegas_out',
)
# connect sub-system to outer system:
simenv.connect_ports(
'pwp_chp_ff_{0}'.format(segment_name),
'out',
chp_ff_connection[0],
chp_ff_connection[1],
)
simenv.connect_ports(
'p_chp_rf_lt_{0}'.format(segment_name),
'in',
chp_ntrf_connection[0],
chp_ntrf_connection[1],
)
elif number_rf_ports == 0.5:
# ONE RF PORT AND NO FLUE GAS HEX!!
# RF from HEX to CHP. NO branch, since only one rf port chosen!
simenv.add_part(
msp.Pipe,
name='p_chp_rf_{0}'.format(segment_name),
length=9,
grid_points=27,
s_ins=0.05,
lambda_ins=0.03,
material='carbon_steel',
pipe_specs=pspecs['p_rf'],
T_init=np.full(27, 45.0),
T_amb=Tamb,
store_results=(0, 1, 13, 25, 26),
)
simenv.connect_ports(
'p_chp_rf_{0}'.format(segment_name),
'out',
'CHP{0}'.format(segment_name),
'in',
)
# connect sub-system to outer system:
simenv.connect_ports(
'pwp_chp_ff_{0}'.format(segment_name),
'out',
chp_ff_connection[0],
chp_ff_connection[1],
)
simenv.connect_ports(
'p_chp_rf_{0}'.format(segment_name),
'in',
chp_ntrf_connection[0],
chp_ntrf_connection[1],
)
else:
        raise ValueError('`number_rf_ports` must be one of 3, 2, 1 or 0.5.')
# add control:
if chp_ctrl['chp_mode'] == 'heat':
c_pel_chp_ctrld_part = (
'p3v_chp_rf_{0}' if number_rf_ports in (2, 3) else 'p_chp_rf_{0}'
)
# switch on CHP if on_sensor part/port is below setpoint and switch off
# when controlled part/port is above off_val --> adjust both to match
# theta_ff
simenv.add_control(
msp.TwoSensors,
name='c_chp_pel_{0}'.format(segment_name),
actuator='CHP{0}'.format(segment_name),
process_CV_mode='direct',
CV_saturation=(0.0, 1.0),
controlled_part=c_pel_chp_ctrld_part.format(segment_name),
controlled_port=-1,
reference_part='none',
setpoint=chp_ctrl['off_val'],
sub_controller=False,
off_state=0.0,
time_domain='continuous',
deadtime=0.0,
slope=(0, 0), # (5%/s modulation) deact., checked by chp plant
on_sensor_part=chp_ctrl['ctrld_part'],
on_sensor_port=chp_ctrl['on_sensor_port'],
activation_value=chp_ctrl['setpoint'],
activation_sign='lower',
deactivation_sign='greater',
invert=False,
)
elif chp_ctrl['chp_mode'] == 'el_mod':
c_pel_chp_ctrld_part = (
'p3v_chp_rf_{0}' if number_rf_ports in (2, 3) else 'p_chp_rf_{0}'
)
# electricity led modulated CHP mode including model predictive ctrl
simenv.add_control(
msp.ModelPredCHP,
name='chp_modelpred_{0}'.format(segment_name),
actuator='CHP{0}'.format(segment_name),
process_CV_mode='direct',
CV_saturation=(0.0, 1.0),
controlled_part=c_pel_chp_ctrld_part.format(segment_name),
controlled_port=-1,
reference_part='none',
setpoint=chp_ctrl['off_val'],
sub_controller=False,
chp_params={
'pel_chp': chp_kwds['power_electrical'],
'pth_chp': chp_pth_est,
'eta_el': chp_kwds['eta_el'],
'mod_range': chp_kwds['modulation_range'],
},
opt_params={
'opt_timeframe': '2d',
'opt_every': '15min',
'max_iter': 500,
},
tes_soc_minmax=(tes_cap_min, tes_cap_max),
tes_part='tes',
opt_profiles=chp_ctrl['opt_profiles'],
ctrl_profiles=chp_ctrl['ctrl_profiles'],
costs='default',
off_state=0.0,
time_domain='continuous',
deadtime=0.0,
slope=(-0.2, 0.2), # (20%/s modulation)
on_sensor_part=chp_ctrl['ctrld_part'],
on_sensor_port=chp_ctrl['on_sensor_port'],
activation_value=chp_ctrl['setpoint'],
activation_sign='lower',
deactivation_sign='greater',
invert=False,
emergency_cntrl={
'use_hysteresis': True,
'hysteresis': 1.0,
'full_power_offset': 2.0,
}, # go into full power 2°C below act val SP
tes_cap_restore_time='default', # time to restore cap to input val
)
# pump controls
if not ctrl_chp_pump_by_ts:
simenv.add_control(
msp.PID,
name='c_chp_pump_{0}'.format(segment_name),
actuator='pwp_chp_ff_{0}'.format(segment_name),
process_CV_mode='part_specific',
CV_saturation=(0.0, 1.0),
controlled_part='CHP{0}'.format(segment_name),
controlled_port=4,
reference_part='none',
setpoint=chp_kwds['theta_ff'],
# sub_controller=True, master_type='controller',
# master_controller='c_chp_pel_{0}'.format(segment_name), dependency_kind='concurrent',
sub_controller=True,
master_type='part',
master_part='CHP{0}'.format(segment_name),
master_variable='_dQ_heating',
master_variable_index=0,
off_state=0.0,
time_domain='continuous',
deadtime=0,
dependency_kind='concurrent',
# slope=(-pump_max_val / 10, pump_max_val / 10), # 10s full on-to-off
slope=(-0.1, 0.1), # 10s full on-to-off
terms='PI',
anti_windup='auto_.1',
            # stable, but a bit slow:
# loop_tuning='ziegler-nichols', Kp_crit=.6, T_crit=2.5,
# loop_tuning='tune', Kp=.68,
loop_tuning='ziegler-nichols',
Kp_crit=0.65,
T_crit=40 / 16,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
else:
simenv.add_control(
msp.PID,
name='c_chp_pump_{0}'.format(segment_name),
actuator='pwp_chp_ff_{0}'.format(segment_name),
process_CV_mode='part_specific',
CV_saturation=(0.0, 1.0),
controlled_part='CHP{0}'.format(segment_name),
controlled_port=4,
reference_part='none',
setpoint=chp_kwds['theta_ff'],
# sub_controller=True, master_type='controller',
# master_controller='c_chp_pel_{0}'.format(segment_name), dependency_kind='concurrent',
sub_controller=True,
master_type='part',
master_part='CHP{0}'.format(segment_name),
master_variable='_dQ_heating',
master_variable_index=0,
dependency_kind='concurrent',
off_state=0.0,
time_domain='continuous',
deadtime=0,
# slope=(-pump_max_val / 10, pump_max_val / 10), # 10s full on-to-off
slope=(-0.1, 0.1), # 10s full on-to-off
terms='PI',
anti_windup=1.0,
# loop_tuning='manual', Kp=dm_ff_diff, Ki=dm_ff_diff / 7,
loop_tuning='ziegler-nichols',
Kp_crit=0.5,
T_crit=1.43, # Kp=.65, Ki=0,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
# control massflow over fluegas heat exchanger
# if controlled by timeseries, the hex will have a faster Kp/Ki value
# only if 2 or 3 rf ports are to be simulated
if number_rf_ports in (2, 3):
if not ctrl_hex_by_ts:
simenv.add_control(
msp.PID,
name='c_chp_3v_hex_fg_{0}'.format(segment_name),
actuator='p3v_chp_rf_{0}'.format(segment_name),
process_CV_mode='part_specific',
actuator_port='A',
CV_saturation=(0.05, 0.95),
controlled_part='p3v_chp_rf_{0}'.format(segment_name),
controlled_port=1,
reference_part='none',
setpoint=fluegashex_theta_out_water,
sub_controller=False,
off_state=1.0,
time_domain='continuous',
deadtime=0,
slope=(-0.1, 0.1), # 10s full open-to-closed
# terms='PID', loop_tuning='manual', Kp=0.025, #Kp=.03, Ki=.01,
# anti_windup=10,
terms='PID',
loop_tuning='ziegler-nichols',
rule='classic',
Kp_crit=0.035 * 1.3,
T_crit=37.79,
anti_windup=150.0,
filter_derivative=False,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
else:
# Kp = 0.035
# Ki = 0.0025
simenv.add_control(
msp.PID,
name='c_chp_3v_hex_fg_{0}'.format(segment_name),
actuator='p3v_chp_rf_{0}'.format(segment_name),
process_CV_mode='part_specific',
actuator_port='A',
CV_saturation=(0.05, 0.95),
controlled_part='p3v_chp_rf_{0}'.format(segment_name),
controlled_port=1,
reference_part='none',
setpoint=fluegashex_theta_out_water,
sub_controller=False,
off_state=1.0,
time_domain='continuous',
deadtime=0,
slope=(-0.1, 0.1), # 10s full open-to-closed
terms='PID',
loop_tuning='ziegler-nichols',
rule='classic',
Kp_crit=0.035 * 1.5,
T_crit=37.79,
anti_windup=150.0,
filter_derivative=False,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
# control mix of massflows through HT rf ports
# only if 3 rf ports are to be simulated
if number_rf_ports == 3:
# act_3v_htrf = 'p3v_chp_rf_htA_{0}'.format(segment_name)
if not ctrl_htrf_by_ts:
simenv.add_control(
msp.PID,
name='c_chp_3v_htrf_{0}'.format(segment_name),
actuator='p3v_chp_rf_htA_{0}'.format(segment_name),
process_CV_mode='part_specific',
actuator_port='A',
CV_saturation=(0.3, 1.0),
controlled_part='p3v_chp_rf_htA_{0}'.format(segment_name),
controlled_port=7,
reference_part='none',
setpoint=ht_rf_theta_mix,
sub_controller=False,
off_state=0.0,
time_domain='continuous',
deadtime=0,
slope=(-0.05, 0.05), # 10s full open-to-closed
# terms='P', loop_tuning='manual', Kp=0.15, # Kp=.15, #Ki=.1,
# anti_windup=25,
terms='PID',
loop_tuning='ziegler-nichols',
rule='classic',
Kp_crit=0.035 * 2,
T_crit=15.0,
anti_windup=150.0,
filter_derivative=False,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
else:
simenv.add_control(
msp.PID,
name='c_chp_3v_htrf_{0}'.format(segment_name),
actuator='p3v_chp_rf_htA_{0}'.format(segment_name),
process_CV_mode='part_specific',
actuator_port='A',
CV_saturation=(0.03, 1.0),
controlled_part='p3v_chp_rf_htA_{0}'.format(segment_name),
controlled_port=7,
reference_part='none',
setpoint=ht_rf_theta_mix,
sub_controller=False,
off_state=0.0,
time_domain='continuous',
deadtime=0,
slope=(-0.1, 0.1), # 10s full open-to-closed
terms='PID',
loop_tuning='ziegler-nichols',
rule='classic',
Kp_crit=0.035 * 2,
T_crit=15.0,
anti_windup=150.0,
filter_derivative=False,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
# save calculations to df and return if given:
if calculations_df is not None:
calculations_df.loc[
'max_pump_flow', 'CHP{0}'.format(segment_name)
] = chp_ff_pump_ulim
calculations_df.loc[
'pipe_scaling', 'CHP{0}'.format(segment_name)
] = chp_power_factor
return calculations_df
def gasboiler(
simenv,
gasb_pth,
lhs,
rc_flow,
phw_dmd,
space_heating=None,
n_rf_ports=1,
segment_name='1',
adjust_pipes=True,
ff_connector={'part': 'port'},
ltrf_connector={'part': 'port'},
htrf_connector={'part': 'port'},
gasboiler_ctrl=dict(
on_sens_part='tes',
on_sens_port=5,
setpoint=70.0,
off_sens_part='tes',
off_sens_port=10,
off_val=75.0,
theta_ff_gasb=75.0,
),
Tamb=25.0,
calculations_df=None,
):
# ps_dn20 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN20'}}
# ps_dn25 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN25'}}
# ps_dn32 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN32'}}
# ps_dn40 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN40'}}
ps_dn50 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN50'}}
# ps_dn65 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN65'}}
# ps_dn80 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN80'}}
ps_dn125 = {'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN125'}}
# scale all values by the gasboiler thermal power and the temperature
# spread with reference to the ref min. temp. spread of 15K
gasb_factor = (
gasb_pth
/ 150e3
/ (gasboiler_ctrl['theta_ff_gasb'] - 60.0)
* (75 - 60.0)
    )
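    # Illustrative check of the scaling (assumed values, not from the source):
    # a 300 kW boiler at the default theta_ff_gasb=75 °C gives
    # 300e3 / 150e3 / (75 - 60) * (75 - 60) = 2, i.e. roughly twice the
    # reference pipe cross section.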
# get ref pipe specs and mult factor for each part
pspecs = {
'pwp_gasb_ff': (ps_dn50, gasb_factor),
'p_gasb_core': (ps_dn125, gasb_factor),
'p_gasb_rf': (ps_dn50, gasb_factor),
'pb_gasb_rf': (ps_dn50, gasb_factor),
}
if adjust_pipes: # adjust pipes by A_i multiplier
pspecs = ut.adjust_pipes(pspecs)
else: # take pspecs without multiplier
pspecs = {k: v[0] for k, v in pspecs.items()}
# scale pump flow limit to full flow at 15K spread at full power
pmp_flow_lim = gasb_pth / (4180 * (gasboiler_ctrl['theta_ff_gasb'] - 60.0))
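    # Illustrative check (assumed values): 150e3 W / (4180 J/(kg K) * 15 K)
    # ≈ 2.39 kg/s at the default theta_ff_gasb=75 °C.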
    simenv.add_part(  # forward flow pipe from the gas boiler
msp.PipeWithPump,
name='pwp_gasb_ff_{0}'.format(segment_name),
length=2.5,
grid_points=8,
insulation_thickness=0.1,
insulation_lambda=0.035,
material='carbon_steel',
pipe_specs=pspecs['pwp_gasb_ff'],
T_amb=Tamb,
T_init=np.full(8, gasboiler_ctrl['theta_ff_gasb']),
start_massflow=0.0,
lower_limit=0,
upper_limit=pmp_flow_lim,
maximum_flow=pmp_flow_lim,
store_results=(0, 1, 4, 7),
)
simenv.connect_ports(
list(ff_connector.keys())[0],
list(ff_connector.values())[0],
'pwp_gasb_ff_{0}'.format(segment_name),
'out',
)
# GASBOILER CORE
    simenv.add_part(  # gas boiler core modelled as a heated pipe
msp.HeatedPipe,
name='ph_gasb_{0}'.format(segment_name),
length=2.5,
grid_points=6,
insulation_thickness=0.2,
insulation_lambda=0.035,
material='carbon_steel',
pipe_specs=pspecs['p_gasb_core'],
T_amb=Tamb,
T_init=np.full(6, 75.0),
heat_spread='range',
heated_cells=[1, 5],
no_control=False,
lower_limit=0.0,
upper_limit=gasb_pth,
store_results=True,
)
simenv.connect_ports(
'pwp_gasb_ff_{0}'.format(segment_name),
'in',
'ph_gasb_{0}'.format(segment_name),
'out',
)
if n_rf_ports == 1:
        simenv.add_part(  # gas boiler return flow pipe
msp.Pipe,
name='p_gasb_rf_{0}'.format(segment_name),
length=2.5,
grid_points=8,
insulation_thickness=0.1,
insulation_lambda=0.035,
material='carbon_steel',
pipe_specs=pspecs['p_gasb_rf'],
T_amb=Tamb,
T_init=np.full(8, 25.0),
store_results=(0, 1, 4, 6, 7),
)
simenv.connect_ports(
'ph_gasb_{0}'.format(segment_name),
'in',
'p_gasb_rf_{0}'.format(segment_name),
'out',
)
simenv.connect_ports(
'p_gasb_rf_{0}'.format(segment_name),
'in',
list(ltrf_connector.keys())[0],
list(ltrf_connector.values())[0],
)
elif n_rf_ports == 2:
raise NotImplementedError
else:
raise ValueError
# add control:
# switch on gas boiler if on_sensor part/port is below setpoint and
# switch off when controlled part/port is above off_val
# --> adjust both to match theta_ff
simenv.add_control( # control on/off state of gas boiler
msp.TwoSensors,
name='c_gasb_pth_{0}'.format(segment_name),
actuator='ph_gasb_{0}'.format(segment_name),
process_CV_mode='direct',
CV_saturation=(0.0, gasb_pth),
controlled_part=gasboiler_ctrl['off_sens_part'],
controlled_port=gasboiler_ctrl['off_sens_port'],
reference_part='none',
on_sensor_part=gasboiler_ctrl['on_sens_part'],
on_sensor_port=gasboiler_ctrl['on_sens_port'],
setpoint=gasboiler_ctrl['off_val'],
sub_controller=False,
off_state=0,
time_domain='continuous',
deadtime=0,
slope=(-gasb_pth / 10, gasb_pth / 10),
activation_value=gasboiler_ctrl['setpoint'],
activation_sign='lower',
deactivation_sign='greater',
invert=False,
silence_slope_warning=True,
)
simenv.add_control( # pump controls temperature at gas boiler outlet
msp.PID,
name='c_gasb_pump_{0}'.format(segment_name),
actuator='pwp_gasb_ff_{0}'.format(segment_name),
process_CV_mode='part_specific',
CV_saturation=(0.0, 1.0),
controlled_part='ph_gasb_{0}'.format(segment_name),
controlled_port=4,
reference_part='none',
setpoint=gasboiler_ctrl['theta_ff_gasb'],
sub_controller=True,
master_type='part',
master_part='ph_gasb_{0}'.format(segment_name),
master_variable='_dQ_heating',
master_variable_index=0,
dependency_kind='concurrent',
off_state=0,
time_domain='continuous',
deadtime=0,
slope=(-0.1, 0.1),
terms='PI',
anti_windup='auto_.3',
# loop_tuning='manual', Kp=dm_ff_diff, Ki=dm_ff_diff / 7,
# loop_tuning='ziegler-nichols', Kp_crit=.5, T_crit=1.43, # Kp=.65, Ki=0,
# loop_tuning='tune', Kp=.55, further reduced kp to .5 to reduce OS
loop_tuning='ziegler-nichols',
Kp_crit=0.5,
T_crit=31 / 13,
adapt_coefficients=True,
norm_timestep=1.0,
invert=True,
)
# save calculations to df and return if given:
if calculations_df is not None:
calculations_df.loc[
'max_pump_flow', 'Gasboiler_{0}'.format(segment_name)
] = pmp_flow_lim
calculations_df.loc[
'pipe_scaling', 'Gasboiler_{0}'.format(segment_name)
] = gasb_factor
return calculations_df
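# Hedged usage sketch for the gasboiler helper above. The SimEnv instance and
# the TES part/port names in the connector dicts are assumptions for
# illustration only and must match the surrounding model setup.
def _example_gasboiler_setup(simenv):
    return gasboiler(
        simenv,
        gasb_pth=150e3,
        lhs=None,
        rc_flow=None,
        phw_dmd=None,
        n_rf_ports=1,
        segment_name='1',
        ff_connector={'tes': 'in_5'},
        ltrf_connector={'tes': 'out_20'},
        htrf_connector={'tes': 'out_15'},
        Tamb=25.0,
    )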
```
#### File: multisim/parts/pump.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as _plt
from ..simenv import SimEnv
from .. import precomp_funs as _pf
class Pump(SimEnv):
"""
type: Pump class.
    One pump is required in each contiguous section of the model environment
which is not separated from other sections by a hydraulic compensator like
a thermal energy storage.
The pumps determine the mass flows in these contiguous sections.
The mass flow is calculated after each timestep and intermediate step
depending on the given control algorithm and the measured values in the
specified measuring port.
The Pump class does not contain a differential method as it only passes the
values of the part connected to its 'in'-port to its 'out'-port and the
values of the part connected to its 'out'-port to its 'in'-port. Thus it is
not involved in solving the equations using the specified solver algorithm.
"""
def __init__(self, name, master_cls, **kwargs):
self._models = master_cls
self.constr_type = 'Pump' # define construction type
base_err = ( # define leading base error message
'While adding {0} `{1}` to the simulation '
'environment, the following error occurred:\n'
).format(self.constr_type, str(name))
arg_err = ( # define leading error for missing/incorrect argument
'Missing argument or incorrect type/value: {0}\n\n'
)
self._base_err = base_err # save to self to access it in controllers
self._arg_err = arg_err # save to self to access it in controllers
# super().__init__()
self.name = name
self._unit = '[kg/s]' # unit of the actuator
self.part_id = self._models.num_parts - 1
# # array defining the minium and maximum massflow for the pump:
# done in init now!
# self.dm_range = np.array((dm_min, dm_max))
# save smallest possible float number for avoiding 0-division:
self._tiny = self._models._tiny
# even though this part is not using numeric solving, number of
# gridpoints are specified anyways:
self.num_gp = 2
# preallocate grids:
self.T = np.zeros(2, dtype=np.float64)
self._T_init = np.zeros_like(self.T) # init temp for resetting env.
# preallocate T ports array (in Pump only used for dimension checking)
self._T_port = np.zeros_like(self.T)
# self.dm = np.zeros(1)
# self.U = np.zeros(2)
# preallocate grids for port connection parameters
# cross section area of wall of connected pipe, fluid cross section
# area of, gridspacing and lambda of wall of connected pipe
self._A_wll_conn_p = np.zeros_like(self._T_port)
self._A_fld_conn_p = np.zeros_like(self._T_port)
self._port_gsp = np.full_like(self._T_port, self._tiny)
self._lam_wll_conn_p = np.full_like(self._T_port, self._tiny)
self._lam_port_fld = np.full_like(self._T_port, self._tiny)
# port_definition (first and last array element):
self.port_num = 2
# Index to own value array to get values of own ports, meaning if I
# index a FLATTENED self.T.flat with self._port_own_idx, I need to
        # get values according to the order given in self.port_names.
# That is, this must yield the value of the cell in self.T, which is
# belonging to the port 'in':
# self.T.flat[self._port_own_idx[self.port_names.index('in')]]
self._port_own_idx = np.array((0, self.T.shape[0] - 1), dtype=np.int32)
self._port_own_idx_2D = self._port_own_idx # save for compatibility
"""port_array"""
self.port_ids = np.array((), dtype=np.int32)
# save port names
self.port_names = tuple(('in', 'out'))
# set massflow characteristics for ports: in means that an inflowing
# massflow has a positive sign, out means that an outflowing massflow
# is pos.
self.dm_char = tuple(('in', 'out'))
# construct partname+portname to get fast access to own ports:
dummy_var = list(self.port_names)
for i in range(self.port_num):
dummy_var[i] = self.name + ';' + dummy_var[i]
self._own_ports = tuple(dummy_var)
# preallocate port values to avoid allocating in loop:
self._port_vals = np.zeros(self.port_num)
# preallocate list to mark ports which have already been solved in
# topology (to enable creating subnets)
self._solved_ports = list()
# preallocate massflow grid with port_num. An estimate of total rows
# will be preallocated before simulation start in initialize_sim:
self.res_dm = np.zeros((2, self.port_num))
# set if type has to be solved numeric:
self.solve_numeric = False
# if port arrays shall be collapsed to amount of ports to improve speed
self.collapse_arrays = False
self._collapsed = False # bool checker if already collapsed
# determine if part is treated as hydraulic compensator
self.hydr_comp = False
# if part can be a parent part of a primary flow net:
self._flow_net_parent = True
# add each flow channel of part to hydr_comps (will be removed once its
# massflow solving method is completely integrated in flow_net.
# remaining parts except real hydr comps will be used to generate an
# error):
self._models._hydr_comps.add(self.name)
# if the topology construction method has to stop when it reaches the
# part to solve more ports from other sides before completely solving
# the massflow of it. This will be set to false as soon as only one
# port to solve is remaining:
self.break_topology = False
# count how many ports are still open to be solved by topology. If
# break topology is True, this is used to set it to False if 1 is
# reached.
self._cnt_open_prts = self.port_num # not required here
self._port_heatcond = True # if heatcond. over ports is enabled
# determine if part has the capability to affect massflow (dm) by
# diverting flow through ports or adding flow through ports:
self.affect_dm = False
# if the massflow (dm) has the same value in all cells of the part
# (respectively in each flow channel for parts with multiple flows):
self.dm_invariant = True
# if the part has multiple separated flow channels which do NOT mix
        # (like a heat exchanger for example):
self.multiple_flows = False
# bool checker if flows were updated in update_flownet to avoid
# processing flows in get_diff each time (array for referencing):
self._process_flows = np.array([True])
# bool check if massflow is given for the entire program run:
self.dm_given = False
# if the part CAN BE controlled by the control algorithm:
self.is_actuator = True
# if the part HAS TO BE controlled by the control algorithm:
self.control_req = True
# if the part needs a special control algorithm (for parts with 2 or
# more controllable inlets/outlets/...):
self.actuator_special = False
# initialize bool if control specified:
self.ctrl_defined = False
# if the parts get_diff method is solved with memory views entirely and
# thus has arrays which are extended by +2 (+1 at each end):
self.enlarged_memview = False
# if the part has a special plot method which is defined within the
# part's class:
self.plot_special = True
# save initialization status:
self.initialized = False
# save memory address of T
self._memadd_T = self.T.__array_interface__['data'][0]
# preallocate massflow grid:
if self.dm_invariant:
self.dm = np.zeros(1)
else:
self.dm = np.zeros(self.port_num)
# and also preallocate grid for massflow through ports:
if not self.hydr_comp:
# if part is no hydraulic compensator, dm ports grid is simply a
# memory view to massflow grid
self._dm_port = self.dm[:]
self._dm_io = self.dm[:]
else:
# if part is a hydraulic compensator, dm ports is separate from dm
self._dm_port = np.zeros_like(self.T)
self._dm_io = np.zeros_like(self.T)
# set array where the CV is set to:
if self.is_actuator:
self._actuator_CV = self.dm[:] # set array to be controlled
self._actuator_CV_name = 'massflow' # set description
# save memory address of dm
self._memadd_dm = self.dm.__array_interface__['data'][0]
# save all kind of info stuff to dicts:
# topology info:
self.info_topology = dict()
# IMPORTANT: THIS VARIABLE **MUST NOT BE INHERITED BY SUB-CLASSES**!!
# If sub-classes are inherited from this part, this bool checker AND
# the following variables MUST BE OVERWRITTEN!
        # is the diff function fully njitted AND are all input-variables
# stored in a container?
self._diff_fully_njit = False
# self._diff_njit = pipe1D_diff # handle to njitted diff function
# input args are created in simenv _create_diff_inputs method
def init_part(self, *, start_massflow, **kwargs):
"""
Initialize pump with specifications, material and initial conditions.
"""
# get material properties and pipe specifications:
self._get_specs_n_props(**kwargs)
# gridspacing is saved in an array of length port_num to save the
# gridspacing of connected parts for heat flux calculation. this array
# is pre-filled with an estimate of 1.1 times the DN outer diameter but
# will be overwritten and filled by get_port_connections() method with
# connected part values, if any numeric parts are connected.
# therefore get the info topology key:
if 'in' in self.info_topology:
key = 'in'
else:
key = 'all_ports'
self.grid_spacing = np.full_like(
self._T_port, self.info_topology[key]['pipe_specs']['d_o'] * 1.1
)
# for nonnumeric parts moved to initialize sim
# self.T = self._get_ports()
# set starting massflow:
self.dm[0] = start_massflow
self._dm_init = start_massflow # save for resetting
# if pump has to be controlled (default) and thus is NOT set to static,
# it needs a lower and upper limit for the values to set:
if 'no_control' not in kwargs or (
'no_control' in kwargs and kwargs['no_control'] is False
):
err_str = (
self._base_err
+ self._arg_err.format('lower_limit, upper_limit')
+ 'The part was set to be an actuator and need a control with '
'`no_control=False`, thus `lower_limit` and `upper_limit` '
'in {0} have to be passed to clip the controller action on '
'the actuator to the limits.\n'
'The limits have to be given as integer or float values with '
'`lower_limit < upper_limit`.'
).format(self._unit)
assert 'lower_limit' in kwargs and 'upper_limit' in kwargs, err_str
# set limits to array:
self._lims = np.array( # set limits to array
[kwargs['lower_limit'], kwargs['upper_limit']],
dtype=np.float64,
)
self._llim = self._lims[0] # also save to single floats
self._ulim = self._lims[1] # also save to single floats
# check if lower limit is less than upper limit:
assert self._lims[0] < self._lims[1], err_str
# if part does not need control (static or given values):
elif 'no_control' in kwargs and kwargs['no_control'] is True:
# if part is static:
if 'dm_const' in kwargs:
# check for correct type:
err_str = (
self._base_err
+ self._arg_err.format('dm_const')
+ 'If the part was set to static with `dm_const=X`, X has '
'to be either a single float or integer value. To set '
'array values over a predefined timespan, use '
                    '`time_series=value_array` instead.'
)
assert isinstance(kwargs['dm_const'], (int, float)), err_str
self.dm[0] = kwargs['dm_const']
self._dm_init = kwargs['dm_const'] # backup for resetting
elif 'time_series' in kwargs:
# check for correct type:
err_str = (
self._base_err
+ self._arg_err.format('time_series')
+ 'If the part is set with predefined values over a '
'timespan, `time_series=X` has to be given. `X` has to '
'be a Pandas Series with the index column filled with '
'timestamps which have to outlast the simulation '
'timeframe. The massflow to set has to be given in '
'the first column (index 0). To set a constant massflow, '
'use `dm_const` instead.'
)
assert isinstance(
kwargs['time_series'], (pd.Series, pd.DataFrame)
), err_str
assert isinstance(
kwargs['time_series'].index, pd.DatetimeIndex
), err_str
assert isinstance(
kwargs['time_series'].index.values, np.ndarray
), err_str
assert (
kwargs['time_series'].index.values.dtype
) == 'datetime64[ns]', err_str
self.dm_given = True
self._models.assign_boundary_cond(
time_series=kwargs['time_series'],
open_port=None,
part=self.name,
variable_name='dm',
array_index=0,
)
else:
# else raise error
err_str = (
self._base_err
                    + self._arg_err.format('dm_const OR time_series')
                    + 'If `no_control=True` is set, the massflow in [kg/s] has '
                    'either to be given with `dm_const` as a constant '
                    'massflow or with `time_series` as a time dependent '
                    'Pandas Series.'
                )
                assert 'dm_const' in kwargs or 'time_series' in kwargs, err_str
self.control_req = False
self.ctrl_defined = True
else:
            err_str = (
                'An error during the initialization of '
                + self.name
                + ' occurred! Please check the spelling and type of all '
                'arguments passed to the parts `init_part()`!'
            )
            raise TypeError(err_str)
# construct list of differential input argument names IN THE CORRECT
# ORDER!!!
# regex to remove strings: [a-zA-Z_]*[ ]*=self.
self._input_arg_names_sorted = [
'ports_all',
'_port_link_idx',
'T',
'res',
'res_dm',
'dm',
'stepnum',
]
# update init status:
self.initialized = True
def _reset_to_init_cond(self):
self.dm[0] = self._dm_init
def _get_flow_routine(
self, port, parent_port=None, subnet=False, **kwargs
):
"""
Returns the massflow calculation routine for the port of the current
part to the topology construction. The massflow calculation routine has
to look like:
routine = (memory_view_to_target_port,
operation_id,
memory_view_to_port1, memory_view_to_port2, ...)
with target_port being the port which has to be calculated and port1
and port2 being the other/source ports which **don't** have to be
calculated with this routine! These source ports **must be given**
when the routine is called.
Parameters:
-----------
port : string
Port name of the port which shall be calculated (target port).
"""
# get port index slice of target port to create memory view:
trgt_idx = self._get_topo_cond(port)
# if part is the starting point of a net (this part as a pump is ALWAYS
# the starting point of a primary flow net!) OR this part is hitting
# itself again in the topology (circular net):
if parent_port is None:
# for pumps there is no operation id, since they will always
# be the parent part of the whole net and will thus define the nets
# massflow, won't have another parent part and won't need any
# operation routine!
pass
elif self.name == kwargs['parent_pump']:
return ()
# pump not at start of net:
else:
# this will only raise an error and then make the topology analyzer
# break:
err_str = (
'Pump ' + self.name + ' was added to a flow network '
'where another pump is already existing. There must '
'not be two pumps in the same flow network!'
)
raise TypeError(err_str)
# set all to fully solved since Pump only has one massflow cell
# self._solved_ports = self.port_names[:]
# self._cnt_open_prts = 0
# # this stays always False for Pump!
# self.break_topology = False
# # remove part from hydr_comps if completely solved:
# if self._cnt_open_prts == 0:
# self._models._hydr_comps.remove(self.name)
# update solved ports list and counter stop break:
self._solved_ports.append(port)
self._cnt_open_prts = self.port_num - len(self._solved_ports)
        # this always stays False for Pump!
self.break_topology = False
# remove part from hydr_comps if completely solved:
if self._cnt_open_prts == 0:
self._models._hydr_comps.remove(self.name)
# save topology parameters to dict for easy information lookup:
net = 'Subnet' if subnet else 'Flownet'
operation_routine = 'Primary massflow defining part of flow net'
parent_part = self.name
# add port dict for current port and fill it:
if port not in self.info_topology:
self.info_topology[port] = dict()
self.info_topology[port].update(
{
'Net': net,
'Massflow': self._dm_io.reshape(-1)[trgt_idx],
'Calculation routine': operation_routine,
'Source part': parent_part,
'Source port(s)': 'No source ports',
'Connected part': (
self._models.port_links[self.name + ';' + port].split(';')[
0
]
),
'Connected port': (
self._models.port_links[self.name + ';' + port].split(';')[
1
]
),
'Parent pump/part': kwargs['parent_pump'],
'Pump side': kwargs['pump_side'],
}
)
def solve(self, timestep):
"""
Solves the Pump model. That means getting values from connected ports,
inverting them to pass them on without changing them and saving the
resulting vector to the temperature array.
"""
# self.res_dm[self.stepnum, 0] = self.dm[0]
#
# # also quite fast, like numba
# # self.T[:] = self._port_getter(self._models.ports)[::-1]
# # numba-version fast speed (57ms)
# _get_p_arr_pump(self._models.ports_all, self._port_link_idx, self.T)
# self.res[self.stepnum] = self.T
# medium speed (about 100ms)
# self.T[:] = self._port_arrgetter(self._models.ports_all)[::-1]
# by far the slowest (2x time compared to numba)
# self.T = self._models.ports_all[self._models.plinks_arr[self.port_ids]][::-1]
_pf.solve_pump(
ports_all=self._models.ports_all,
port_link_idx=self._port_link_idx,
T=self.T,
res=self.res,
res_dm=self.res_dm,
dm=self.dm,
stepnum=self.stepnum,
)
def _process_cv(self, ctrl_inst):
self.dm[:] = (
ctrl_inst.cv
if self._llim <= ctrl_inst.cv <= self._ulim
else self._ulim
if ctrl_inst.cv > self._ulim
else self._llim
)
def draw_part(self, axis, timestep, draw, animate=False):
"""
Draws the current part in the plot environment, using vector
transformation to rotate the part drawing.
"""
# get part start and end position from plot info dict:
pos_start = self.info_plot['path'][0]['start_coordinates']
pos_end = self.info_plot['path'][0]['end_coordinates']
# get direction vector from info dict:
vec_dir = self.info_plot['path'][0]['vector']
# get part rotation angle from the drawing direction vector (vector
# from part start to part end in drawing):
rot_angle = self._models._angle_to_x_axis(vec_dir)
# get length of part:
vec_len = np.sqrt((vec_dir * vec_dir).sum())
# vector to line-circle intersection at top and bottom of the drawing
# (not rotated):
vec_top = np.array([vec_len / 2, vec_len / 2])
vec_bot = np.array([vec_len / 2, -vec_len / 2])
# rotate these vectors:
vec_top = self._models._rotate_vector(vec_top, rot_angle)
vec_bot = self._models._rotate_vector(vec_bot, rot_angle)
# construct top and bottom points:
pos_top = pos_start + vec_top
pos_bot = pos_start + vec_bot
# construct x- and y-grid for lines (from top point to end point to bot
# point):
x_grid = np.array([pos_top[0], pos_end[0], pos_bot[0]])
y_grid = np.array([pos_top[1], pos_end[1], pos_bot[1]])
# only draw if true
if draw:
# construct circle around midpoint of start and pos:
circ = _plt.Circle(
tuple((pos_start + np.asarray(pos_end)) / 2),
radius=np.sqrt((vec_dir * vec_dir).sum()) / 2,
facecolor='None',
edgecolor=[0, 0, 0],
linewidth=self.info_plot['path_linewidth'],
zorder=5,
animated=animate,
)
# add circle to plot
axis.add_patch(circ)
# add lines to plot
axis.plot(
x_grid,
y_grid,
color=[0, 0, 0],
linewidth=self.info_plot['path_linewidth'],
zorder=5,
animated=animate,
)
# construct name and massflow string:
txt = (r'{0} \n$\dot{{m}} = $ {1:.3f} $\,$kg/s').format(
self.name, np.round(self.res_dm[timestep][0], 3)
)
# construct name and massflow string constructor for animation
if animate:
txt_constr = (
self.name + r'\n$\dot{{m}} = $' + r'{0:6.3f}$\,$kg/s'
)
# get view to part of array where to get text from:
arr_view = self.res_dm[:, 0:1]
# get offset vector depending on rotation of pump to deal with
# none-quadratic form of textbox to avoid overlapping. only in the
# range of +/-45° of pos. and neg. x-axis an offset vec length of
        # -20 is allowed, else -40:
offset = (
-20
if (
0 <= rot_angle <= 45 / 180 * np.pi
or 135 / 180 * np.pi <= rot_angle <= 225 / 180 * np.pi
or rot_angle >= 315 / 180 * np.pi
)
else -40
)
# get text offset from bottom point of pump by vector rotation:
txt_offset = tuple(
self._models._rotate_vector(np.array([0, offset]), rot_angle)
)
ann = axis.annotate(
txt,
xy=(pos_bot),
xytext=txt_offset,
textcoords='offset points',
ha='center',
va='center',
animated=animate,
)
if animate:
ann.set_animated(True)
return [[ann, txt_constr, arr_view]]
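# Hedged usage sketch (names and values are assumptions, not from the source):
# a Pump is registered through the simulation environment like other parts,
# e.g.:
# simenv.add_part(
#     Pump,
#     name='pump_ff',
#     material='carbon_steel',
#     pipe_specs={'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN40'}},
#     start_massflow=0.0,
#     lower_limit=0.0,
#     upper_limit=1.5,
# )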
```
#### File: multisim/_precompiled/dimensionless_numbers.py
```python
import numba as nb
# from numba import jit, njit, float64, int32
import numpy as np
nb.NUMBA_DISABLE_JIT = 0
GLOB_NOGIL = True
GLOB_PARALLEL = True
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Re_water(v, L, ny, Re):
"""
Calculate Reynolds number. Result is written to referenced variable Re.
Parameters
----------
v : TYPE
DESCRIPTION.
L : TYPE
DESCRIPTION.
ny : TYPE
DESCRIPTION.
Re : TYPE
DESCRIPTION.
Returns
-------
None.
"""
Re[:] = np.abs(v) * L / ny
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def Re_water_return(v, L, ny):
"""
Calculate Reynolds number. Result is returned.
Parameters
----------
v : TYPE
DESCRIPTION.
L : TYPE
DESCRIPTION.
ny : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
return np.abs(v) * L / ny
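# Quick plausibility check (illustrative values): water with ny ≈ 1e-6 m²/s
# flowing at v = 1 m/s over a characteristic length L = 0.02 m gives
# Re = 1 * 0.02 / 1e-6 = 20,000, i.e. clearly turbulent flow.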
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def rayleigh_number(T_s, T_inf, Pr, ny, Kelvin, flow_length):
"""
Calculate the Rayleigh number for the given parameters [1]_.
Parameters:
-----------
T_s : float, int, np.ndarray
Surface temperature in [°C] or [K].
T_inf : float, int, np.ndarray
Surrounding fluid temperature in [°C] or [K].
Pr : float, int, np.ndarray
Prandtl number of the surrounding fluid at the mean temperature:
$$(T_s + T_{inf}) / 2$$
For (dry) air this can be set to a constant value of ca.:
$$Pr = 0.708$$
ny : float, int, np.ndarray
Kinematic viscosity in [m^2 / s] of the surrounding fluid at the mean
temperature: $$(T_s + T_{inf}) / 2$$
Kelvin : float, int
If temperatures `T_s` and `T_inf` are given in [°C], Kelvin has to be
set to `Kelvin=273.15`. If `T_s` and `T_inf` are given in [K], Kelvin
has to be set to `Kelvin=0`
flow_length : float, int
Specific flow length in [m]. Has to be calculated depending on the part
geometry. See function calc_flow_length() for further information.
Notes:
------
.. [1] VDI Wärmeatlas 2013, VDI-Gesellschaft Verfahrenstechnik und
Chemieingenieurwesen, Düsseldorf, Deutschland, p. 754
"""
# Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
# eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
# with 1/T_inf (F1 eq (2)):
return (
np.abs(T_s - T_inf)
* 9.81
* flow_length ** 3
* Pr
/ ((T_inf + Kelvin) * ny ** 2)
)
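# Hedged usage sketch (illustrative values, not from the source): free
# convection of dry air (Pr ≈ 0.708, ny ≈ 1.8e-5 m²/s) around a 60 °C surface
# in a 20 °C environment with a flow length of 0.5 m:
# Ra = rayleigh_number(
#     T_s=60.0, T_inf=20.0, Pr=0.708, ny=1.8e-5, Kelvin=273.15, flow_length=0.5
# )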
```
#### File: multisim/_precompiled/precomp_funs.py
```python
import numba as nb
from numba import jit, njit, float64, int32
import numpy as np
nb.NUMBA_DISABLE_JIT = 0
GLOB_NOGIL = True
GLOB_PARALLEL = True
# %% Simulation environment processing
# f.i. port and part flow processing, material properties etc..
# %%% Simulation Env. port updating
@jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def upd_p_arr(ports_all, port_ids, values, _port_own_idx):
"""
Updates the array which stores the values of all ports. This only updates
the port values of a single part per call!
"""
# get values from part result arrays at ports and pass them to the model
# environment ports array (using flattened array is about 6% slower than
# direct indexing, but allows 1D and 2D indexing):
for i in range(_port_own_idx.shape[0]):
ports_all[port_ids[i]] = values.flat[_port_own_idx[i]]
@njit(nogil=GLOB_NOGIL, cache=True)
def _upddate_ports_interm(ports_all, trgt_indices, ports_src, source=0):
"""
This function updates the array which stores the port values of all parts
with the intermediate result values of the current step stored in
`ports_src`. If more than one intermediate step is calculated during the
solver run, these can be update by passing the number of the intermediate
result to `source=X`, where X is an integer value starting with 0 for the
first intermediate step.
"""
values = ports_src[source]
i = 0
for val in values:
ports_all[trgt_indices[i]] = val[0]
i += 1
@njit(nogil=GLOB_NOGIL, cache=True)
def _upddate_ports_result(
ports_all, trgt_indices, ports_src, stepnum, src_list
):
"""
This function updates the array which stores the port values of all parts
with the final result values of the current step stored in `ports_src`.
"""
i = 0
for val in ports_src:
ports_all[trgt_indices[i]] = val[
(stepnum * src_list[i][0]) + src_list[i][1]
]
i += 1
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _port_values_to_idx(ports_all, port_link_idx, port_own_idx, out):
"""
Values of requested ports are saved to a non-contiguous array (port values
are only stored at specific locations).
"""
for i in range(port_link_idx.size):
out.flat[port_own_idx[i]] = ports_all[port_link_idx[i]]
@nb.njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _port_values_to_cont(ports_all, port_link_idx, out):
"""
Values of requested ports are saved to a contiguous array.
"""
for i in range(port_link_idx.size):
out.flat[i] = ports_all[port_link_idx[i]]
# %%% Simulation Env. in-part flow processing:
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_flow_invar(
process_flows, dm_io, dm_top, dm_bot, dm_port, stepnum, res_dm
):
"""
Process massflows.
Massflows are being processed for parts where the massflow is defined as
invariant.
"""
if process_flows[0]:
if dm_io[0] >= 0.0: # massflow from the top
# get massflow though top cell-cell border:
dm_top[1:] = dm_io[0]
dm_bot[:-1] = 0.0 # set massflow from the bottom to zero
else: # massflow from the bottom:
# get massflow though bottom cell-cell border (and *-1!):
dm_bot[:-1] = -dm_io[0]
dm_top[1:] = 0.0 # set massflow from the top to zero
# get ports massflow (only for the positive inflow):
if dm_io[0] >= 0.0:
dm_port[0] = dm_io[0]
dm_port[-1] = 0.0
else:
dm_port[-1] = -dm_io[0]
dm_port[0] = 0.0
res_dm[stepnum[0]] = dm_io[0]
# return process flows bool to disable processing flows until next step
return False
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_flow_invar_fast(
process_flows, dm_io, dm_top, dm_bot, stepnum, res_dm
):
"""
Process massflows for parts with invariant flows.
Massflows are being processed for parts where the massflow is defined as
invariant.
"""
if process_flows[0]:
# preallocate arrays which need assignment:
dm_port = np.empty(2)
if dm_io[0] >= 0.0: # massflow from the top
# get massflow though top cell-cell border:
dm_top[1:] = dm_io[0]
dm_bot[:-1] = 0.0 # set massflow from the bottom to zero
else: # massflow from the bottom:
# get massflow though bottom cell-cell border (and *-1!):
dm_bot[:-1] = -dm_io[0]
dm_top[1:] = 0.0 # set massflow from the top to zero
# get ports massflow (only for the positive inflow):
if dm_io[0] >= 0.0:
dm_port[0] = dm_io[0]
dm_port[-1] = 0.0
else:
dm_port[-1] = -dm_io[0]
dm_port[0] = 0.0
res_dm[stepnum[0]] = dm_io[0]
# return process flows bool to disable processing flows until next step
return False, dm_port
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_flow_var(
process_flows,
dm_io,
dm,
dm_top,
dm_bot,
dm_port,
port_own_idx,
stepnum,
res_dm,
):
"""
Process massflows for parts with variant flows.
Massflows are being processed for parts where the massflow is defined as
variant.
"""
if process_flows[0]:
        # massflow through ports is acquired by update_FlowNet
# get massflow through each cell (sum up in/out dm of ports and then
# run cumulative sum over all cells)
# copy I/O flows to NOT alter the I/O array during calculations:
# this is the procedure for collapsed and flattened dm_io.
dm[:] = 0.0
cs = np.cumsum(dm_io)
for i in range(port_own_idx.size - 1):
dm[port_own_idx[i] : port_own_idx[i + 1]] += cs[i]
# get port values
dm_port[:] = dm_io
# get massflow though top cell-cell border:
dm_top[1:] = dm[:-1]
# get massflow though bottom cell-cell border (and *-1!):
dm_bot[:-1] = -dm[:-1]
# remove negative values:
dm_top[dm_top < 0] = 0.0
dm_bot[dm_bot < 0] = 0.0
dp = dm_port.ravel() # flattened view to ports for 2D indexing
dp[dp < 0.0] = 0.0
# set last value of dm to be the same as the value of the previous cell
# to avoid having 0-massflow in it due to cumsum:
dm[-1] = dm[-2]
res_dm[stepnum[0]] = dm
# return process flows bool to disable processing flows until next step
return False
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_flow_multi_flow(
process_flows, dm_io, dm_top, dm_bot, dm_port, stepnum, res_dm
):
"""
    Process massflows for parts with multiple flow channels.
Massflows are being processed for parts which have multiple separated flow
channels. The massflow in each flow channel must be invariant.
    The massflow through ports in `dm_io` is acquired by update_FlowNet.
"""
# if flows were not yet processed in this timestep
if process_flows[0]:
# loop over channels and get each channel's massflow
for i in range(dm_io.size):
if dm_io[i] >= 0.0: # massflow from in port (top)
# get massflow though top cell-cell border:
dm_top[1:, i] = dm_io[i]
dm_bot[:-1, i] = 0.0 # set massflow from the bottom to zero
# set port massflows. dm_port has 2 cells per flow channel,
# first is in, second is out. Thus if flow from in port, set
# flow to in and out to 0.
dm_port[i * 2] = dm_io[i]
dm_port[i * 2 + 1] = 0.0
else: # massflow from out port (bottom):
# get massflow though bottom cell-cell border (and *-1!):
dm_bot[:-1, i] = -dm_io[i] # -1 makes this a pos. massflow!
dm_top[1:, i] = 0.0 # set massflow from the top to zero
# set port massflows. dm_port has 2 cells per flow channel,
# first is in, second is out. Thus if flow from out port, set
# flow to out (make it positive!) and in to 0.
dm_port[i * 2] = 0.0
dm_port[i * 2 + 1] = -dm_io[i] # dm port is ALWAYS positive!
# set current steps flow to result
res_dm[stepnum[0]] = dm_io
# return process flows bool to disable processing flows until next step
return False
# %%% Simulation Env. in-part port temperatures processing:
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def _process_ports_collapsed(
ports_all,
port_link_idx,
port_own_idx,
T,
mcp,
UA_port,
UA_port_wll,
A_p_fld_mean,
port_gsp,
grid_spacing,
lam_T,
cp_port,
lam_port_fld,
T_port,
):
"""
Values of requested ports are saved to results array. Only works for parts
which use collapsed port arrays.
"""
dT_cond_port = np.zeros(port_own_idx.shape)
for i in range(port_link_idx.size):
p_val = ports_all[port_link_idx[i]]
idx = port_own_idx[i]
# collapsed arrays only take index i:
T_port.flat[i] = p_val
cp_port.flat[i] = cp_water(p_val)
lam_port_fld.flat[i] = lambda_water(p_val)
# lam_fld_own_p[i] =
# get total port heat conduction:
UA_port.flat[i] = (
A_p_fld_mean[i]
/ (
+(port_gsp[i] / (2 * lam_port_fld[i]))
+ (grid_spacing / (2 * lam_T.flat[idx]))
)
+ UA_port_wll[i]
)
dT_cond_port.flat[i] = (
UA_port.flat[i] * (p_val - T.flat[idx]) / mcp.flat[idx]
)
return dT_cond_port
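# Note on the UA_port term above: it is the series connection of half a
# neighbouring-part grid cell (port_gsp / (2 * lam_port_fld)) and half an own
# grid cell (grid_spacing / (2 * lam_T)), referred to the mean port area, in
# parallel with the wall conduction term UA_port_wll. Illustrative check with
# assumed values: A = 1e-3 m² and two half-cells of 0.05 m / 0.6 W/(m K) each
# give UA ≈ 1e-3 / 0.1667 ≈ 0.006 W/K before adding UA_port_wll.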
@njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_ports(
ports_all,
port_link_idx,
port_own_idx,
T,
mcp,
UA_port,
UA_port_wll,
A_p_fld_mean,
port_gsp,
grid_spacing,
lam_T,
cp_port,
lam_port_fld,
T_port,
):
"""
Values of requested ports are saved to results array.
"""
dT_cond_port = np.zeros(port_own_idx.shape)
for i in range(port_link_idx.size):
p_val = ports_all[port_link_idx[i]]
idx = port_own_idx[i]
T_port.flat[idx] = p_val
cp_port.flat[idx] = cp_water(p_val)
# collapsed arrays only take index i:
lam_port_fld.flat[idx] = lambda_water(p_val)
# lam_fld_own_p[i] =
# get total port heat conduction:
UA_port.flat[idx] = (
A_p_fld_mean.flat[idx]
/ (
+(port_gsp.flat[idx] / (2 * lam_port_fld.flat[idx]))
+ (grid_spacing / (2 * lam_T.flat[idx]))
)
+ UA_port_wll.flat[idx]
)
dT_cond_port.flat[i] = UA_port.flat[idx] * (p_val - T[idx]) / mcp[idx]
return dT_cond_port
# %%% Simulation Env. in-part material properties processing:
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props_ext_view(T_ext, cp_T, lam_T, rho_T, ny_T):
"""
Get the relevant temperature dependent material properties of water for
parts which use the extended array format:
cp: Specific heat capacity in [J/(kg K)]
lam: Heat conductivity in [W/(m K)]
rho: Density in [kg/m^3]
        ny: Kinematic viscosity in [m^2 / s]
"""
get_cp_water(T_ext, cp_T) # extended array for top/bot views in adv.
get_lambda_water(T_ext[1:-1], lam_T) # non-ext. array for other mat.
get_rho_water(T_ext[1:-1], rho_T) # props. since no views needed here
get_ny_water(T_ext[1:-1], ny_T)
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props_ext(T_ext):
"""
Get the relevant temperature dependent material properties of water for
parts which use the extended array format:
cp: Specific heat capacity in [J/(kg K)]
lam: Heat conductivity in [W/(m K)]
rho: Density in [kg/m^3]
        ny: Kinematic viscosity in [m^2 / s]
"""
# cp: extended array for top/bot views in adv.
# non-ext. array for other mat. props. since no views needed here
return (
cp_water(T_ext),
lambda_water(T_ext[1:-1]),
rho_water(T_ext[1:-1]),
ny_water(T_ext[1:-1]),
)
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props(T, cp_T, lam_T, rho_T, ny_T):
"""
Get the relevant temperature dependent material properties of water:
cp: Specific heat capacity in [J/(kg K)]
lam: Heat conductivity in [W/(m K)]
rho: Density in [kg/m^3]
        ny: Kinematic viscosity in [m^2 / s]
"""
get_cp_water(T, cp_T)
get_lambda_water(T, lam_T) # non-ext. array for mat.
get_rho_water(T, rho_T) # props. since no views needed here
get_ny_water(T, ny_T)
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props_ext(T_ext, V_cell, cp_T, rho_T, mcp_wll, rhocp, mcp, ui):
"""
    Calculate each cell's specific temperature dependent properties for
parts which use the extended array format:
rho*cp: Volume specific heat capacity in [J / (K m^3)]
m*cp: heat capacity (of fluid AND wall) in [J / K]
u_i: mass specific inner energy in [J / kg]
"""
# precalculate values which are needed multiple times:
# volume specific heat capacity:
rhocp[:] = rho_T * cp_T[1:-1]
# heat capacity of fluid AND wall:
mcp[:] = V_cell * rhocp + mcp_wll
# mass specific inner energy:
ui[:] = cp_T[1:-1] * T_ext[1:-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props_fld(
T_ext_fld, V_cell, cp_T, rho_T, rhocp_fld, mcp_fld, ui_fld
):
"""
    Calculate each cell's fluid specific temperature dependent properties
for parts which use the extended array format:
rho*cp: Volume specific heat capacity in [J / (K m^3)]
m*cp: heat capacity (of fluid) in [J / K]
u_i: mass specific inner energy in [J / kg]
"""
# precalculate values which are needed multiple times:
# volume specific heat capacity:
rhocp_fld[:] = rho_T * cp_T[1:-1]
    # heat capacity of fluid:
mcp_fld[:] = V_cell * rhocp_fld
# mass specific inner energy:
ui_fld[:] = cp_T[1:-1] * T_ext_fld[1:-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def specific_inner_energy_wll(T_wll, cp_wll, ui):
"""
    Calculate each wall cell's mass specific inner energy for parts which use
    the extended array format:
        u_i: mass specific inner energy in [J / kg]
"""
# mass specific inner energy:
ui[:] = cp_wll * T_wll
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props(T, V_cell, cp_T, rho_T, mcp_wll, rhocp, mcp, ui):
"""
    Calculate each cell's specific temperature dependent properties:
rho*cp: Volume specific heat capacity in [J / (K m^3)]
m*cp: heat capacity (of fluid AND wall) in [J / K]
u_i: mass specific inner energy in [J / kg]
"""
# precalculate values which are needed multiple times:
# volume specific heat capacity:
rhocp[:] = rho_T * cp_T
# heat capacity of fluid AND wall:
mcp[:] = V_cell * rhocp + mcp_wll
# mass specific inner energy:
ui[:] = cp_T * T
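    # Illustrative magnitudes (assumed values): water at ~20 °C with
    # rho ≈ 998 kg/m³ and cp ≈ 4184 J/(kg K) gives rhocp ≈ 4.18e6 J/(K m³);
    # with V_cell = 1e-3 m³ and mcp_wll = 100 J/K this yields mcp ≈ 4276 J/K.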
@njit(nogil=GLOB_NOGIL, cache=True)
def _lambda_mean_view(lam_T, out):
"""
Get mean lambda of two neighbouring cells for the first axis of an
n-dimensional grid.
This is **NOT** the arithmetic mean, since the mean value of two heat
    conductivities in a series circuit is calculated from the sum of the
    inverses of the heat conductivities (harmonic mean).
    For example, for two heat conductivities `lam_1=40` and `lam_2=80`, each over
a length of `L=0.2`, the mean value is:
eq:: $$lam_{mean} = 2*L / (L/lam_1 + L/lam_2) = 2 / (1/lam_1 + 1/lam_2)$$
where the second right hand side of the equation is only true for
equally spaced grids.
"""
out[:] = 2 * lam_T[:-1] * lam_T[1:] / (lam_T[:-1] + lam_T[1:])
@njit(nogil=GLOB_NOGIL, cache=True)
def _lambda_mean(lam_T):
"""
Get mean lambda of two neighbouring cells for the first axis of an
n-dimensional grid.
This is **NOT** the arithmetic mean, since the mean value of two heat
    conductivities in a series circuit is calculated from the sum of the
    inverses of the heat conductivities (harmonic mean).
    For example, for two heat conductivities `lam_1=40` and `lam_2=80`, each over
a length of `L=0.2`, the mean value is:
eq:: $$lam_{mean} = 2*L / (L/lam_1 + L/lam_2) = 2 / (1/lam_1 + 1/lam_2)$$
where the second right hand side of the equation is only true for
equally spaced grids.
"""
return 2 * lam_T[:-1] * lam_T[1:] / (lam_T[:-1] + lam_T[1:])
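# Worked example for the docstring values: lam_1 = 40 and lam_2 = 80 W/(m K)
# give lam_mean = 2 / (1/40 + 1/80) = 53.33 W/(m K) (the harmonic mean),
# which is smaller than the arithmetic mean of 60 W/(m K).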
# %% U*A values calculation:
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb(A_cell, grid_spacing, lam_mean, UA_tb_wll, out):
"""
Get the UA-value for plate-like geometry, for example in a pipe or TES,
between neighboring cells.
"""
# get UA value between cells. UA value of walls added (parallel circuit).
# UA is extended array to enable using views for calculation:
out[1:-1] = A_cell / grid_spacing * lam_mean + UA_tb_wll
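    # Illustrative check (assumed values): A_cell = 0.01 m², grid_spacing =
    # 0.1 m and lam_mean = 0.6 W/(m K) contribute 0.01 / 0.1 * 0.6 = 0.06 W/K
    # from the fluid, plus the wall term UA_tb_wll.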
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb_fld(A_cell, grid_spacing, lam_mean, out):
"""
Get the UA-value for plate-like geometry, for example in a pipe or TES,
between neighboring cells.
"""
# get UA value between cells. UA value of walls added (parallel circuit).
# UA is extended array to enable using views for calculation:
out[1:-1] = A_cell / grid_spacing * lam_mean
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb_wll(UA_tb_wll, out):
"""
Get the UA-value for plate-like geometry, for example in a pipe or TES,
between neighboring cells.
"""
# get UA value between cells. UA value of walls added (parallel circuit).
# UA is extended array to enable using views for calculation:
out[1:-1] = UA_tb_wll
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def buoyancy_byNusselt(T, ny, d_i, lam_mean):
"""
Calculate the buoyancy driven heat flow by Nusselt approximation.
Calculate the buoyancy driven heat flow inside a vertically stratified
thermal energy storage tank by using Nusselt approximations to calculate a
correction factor for the heat conductivity.
"""
# get temperature difference for all cells (temperature below last cell
# is 0, thus don't use the last cell):
# T_diff = T_bot[:-1] - T[:-1] # replaced with stencil operation below:
T_diff = T[1:] - T[:-1]
# if there is no temperature inversion, skip this function:
if np.all(T_diff <= 0):
return
# only use the positive difference (inverted cells):
T_diff[T_diff < 0] = 0
# buoyancy correction factor to get the buoyant flow from fluid-fluid
# instead of a solid-fluid horizontal plate:
corr_f = 20
# preallocate arrays:
shape = T.shape[0] - 1
Nu = np.zeros(shape)
# free convection over a horizontal plate, VDI F2 3.1:
# get material properties for all bottom cells:
Pr = Pr_water_return(T[1:])
beta = beta_water_return(T[1:])
# to deal with the minimum in water density at 4°C, just set negative
# values to pos.
beta[beta < 0] *= -1
# get characteristic length:
L = d_i / 4
# get Rayleigh number
Ra = Pr * 9.81 * L ** 3 * beta * T_diff / ny[1:] ** 2
# get Rayleigh number with Prandtl function, VDI F2 eq (9):
Ra_f2 = Ra * (1 + (0.322 / Pr) ** (11 / 20)) ** (-20 / 11)
# get bool index for laminar or turbulent convection:
turb = Ra_f2 > 7e4
# get Nusselt number, following VDI Wärmeatlas 2013 F2 eq (7) and (8):
Nu[~turb] = 0.766 * (Ra_f2[~turb]) ** 0.2
Nu[turb] = 0.15 * (Ra_f2[turb]) ** (1 / 3)
# get bool index for Nusselt number > 1 to ignore lower values
Nu_idx = Nu >= 1
# multiplicate lambda value between cells with the Nusselt number. The
# calculation of the alpha value is implemented in the calculation of
# the UA value.
lam_mean[Nu_idx] *= Nu[Nu_idx] * corr_f
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def buoyancy_AixLib(T, cp, rho, ny, grid_spacing, lam_mean):
"""
Calculate the buoyancy driven heat flow by conductivity plus.
Calculate the buoyancy driven heat flow inside a vertically stratified
    thermal energy storage tank by using AixLib based empirical relations for
an additional heat conductivity [1]_.
Sources:
[1] : https://github.com/RWTH-EBC/AixLib/blob/master/AixLib/Fluid/Storage/BaseClasses/Bouyancy.mo
"""
# get temperature difference for all cells (temperature below last cell
# is 0, thus don't use the last cell):
# T_diff = T_bot[:-1] - T[:-1] # replaced with stencil operation below:
T_diff = T[1:] - T[:-1]
# if there is no temperature inversion, skip this function:
if np.all(T_diff <= 0):
return
# only use the positive difference (inverted cells):
T_diff[T_diff < 0] = 0
# kappa is assumed to be constant at 0.4, g at 9.81
kappa = 0.4
g = 9.81
# get material properties for all bottom cells:
beta = beta_water_return(T[1:])
# to deal with the minimum in water density at 4°C, just set negative
# values to pos.
beta[beta < 0] *= -1
# calculate lambda surplus due to buoyancy
lambda_plus = (
2
/ 3
* rho
* cp
* kappa
* grid_spacing ** 2
* np.sqrt(np.abs(-g * beta * T_diff / grid_spacing))
)
# add up to lambda mean
lam_mean += lambda_plus
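# Hedged illustration (not part of the library API): evaluates the AixLib
# conductivity surplus of buoyancy_AixLib above for one inverted cell pair.
# rho, cp and beta are rough assumptions for water; grid spacing and T_diff
# are example values.
def _example_buoyancy_aixlib_lambda_plus(T_diff=1.0, grid_spacing=0.1):
    import numpy as np
    rho = 998.0     # density [kg/m^3], assumed
    cp = 4180.0     # specific heat capacity [J/(kg K)], assumed
    beta = 2.0e-4   # isobaric expansion coefficient [1/K], assumed
    kappa, g = 0.4, 9.81
    # additional effective conductivity in W/(m K) added to lam_mean:
    return (2 / 3 * rho * cp * kappa * grid_spacing ** 2
            * np.sqrt(np.abs(-g * beta * T_diff / grid_spacing)))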
# %% Simulation Env. in-part von Neumann stability calculation:
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_invar(
part_id,
stability_breaches,
UA_tb,
UA_port,
UA_amb_shell,
dm_io,
rho_T,
rhocp,
grid_spacing,
port_subs_gsp,
A_cell,
A_port,
A_shell, # areas to backcalc diffusivity from UA
r_total,
V_cell,
step_stable, # check_vN, , # system wide bools
vN_max_step,
max_factor,
stepnum,
timestep, # system wide vars
):
r"""
Check for L2/von Neumann stability for diffusion and massflows.
Massflows are checked for parts where the massflow is defined as
invariant, that means where all cells in the part share the same massflow.
Notes
-----
Von Neumann stability for conduction:
.. math::
r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
\text{With the thermal diffusivity: } \alpha = \frac{
\lambda}{\rho c_{p}}\\
\text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
\text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
Von Neumann stability for advection (CFL condition):
.. math::
    CFL = \frac{|\dot{m}| \, \Delta t}{\rho \, V_{cell}} \leq 1
"""
# save von Neumann stability values for the cells: multiply the cells'
# relevant total x-gridspacing with the maximum UA-value (this gives a
# substitute heat conduction, i.e. a total diffusion coefficient) and
# the inverse maximum rho*cp value (of all cells! this may result in a
# worst-case result with a security factor of up to about 4.2%) to get
# the substitute diffusivity, then multiply with the timestep and
# divide by the gridspacing (not gridspacing**2, since one gridspacing
# cancels when backcalculating the diffusivity from UA) and save to array:
vN_diff = np.empty(3)
# rhocpmax = rhocp.max()
# For calculation see docstring
# replaced old and faulty calculations with missing Area
# vN_diff[0] = (UA_tb.max() / rhocpmax) * timestep / grid_spacing
vN_diff[0] = (
np.max(UA_tb[1:-1] / rhocp[1:]) * timestep / (A_cell * grid_spacing)
)
# for the next two with non-constant gridspacing, find max of UA/gsp:
# vN_diff[1] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
vN_diff[1] = (
np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max()
)
# vN_diff[2] = UA_amb_shell.max() / r_total / rhocpmax * timestep
vN_diff[2] = np.max(UA_amb_shell / rhocp) * timestep / (A_shell * r_total)
# get maximum:
vN_diff_max = vN_diff.max()
# for massflow:
# get maximum cfl number (this is the von Neumann stability condition
# for massflow through cells), again with total max. of rho to get a
# small security factor for worst case:
Vcellrhomax = V_cell * rho_T.max()
vN_dm_max = np.abs(dm_io).max() * timestep / Vcellrhomax
# get maximum von Neumann stability condition values:
# get dividers for maximum stable timestep to increase or decrease
# stepsize:
vN_diff_mult = vN_diff_max / 0.5
vN_dm_mult = vN_dm_max / 1
# get biggest divider:
vN_div_max = max(vN_diff_mult, vN_dm_mult)
# check if any L2 stability conditions are violated:
if vN_div_max > 1:
# only do something if von Neumann checking is active, else just
# print an error but go on with the calculation:
# if check_vN:
if True:
# if not stable, set stable step bool to False
step_stable[0] = False
stability_breaches += 1 # increase breaches counter for this part
# calculate required timestep to make this part stable with a
# security factor of 0.95:
local_vN_max_step = timestep / vN_div_max * 0.95
# if this is the smallest step of all parts needed to make all
# parts stable save it to maximum von Neumann step:
if vN_max_step[0] > local_vN_max_step:
vN_max_step[0] = local_vN_max_step
# # increase error weight of this part by the factor of 1.1 to
# # avoid running into this error again:
# self._trnc_err_cell_weight *= 1.1 # NOT good
# adjust max factor if vN was violated:
if max_factor[0] > 1.05:
max_factor[0] = max_factor[0] ** 0.99
if max_factor[0] < 1.05: # clip to 1.05
max_factor[0] = 1.05
else:
print(
'\nVon Neumann stability violated at step',
stepnum,
'and part with id',
part_id,
'!',
)
raise ValueError
# return new values (or old values if unchanged):
return step_stable, vN_max_step, max_factor
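# Hedged usage sketch (not part of the library API): back-of-the-envelope
# estimate of the maximum stable timestep the check above would allow for a
# part with uniform cell values. All numeric values are assumptions for a
# small water-filled pipe cell.
def _example_max_stable_timestep():
    UA_tb = 50.0          # W/K conduction between two cells, assumed
    rhocp = 4.15e6        # J/(m^3 K) for water, approx.
    A_cell = 1.0e-3       # m^2 cross section, assumed
    grid_spacing = 0.1    # m cell length, assumed
    V_cell = A_cell * grid_spacing  # m^3
    rho = 998.0           # kg/m^3, approx.
    dm = 0.2              # kg/s massflow through the cell, assumed
    # diffusion criterion: UA / (rho*cp) * dt / (A * dx) <= 0.5
    dt_diff = 0.5 * rhocp * A_cell * grid_spacing / UA_tb
    # advection criterion (CFL): |dm| * dt / (V_cell * rho) <= 1
    dt_adv = V_cell * rho / dm
    # the stability check above would request the smaller one with a 0.95
    # security margin:
    return min(dt_diff, dt_adv) * 0.95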
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_invar_hexnum(
part_id,
stability_breaches,
UA_dim1,
UA_dim2,
UA_port,
dm_io,
rho_T,
rhocp,
grid_spacing,
port_subs_gsp,
A_channel,
A_plate_eff,
A_port,
V_cell,
step_stable, # check_vN, , # system wide bools
vN_max_step,
max_factor,
stepnum,
timestep, # system wide vars
):
r"""
Check for L2/von Neumann stability for diffusion and massflows.
Special method for numeric Heat Exchanger calculation with two-dimensional
heat flow and two separated flow regimes.
Notes
-----
Von Neumann stability for conduction:
.. math::
r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
\text{With the thermal diffusivity: } \alpha = \frac{
\lambda}{\rho c_{p}}\\
\text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
\text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
Von Neumann stability for advection (CFL condition):
.. math::
    CFL = \frac{|\dot{m}| \, \Delta t}{\rho \, V_{cell}} \leq 1
"""
# save von Neumann stability values for the cells: multiply the cells'
# relevant total x-gridspacing with the maximum UA-value (this gives a
# substitute heat conduction, i.e. a total diffusion coefficient) and
# the inverse maximum rho*cp value (of all cells! this may result in a
# worst-case result with a security factor of up to about 4.2%) to get
# the substitute diffusivity, then multiply with the timestep and
# divide by the gridspacing (not gridspacing**2, since one gridspacing
# cancels when backcalculating the diffusivity from UA) and save to array:
vN_diff = np.empty(3)
rhocpmax = rhocp.max()
# heat conduction in flow direction:
vN_diff[0] = ( # intermediate solution with Area but not detailed max.
(UA_dim1.max() / rhocpmax) * timestep / (A_channel * grid_spacing)
)
# old version without area
# vN_diff[0] = (UA_dim1.max() / rhocpmax) * timestep / grid_spacing
# new version with detailed checks, but not validated yet, thus
# replaced with the intermediate solution
# vN_diff[0] = (
# np.max(UA_dim1[1:-1] / rhocp[1:])
# * timestep / (A_channel * grid_spacing))
# heat conduction perpendicular to flow direction (fluid-plate-fuild):
vN_diff[1] = (
(UA_dim2.max() / rhocpmax) * timestep / (A_plate_eff * grid_spacing)
)
# vN_diff[1] = (UA_dim2.max() / rhocpmax) * timestep / grid_spacing
# vN_diff[1] = (
# np.max(UA_dim2[1:-1] / rhocp[1:])
# * timestep / (A_plate_eff * grid_spacing))
# for the next two with non-constant gridspacing, find max of UA/gsp:
vN_diff[2] = (
(UA_port / (A_port * port_subs_gsp)).max() / rhocpmax * timestep
)
# vN_diff[2] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
# vN_diff[2] = (
# np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max())
# get maximum:
vN_diff_max = vN_diff.max()
# for massflow:
# get maximum cfl number (this is the von Neumann stability condition
# for massflow through cells), again with total max. of rho to get a
# small security factor for worst case:
Vcellrhomax = V_cell * rho_T.max()
vN_dm_max = np.abs(dm_io).max() * timestep / Vcellrhomax
# get maximum von Neumann stability condition values:
# get dividers for maximum stable timestep to increase or decrease
# stepsize:
vN_diff_mult = vN_diff_max / 0.5
vN_dm_mult = vN_dm_max / 1
# get biggest divider:
vN_div_max = max(vN_diff_mult, vN_dm_mult)
# check if any L2 stability conditions are violated:
if vN_div_max > 1:
# only do something if von Neumann checking is active, else just
# print an error but go on with the calculation:
# if check_vN:
if True:
# if not stable, set stable step bool to False
step_stable[0] = False
stability_breaches += 1 # increase breaches counter for this part
# calculate required timestep to make this part stable with a
# security factor of 0.95:
local_vN_max_step = timestep / vN_div_max * 0.95
# if this is the smallest step of all parts needed to make all
# parts stable save it to maximum von Neumann step:
if vN_max_step[0] > local_vN_max_step:
vN_max_step[0] = local_vN_max_step
# # increase error weight of this part by the factor of 1.1 to
# # avoid running into this error again:
# self._trnc_err_cell_weight *= 1.1 # NOT good
# adjust max factor if vN was violated:
if max_factor[0] > 1.05:
max_factor[0] = max_factor[0] ** 0.99
if max_factor[0] < 1.05: # clip to 1.05
max_factor[0] = 1.05
else:
print(
'\nVon Neumann stability violated at step',
stepnum,
'and part with id',
part_id,
'!',
)
raise ValueError
# return new values (or old values if unchanged):
return step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_var(
part_id,
stability_breaches,
UA_tb,
UA_port,
UA_amb_shell,
dm_top,
dm_bot,
dm_port,
rho_T,
rhocp,
grid_spacing,
port_subs_gsp,
A_cell,
A_port,
A_shell, # areas to backcalc diffusivity from UA
r_total,
V_cell,
step_stable, # check_vN, , # system wide bools
vN_max_step,
max_factor,
stepnum,
timestep, # system wide vars
):
r"""
Check for L2/von Neumann stability for diffusion and massflows.
Massflows are checked for parts where the massflow is defined as NOT
invariant, that means where all cells in the part may have different
massflow!
Notes
-----
Von Neumann stability for conduction:
.. math::
r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
\text{With the thermal diffusivity: } \alpha = \frac{
\lambda}{\rho c_{p}}\\
\text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
\text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
Von Neumann stability for advection (CFL condition):
.. math::
    CFL = \frac{|\dot{m}| \, \Delta t}{\rho \, V_{cell}} \leq 1
"""
# save von Neumann stability values for the cells: multiply the cells'
# relevant total x-gridspacing with the maximum UA-value (this gives a
# substitute heat conduction, i.e. a total diffusion coefficient) and
# the inverse maximum rho*cp value (of all cells! this may result in a
# worst-case result with a security factor of up to about 4.2%) to get
# the substitute diffusivity, then multiply with the timestep and
# divide by the gridspacing (not gridspacing**2, since one gridspacing
# cancels when backcalculating the diffusivity from UA) and save to array:
vN_diff = np.empty(3)
# rhocpmax = rhocp.max()
# For calculation see docstring
# replaced old and faulty calculations with missing Area
# vN_diff[0] = (UA_tb.max() / rhocpmax) * timestep / grid_spacing
vN_diff[0] = (
np.max(UA_tb[1:-1] / rhocp[1:]) * timestep / (A_cell * grid_spacing)
)
# for the next two with non-constant gridspacing, find max of UA/gsp:
# vN_diff[1] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
vN_diff[1] = (
np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max()
)
# vN_diff[2] = UA_amb_shell.max() / r_total / rhocpmax * timestep
vN_diff[2] = np.max(UA_amb_shell / rhocp) * timestep / (A_shell * r_total)
# get maximum:
vN_diff_max = vN_diff.max()
# for massflow:
# get maximum cfl number (this is the von Neumann stability condition
# for massflow through cells), again with total max. of rho to get a
# small security factor for worst case:
Vcellrhomax = V_cell * rho_T.max()
# NO checking for dims, since error probability of only having a critical
# massflow sum at the port inflow cell and NOT at the next cell border is
# extremely low AND this calculation would require either complicated
# generated_jit functions OR keepdims support in sum! Thus just simple
# check.
# if UA_port.ndim == 1:
vN_dm_max = (
max(dm_top.max(), dm_bot.max(), np.abs(dm_port).max())
* timestep
/ Vcellrhomax
)
# else:
# vN_dm = (
# max(dm_top.max(), dm_bot.max(),
# np.abs(dm_port.sum(axis=0, keepdims=True)).max())
# * timestep / Vcellrhomax)
# get maximum von Neumann stability condition values:
# get dividers for maximum stable timestep to increase or decrease
# stepsize:
vN_diff_mult = vN_diff_max / 0.5
vN_dm_mult = vN_dm_max / 1
# get biggest divider:
vN_div_max = max(vN_diff_mult, vN_dm_mult)
# check if any L2 stability conditions are violated:
if vN_div_max > 1.0:
# only do something if von Neumann checking is active, else just
# print an error but go on with the calculation:
# if check_vN:
if True:
# if not stable, set stable step bool to False
step_stable[0] = False
stability_breaches += 1 # increase breaches counter for this part
# calculate required timestep to make this part stable with a
# security factor of 0.95:
local_vN_max_step = timestep / vN_div_max * 0.95
# if this is the smallest step of all parts needed to make all
# parts stable save it to maximum von Neumann step:
if vN_max_step[0] > local_vN_max_step:
vN_max_step[0] = local_vN_max_step
# # increase error weight of this part by the factor of 1.1 to
# # avoid running into this error again:
# self._trnc_err_cell_weight *= 1.1 # NOT good
# adjust max factor if vN was violated:
if max_factor[0] > 1.05:
max_factor[0] = max_factor[0] ** 0.99
if max_factor[0] < 1.05: # clip to 1.05
max_factor[0] = 1.05
else:
print(
'\nVon Neumann stability violated at step',
stepnum,
'and part with id',
part_id,
'!',
)
raise ValueError
# return new values (or old values if unchanged):
return step_stable, vN_max_step, max_factor
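# Hedged illustration (not part of the library API): for parts with
# per-cell massflows (the variant case checked above), the CFL check uses
# the largest value of the top-, bottom- and port-flow arrays. All example
# arrays and values are assumptions for illustration.
def _example_cfl_variant_massflow(timestep=1.0):
    import numpy as np
    V_cell, rho_max = 1.0e-3, 1000.0        # m^3 and kg/m^3, assumed
    dm_top = np.array([0.0, 0.1, 0.1])      # kg/s, assumed
    dm_bot = np.array([0.1, 0.1, 0.0])      # kg/s, assumed
    dm_port = np.array([0.2, -0.2])         # kg/s in/out over ports, assumed
    vN_dm = (max(dm_top.max(), dm_bot.max(), np.abs(dm_port).max())
             * timestep / (V_cell * rho_max))
    return vN_dm <= 1.0  # True -> advection is stable for this timestep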
# %% Simulation Env. part specific differential functions:
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist, # lengths
flow_length,
r_total,
r_ln_wll,
r_ln_ins,
r_rins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
timestep,
):
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by conduction
dT_cond[:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
# + UA_port * (T_port - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# calculate heat transfer by advection
dT_adv[:] = (
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond[idx] += dT_cond_port[i]
# advection
dT_adv[idx] += (
dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
)
dT_total[:] = dT_cond + dT_adv
return dT_total
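# Hedged usage sketch (not part of the library API): the differential
# returned by pipe1D_diff is in K/s and is advanced by the solver, e.g. with
# an explicit Euler step as in euler_forward further below. The temperature
# and differential values here are assumptions for illustration only.
def _example_explicit_euler_step():
    import numpy as np
    T = np.array([60.0, 55.0, 50.0])         # degC, assumed cell temps
    dT_total = np.array([-0.02, 0.0, 0.02])  # K/s, e.g. result of pipe1D_diff
    timestep = 1.0                            # s, assumed
    return T + timestep * dT_total            # temperatures after the step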
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff_fullstructarr(
T_ext,
ports_all, # temperatures
dm_io,
res_dm,
cp_T,
lam_mean,
UA_tb,
port_link_idx,
port_subs_gsp,
step_stable, # bools
vN_max_step,
max_factor,
process_flows,
vertical, # misc.
stepnum,
ra1,
ra2,
ra5,
timestep,
):
"""
This function uses as many structured arrays as possible to reduce the
time needed for calls to typeof_pyval. ra1, ra2, and ra5 are the
structured arrays. ra1 contains single floats, ra2 all values shaped like
the port arrays and ra5 all arrays sized like the non-extended value arrays.
The current speedup over using a list of args is so small, if any, that
the easier approach of lists is preferred.
As soon as structured arrays with variable shaped sub-arrays are supported,
this may become interesting.
"""
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=ra5['dm_top'],
dm_bot=ra5['dm_bot'],
dm_port=ra2['dm_port'],
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext,
cp_T=cp_T,
lam_T=ra5['lam_T'],
rho_T=ra5['rho_T'],
ny_T=ra5['ny_T'],
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=ra5['lam_T'], out=lam_mean)
UA_plate_tb(
A_cell=ra1['A_cell'],
grid_spacing=ra1['grid_spacing'],
lam_mean=lam_mean,
UA_tb_wll=ra1['UA_tb_wll'],
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=ra1['T_amb'][0],
A_s=ra1['A_shell_ins'],
alpha_inf=ra5['alpha_inf'],
UA=ra5['UA_amb_shell'],
T_s=ra5['T_s'],
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io,
T_ext[1:-1],
ra5['rho_T'],
ra5['ny_T'],
ra5['lam_T'],
ra1['A_cell'],
ra1['d_i'],
ra5['cell_dist'],
ra5['alpha_i'],
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=ra5['T_s'],
T_inf=ra1['T_amb'][0],
flow_length=ra1['flow_length'],
vertical=vertical,
r_total=ra1['r_total'],
alpha_inf=ra5['alpha_inf'],
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=ra1['A_shell_i'],
r_ln_wll=ra1['r_ln_wll'],
r_ln_ins=ra1['r_ln_ins'],
r_rins=ra1['r_rins'],
alpha_i=ra5['alpha_i'],
alpha_inf=ra5['alpha_inf'],
lam_wll=ra1['lam_wll'],
lam_ins=ra1['lam_ins'],
out=ra5['UA_amb_shell'],
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=ra1['V_cell'],
cp_T=cp_T,
rho_T=ra5['rho_T'],
mcp_wll=ra1['mcp_wll'],
rhocp=ra5['rhocp'],
mcp=ra5['mcp'],
ui=ra5['ui'],
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=ra2['port_own_idx'],
T=T_ext[1:-1],
mcp=ra5['mcp'],
UA_port=ra2['UA_port'],
UA_port_wll=ra2['UA_port_wll'],
A_p_fld_mean=ra2['A_p_fld_mean'],
port_gsp=ra2['port_gsp'],
grid_spacing=ra1['grid_spacing'][0],
lam_T=ra5['lam_T'],
cp_port=ra2['cp_port'],
lam_port_fld=ra2['lam_port_fld'],
T_port=ra2['T_port'],
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=ra1['part_id'],
stability_breaches=ra1['stability_breaches'],
UA_tb=UA_tb,
UA_port=ra2['UA_port'],
UA_amb_shell=ra5['UA_amb_shell'],
dm_io=dm_io,
rho_T=ra5['rho_T'],
rhocp=ra5['rhocp'],
grid_spacing=ra1['grid_spacing'][0],
port_subs_gsp=port_subs_gsp,
A_cell=ra1['A_cell'],
A_port=ra2['A_p_fld_mean'],
A_shell=ra1['A_shell_ins'],
r_total=ra1['r_total'][0],
V_cell=ra1['V_cell'][0],
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by conduction
ra5['dT_cond'][:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
# + UA_port * (T_port - T_ext[1:-1])
+ ra5['UA_amb_shell'] * (ra1['T_amb'][0] - T_ext[1:-1])
) / ra5['mcp']
# dT_cond[0] += dT_cond_port[0]
# dT_cond[-1] += dT_cond_port[-1]
# calculate heat transfer by advection
ra5['dT_adv'][:] = (
(
+ra5['dm_top'] * (cp_T[:-2] * T_ext[:-2] - ra5['ui'])
+ ra5['dm_bot'] * (cp_T[2:] * T_ext[2:] - ra5['ui'])
)
# + dm_port * (cp_port * T_port - ui))
/ ra5['mcp']
)
# dT_adv[0] += dm_port[0] * (cp_port[0] * T_port[0] - ui[0]) / mcp[0]
# dT_adv[-1] += dm_port[-1] * (cp_port[-1] * T_port[-1] - ui[-1]) / mcp[-1]
# T_port and cp_port NOT collapsed
# for i in range(port_own_idx.size):
# idx = port_own_idx[i]
# dT_cond[idx] += dT_cond_port[i]
# dT_adv[idx] += (
# dm_port[idx] * (cp_port[idx] * T_port[idx] - ui[idx])
# / mcp[idx])
# all (except dm_port) collapsed:
for i in range(ra2['port_own_idx'].size):
idx = ra2['port_own_idx'][i]
ra5['dT_cond'][idx] += dT_cond_port[i]
# dT_adv[idx] += ( # dm_port like T
# dm_port[idx] * (cp_port[i] * T_port[i] - ui[idx])
# / mcp[idx])
ra5['dT_adv'][idx] += ( # dm port only 2 cells
ra2['dm_port'][i]
* (ra2['cp_port'][i] * ra2['T_port'][i] - ra5['ui'][idx])
/ ra5['mcp'][idx]
)
ra5['dT_total'][:] = ra5['dT_cond'] + ra5['dT_adv']
return ra5['dT_total']
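# Hedged sketch (not part of the library API): minimal example of packing
# scalar parameters into a numpy structured array as used for ``ra1`` in the
# function above, so that a single typed argument replaces many scalars in
# the jitted call. The field selection and values are assumptions.
def _example_build_ra1():
    import numpy as np
    dtype = np.dtype([
        ('A_cell', np.float64),
        ('grid_spacing', np.float64),
        ('UA_tb_wll', np.float64),
        ('T_amb', np.float64),
    ])
    ra1 = np.zeros(1, dtype=dtype)
    ra1['A_cell'] = 1.0e-3       # m^2, assumed
    ra1['grid_spacing'] = 0.1    # m, assumed
    ra1['UA_tb_wll'] = 0.5       # W/K, assumed
    ra1['T_amb'] = 20.0          # degC, assumed
    return ra1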
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff_structarr(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld, # mat. props.
mcp,
rhocp,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
port_gsp,
port_subs_gsp,
cell_dist, # lengths
A_p_fld_mean, # areas and vols
process_flows,
step_stable, # bools
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
sar, # structarr
vertical,
part_id,
timestep,
):
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=sar['A_cell'][0],
grid_spacing=sar['grid_spacing'][0],
lam_mean=lam_mean,
UA_tb_wll=sar['UA_tb_wll'][0],
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=sar['A_shell_ins'][0],
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io,
T_ext[1:-1],
rho_T,
ny_T,
lam_T,
sar['A_cell'][0],
sar['d_i'][0],
cell_dist,
alpha_i,
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=sar['flow_length'][0],
vertical=vertical,
r_total=sar['r_total'][0],
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=sar['A_shell_i'][0],
r_ln_wll=sar['r_ln_wll'][0],
r_ln_ins=sar['r_ln_ins'][0],
r_rins=sar['r_rins'][0],
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=sar['lam_wll'][0],
lam_ins=sar['lam_ins'][0],
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=sar['V_cell'][0],
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=sar['mcp_wll'][0],
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=sar['grid_spacing'][0],
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=sar['grid_spacing'][0],
port_subs_gsp=port_subs_gsp,
A_cell=sar['A_cell'],
A_port=A_p_fld_mean,
A_shell=sar['A_shell_ins'],
r_total=sar['r_total'][0],
V_cell=sar['V_cell'][0],
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by conduction
dT_cond[:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
# + UA_port * (T_port - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# dT_cond[0] += dT_cond_port[0]
# dT_cond[-1] += dT_cond_port[-1]
# calculate heat transfer by advection
dT_adv[:] = (
(
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
)
# + dm_port * (cp_port * T_port - ui))
/ mcp
)
# dT_adv[0] += dm_port[0] * (cp_port[0] * T_port[0] - ui[0]) / mcp[0]
# dT_adv[-1] += dm_port[-1] * (cp_port[-1] * T_port[-1] - ui[-1]) / mcp[-1]
# T_port and cp_port NOT collapsed
# for i in range(port_own_idx.size):
# idx = port_own_idx[i]
# dT_cond[idx] += dT_cond_port[i]
# dT_adv[idx] += (
# dm_port[idx] * (cp_port[idx] * T_port[idx] - ui[idx])
# / mcp[idx])
# all (except dm_port) collapsed:
for i in range(port_own_idx.size):
idx = port_own_idx[i]
dT_cond[idx] += dT_cond_port[i]
# dT_adv[idx] += ( # dm_port like T
# dm_port[idx] * (cp_port[i] * T_port[i] - ui[idx])
# / mcp[idx])
dT_adv[idx] += ( # dm port only 2 cells
dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
)
dT_total[:] = dT_cond + dT_adv
return dT_total
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_branched_diff(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist, # lengths
flow_length,
r_total,
r_ln_wll,
r_ln_ins,
r_rins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
timestep,
):
process_flows[0] = _process_flow_var(
process_flows=process_flows,
dm_io=dm_io,
dm=dm,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
port_own_idx=port_own_idx,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by conduction
dT_cond[:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
# + UA_port * (T_port - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# calculate heat transfer by advection
dT_adv[:] = (
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond[idx] += dT_cond_port[i]
# advection
dT_adv[idx] += (
dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
)
dT_total[:] = dT_cond + dT_adv
return dT_total
@njit(nogil=GLOB_NOGIL, cache=True)
def heatedpipe1D_diff(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
dQ_heating,
res_dm,
res_dQ, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld, # mat. props.
mcp,
mcp_heated,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx,
heat_mult, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist, # lengths
flow_length,
r_total,
r_ln_wll,
r_ln_ins,
r_rins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum,
timestep, # step information
dT_cond,
dT_adv,
dT_heat,
dT_heated, # differentials
emergency_shutdown=110.0,
):
# shut down the gas boiler immediately if any temperature exceeds the
# emergency_shutdown value
if np.any(T_ext >= emergency_shutdown):
dQ_heating[:] = 0.0
# save rate of heat flow to result array
if process_flows[0]: # only if flows not already processed
res_dQ[stepnum] = dQ_heating
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by internal heat sources
dT_heated[:] = dQ_heating * heat_mult / mcp_heated
# calculate heat transfer by conduction
dT_cond[:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# calculate heat transfer by advection
dT_adv[:] = (
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond[idx] += dT_cond_port[i]
# advection
dT_adv[idx] += (
dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
)
dT_total = dT_cond + dT_adv + dT_heat
return dT_total, process_flows, step_stable, vN_max_step, max_factor
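# Hedged illustration (not part of the library API): shows how the internal
# heat input in heatedpipe1D_diff above translates into a temperature
# differential for the heated cells. The power, the heat multiplier per cell
# and the cell heat capacities are assumed example values.
def _example_heating_differential():
    import numpy as np
    dQ_heating = np.array([20e3])                     # W heating power, assumed
    heat_mult = np.array([0.25, 0.25, 0.25, 0.25])    # per-cell multiplier, assumed
    mcp_heated = np.array([8.3e3] * 4)                # J/K per heated cell, assumed
    return dQ_heating * heat_mult / mcp_heated        # K/s per heated cell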
@njit(nogil=GLOB_NOGIL, cache=True)
def tes_diff(
T_ext,
T_port,
T_s,
T_s_lid,
T_amb,
ports_all, # temperatures
dm_io,
dm,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # mat. props.
alpha_i,
alpha_inf, # alpha_inf_lid, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_amb_lid,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist,
flow_length,
flow_length_lid,
r_total,
r_ln_wll,
r_ln_ins,
r_rins,
s_wll,
s_ins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
vertical_lid,
lid_top,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
# T, T_top, T_bot, # T+bot/top NOT NEEDED ANYMORE
# cp_top, cp_bot, # cp_top/botT NOT NEEDED ANYMORE
timestep,
):
process_flows[0] = _process_flow_var(
process_flows=process_flows,
dm_io=dm_io,
dm=dm,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
port_own_idx=port_own_idx,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
# calculate buoyancy with Nusselt correction:
buoyancy_byNusselt(T=T_ext[1:-1], ny=ny_T, d_i=d_i, lam_mean=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm=dm,
T=T_ext[1:-1],
rho=rho_T,
ny=ny_T,
lam_fld=lam_T,
A=A_cell,
d_i=d_i,
x=cell_dist,
alpha=alpha_i,
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
alpha_inf_lid = plane_alpha_inf(
T_s=T_s_lid,
T_inf=T_amb[0],
flow_length=flow_length_lid,
vertical=vertical_lid,
top=lid_top,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
UA_amb_lid[:] = UA_fld_wll_ins_amb_plate(
A=A_cell,
s_wll=s_wll,
s_ins=s_ins, # alpha_i FIRST AND LAST element! alpha_fld=alpha_i[0],
alpha_fld=alpha_i[:: alpha_i.size - 1],
alpha_inf=alpha_inf_lid,
lam_wll=lam_wll,
lam_ins=lam_ins,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
# dT_cond_port = _process_ports_collapsed(
_ = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_var(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
if T_port.ndim == 1:
# calculate heat transfer by conduction
dT_cond[:] = ( # EXTENDED ARRAY VERSION
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# add losses through top and bottom lid:
dT_cond[0] += ( # EXTENDED ARRAY VERSION
UA_amb_lid[0] * (T_amb[0] - T_ext[1]) / mcp[0]
)
dT_cond[-1] += UA_amb_lid[-1] * (T_amb[0] - T_ext[-2]) / mcp[-1]
# calculate heat transfer by advection
dT_adv[:] = ( # EXTENDED ARRAY VERSION
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
else:
# the same if multiple ports per cell exist. to correctly calculate
# this, the sum of the arrays has to be taken:
dT_cond[:] = ( # EXTENDED ARRAY VERSION
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# add losses through top and bottom lid:
dT_cond[0] += ( # EXTENDED ARRAY VERSION
UA_amb_lid[0] * (T_amb[0] - T_ext[1]) / mcp[0]
)
dT_cond[-1] += UA_amb_lid[-1] * (T_amb[0] - T_ext[-2]) / mcp[-1]
# calculate heat transfer by advection
dT_adv[:] = ( # EXTENDED ARRAY VERSION
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
for i in range(T_port.size):
idx = port_own_idx[i]
# dT_cond[idx] += dT_cond_port[i]
# heat conduction over ports (T_ext[idx+1] since index is not extended)
dT_cond[idx] += UA_port[i] * (T_port[i] - T_ext[idx + 1]) / mcp[idx]
# heat advection through ports
dT_adv[idx] += (
dm_port.flat[i]
* (cp_port[i] * T_port[i] - ui[idx]) # collapsed dmport
/ mcp[idx]
)
# sum up all differentials
dT_total[:] = dT_cond + dT_adv
return (
dT_total,
process_flows,
step_stable,
vN_max_step,
max_factor,
alpha_inf_lid,
)
@njit(nogil=GLOB_NOGIL, cache=True)
def chp_core_diff(
T_ext,
T_port,
T_s,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
dQ_heating,
res_dm,
res_dQ, # flows
cp_T,
lam_T,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld, # mat. props.
mcp,
mcp_heated,
rhocp,
lam_wll,
lam_ins,
mcp_wll,
ui, # material properties.
alpha_i,
alpha_inf, # alpha values
UA_tb,
UA_tb_wll,
UA_amb_shell,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx,
heat_mult, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_i,
cell_dist, # lengths
flow_length,
r_total,
r_ln_wll,
r_ln_ins,
r_rins, # lengths
A_cell,
V_cell,
A_shell_i,
A_shell_ins,
A_p_fld_mean, # areas and vols
process_flows,
vertical,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum,
timestep, # step information
dT_cond,
dT_adv,
dT_heat,
dT_heated, # differentials
):
# save rate of heat flow to result array
if process_flows[0]: # only if flows not already processed
res_dQ[stepnum] = dQ_heating
process_flows[0] = _process_flow_invar(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view(
T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
)
# get mean lambda value between cells:
_lambda_mean_view(lam_T=lam_T, out=lam_mean)
UA_plate_tb(
A_cell=A_cell,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
UA_tb_wll=UA_tb_wll,
out=UA_tb,
)
# for conduction between current cell and ambient:
# get outer pipe (insulation) surface temperature using a linearized
# approach assuming steady state (assuming surface temperature = const.
# for t -> infinity) and for cylinder shell (lids are omitted)
surface_temp_steady_state_inplace(
T=T_ext[1:-1],
T_inf=T_amb[0],
A_s=A_shell_ins,
alpha_inf=alpha_inf,
UA=UA_amb_shell,
T_s=T_s,
)
# get inner alpha value between fluid and wall from nusselt equations:
pipe_alpha_i(
dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
)
# get outer alpha value between insulation and surrounding air:
cylinder_alpha_inf( # for a cylinder
T_s=T_s,
T_inf=T_amb[0],
flow_length=flow_length,
vertical=vertical,
r_total=r_total,
alpha_inf=alpha_inf,
)
# get resulting UA to ambient:
UA_fld_wll_ins_amb_cyl(
A_i=A_shell_i,
r_ln_wll=r_ln_wll,
r_ln_ins=r_ln_ins,
r_rins=r_rins,
alpha_i=alpha_i,
alpha_inf=alpha_inf,
lam_wll=lam_wll,
lam_ins=lam_ins,
out=UA_amb_shell,
)
# precalculate values which are needed multiple times:
cell_temp_props_ext(
T_ext=T_ext,
V_cell=V_cell,
cp_T=cp_T,
rho_T=rho_T,
mcp_wll=mcp_wll,
rhocp=rhocp,
mcp=mcp,
ui=ui,
)
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_T,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
part_id=part_id,
stability_breaches=stability_breaches,
UA_tb=UA_tb,
UA_port=UA_port,
UA_amb_shell=UA_amb_shell,
dm_io=dm_io,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_cell=A_cell,
A_port=A_p_fld_mean,
A_shell=A_shell_ins,
r_total=r_total,
V_cell=V_cell,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
# CALCULATE DIFFERENTIALS
# calculate heat transfer by internal heat sources
dT_heated[:] = dQ_heating * heat_mult / mcp_heated
# calculate heat transfer by conduction
dT_cond[:] = (
+UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
+ UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
+ UA_amb_shell * (T_amb[0] - T_ext[1:-1])
) / mcp
# calculate heat transfer by advection
dT_adv[:] = (
+dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
+ dm_bot * (cp_T[2:] * T_ext[2:] - ui)
) / mcp
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond[idx] += dT_cond_port[i]
# advection
dT_adv[idx] += (
dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
)
dT_total = dT_cond + dT_adv + dT_heat
return dT_total, process_flows, step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def hexnum_diff(
T_ext,
T_port,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_fld,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
cp_wll,
lam_wll,
ui, # material properties.
alpha_i, # alpha values
UA_dim1,
UA_dim2,
UA_dim1_wll,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
port_subs_gsp,
d_h,
s_plate,
cell_dist,
dist_min, # lengths
A_channel,
V_cell_fld,
A_plate_eff,
A_p_fld_mean, # areas and vols
channel_divisor,
corr_Re,
process_flows,
step_stable, # bools
part_id,
stability_breaches,
vN_max_step,
max_factor, # misc.
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
timestep,
):
# generate views needed to make calculations easier:
T_sup = T_ext[1:-1, 1] # view to supply side
T_dmd = T_ext[1:-1, 3] # view to demand side
# T_wll = T_ext[1:-1, 2] # view to wall temperature
dm_sup = dm_io[:1] # view to supply side massflow
dm_dmd = dm_io[1:] # view to demand side massflow
process_flows[0] = _process_flow_multi_flow(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
flow_per_channel = np.abs(dm_io / channel_divisor)
water_mat_props_ext_view( # only pass fluid columns to T_ext
T_ext=T_ext[:, 1::2], cp_T=cp_T, lam_T=lam_fld, rho_T=rho_T, ny_T=ny_T
)
_lambda_mean_view(lam_T=lam_fld, out=lam_mean)
UA_plate_tb_fld( # only pass the fluid columns to out
A_cell=A_channel,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
out=UA_dim1[:, ::2],
)
UA_plate_tb_wll( # only pass the wall column to out
UA_tb_wll=UA_dim1_wll, out=UA_dim1[:, 1]
)
phex_alpha_i_wll_sep_discretized(
dm=dm_sup / channel_divisor[0],
T_fld=T_sup,
T_wll=T_sup,
rho=rho_T[:, 0],
ny=ny_T[:, 0],
lam_fld=lam_fld[:, 0],
A=A_channel,
d_h=d_h,
x=cell_dist,
corr_Re=corr_Re,
alpha=alpha_i[:, 0],
)
phex_alpha_i_wll_sep_discretized(
dm=dm_dmd / channel_divisor[1],
T_fld=T_dmd,
T_wll=T_dmd,
rho=rho_T[:, 1],
ny=ny_T[:, 1],
lam_fld=lam_fld[:, 1],
A=A_channel,
d_h=d_h,
x=cell_dist,
corr_Re=corr_Re,
alpha=alpha_i[:, 1],
)
UA_dim2[:, 1:3] = UA_fld_wll_plate(
A=A_plate_eff, s_wll=s_plate / 2, alpha_fld=alpha_i, lam_wll=lam_wll
)
cell_temp_props_fld(
T_ext_fld=T_ext[:, 1::2],
V_cell=V_cell_fld,
cp_T=cp_T,
rho_T=rho_T,
rhocp_fld=rhocp[:, ::2],
mcp_fld=mcp[:, ::2],
ui_fld=ui[:, ::2],
)
specific_inner_energy_wll(T_wll=T_ext[1:-1, 2], cp_wll=cp_wll, ui=ui[:, 1])
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1, 1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_fld,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
(
_step_stable,
_vN_max_step,
_max_factor,
) = _vonNeumann_stability_invar_hexnum(
part_id=part_id,
stability_breaches=stability_breaches,
UA_dim1=UA_dim1,
UA_dim2=UA_dim2,
UA_port=UA_port,
dm_io=flow_per_channel,
rho_T=rho_T,
rhocp=rhocp,
grid_spacing=grid_spacing,
port_subs_gsp=port_subs_gsp,
A_channel=A_channel,
A_plate_eff=A_plate_eff,
A_port=A_p_fld_mean,
V_cell=V_cell_fld,
step_stable=step_stable,
vN_max_step=vN_max_step,
max_factor=max_factor,
stepnum=stepnum,
timestep=timestep,
)
UA_amb_shell = 0.0
dT_cond[:] = (
# heat conduction in first dimension (axis 0), top -> bottom:
(
+UA_dim1[:-1] * (T_ext[:-2, 1:-1] - T_ext[1:-1, 1:-1])
# heat conduction in first dimension (axis 0), bottom -> top:
+ UA_dim1[1:] * (T_ext[2:, 1:-1] - T_ext[1:-1, 1:-1])
# heat conduction in second dimension (axis 1), left -> right:
+ UA_dim2[:, :-1] * (T_ext[1:-1, :-2] - T_ext[1:-1, 1:-1])
# heat conduction in second dimension (axis 1), right -> left:
+ UA_dim2[:, 1:] * (T_ext[1:-1, 2:] - T_ext[1:-1, 1:-1])
# heat conduction to ambient (currently set to 0):
+ UA_amb_shell * (T_amb - T_ext[1:-1, 1:-1])
)
/ mcp
)
# calculate heat transfer by advection in the fluid channels
dT_adv[:, ::2] = (
# advective heat transport (only axis 0), top -> bottom:
(
+dm_top * (cp_T[:-2] * T_ext[:-2, 1::2] - ui[:, ::2])
# advective heat transport (only axis 0), bottom -> top:
+ dm_bot * (cp_T[2:] * T_ext[2:, 1::2] - ui[:, ::2])
)
/ mcp[:, ::2]
)
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond.flat[idx] += dT_cond_port[i]
# advection
dT_adv.flat[idx] += (
dm_port[i]
* (cp_port[i] * T_port[i] - ui.flat[idx])
/ mcp.flat[idx]
)
# divide advective transfer by the number of channels:
dT_adv[:, ::2] /= channel_divisor
# sum up the differentials for conduction and advection
dT_total[:] = dT_cond + dT_adv
return dT_total
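# Hedged illustration (not part of the library API): shows the extended 2D
# temperature array layout assumed by hexnum_diff above, with one ghost row
# on each end, the supply fluid in column 1, the separating wall in column 2
# and the demand fluid in column 3 (columns 0 and 4 are boundary columns).
# All temperature values are assumptions for illustration.
def _example_hexnum_layout(n_cells=4):
    import numpy as np
    T_ext = np.zeros((n_cells + 2, 5))
    T_ext[1:-1, 1] = 80.0   # supply side fluid cells, assumed degC
    T_ext[1:-1, 2] = 60.0   # wall cells, assumed degC
    T_ext[1:-1, 3] = 40.0   # demand side fluid cells, assumed degC
    T_sup = T_ext[1:-1, 1]  # view used for the supply side
    T_dmd = T_ext[1:-1, 3]  # view used for the demand side
    T_fld = T_ext[:, 1::2]  # both fluid columns incl. ghost rows
    return T_sup, T_dmd, T_fld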
@nb.njit(cache=True)
def condensing_hex_solve(
T,
T_port,
ports_all,
res,
res_dm,
dm_io,
dm_port,
port_own_idx,
port_link_idx,
X_pred,
flow_scaling,
water_dm_range,
gas_dv_range,
int_comb_idx,
nvars_per_ftr,
pca_mean,
pca_components,
lm_intercept,
lm_coef,
stepnum,
):
"""
Calculate a condensing flue gas heat exchanger (HEX) by using a
PCA-transformed polynomial linear regression.
The inflowing water and flue gas temperatures and the scaled water mass
flow and flue gas volume flow are collected in `X_pred`, transformed with
the fitted polynomial/PCA/regression coefficients (`int_comb_idx`,
`nvars_per_ftr`, `pca_mean`, `pca_components`, `lm_intercept`, `lm_coef`)
and the predicted outlet temperatures are written to `T` as well as the
result arrays `res` and `res_dm`.
Raises
------
ValueError
    If the flue gas volume flow exceeds the trained range `gas_dv_range`.
Returns
-------
None.
"""
_port_values_to_cont(
ports_all=ports_all, port_link_idx=port_link_idx, out=T_port
)
# extract inflowing temperatures for water (idx 0) and flue gas (idx 2)
X_pred[:, :2] = T_port[::2]
# extract water massflow (cell 3) and flue gas volume flow (cell 4) and
# scale with scaling factors
X_pred[:, 2:] = dm_io / flow_scaling
# make some flow checks:
# check for water/flue gas massflow bounds. only do something if violated
bypass = False # bypass is initialized to False
if (water_dm_range[0] != 0.0) and (X_pred[0, 2] < water_dm_range[0]):
# if flow smaller than lower bound, decide if using 0 or lower bound
# by rounding to the closer value:
X_pred[0, 2] = (
round(X_pred[0, 2] / water_dm_range[0]) * water_dm_range[0]
)
elif X_pred[0, 2] > water_dm_range[1]:
# bypassing excess mass flow, to avoid huge power output
# when outside HEX heat meters are calculated with unclipped flows:
# backup full flow for calculations:
# water_dm_full = X_pred[0, 2] # not needed anymore
# get excess flow over max. range. this amount is bypassed
water_dm_excess = X_pred[0, 2] - water_dm_range[1]
bypass = True # set bypassing to true
# clip the amount of water over the hex to the range
X_pred[0, 2] = water_dm_range[1]
if (gas_dv_range[0] != 0.0) and (X_pred[0, 3] < gas_dv_range[0]):
# if flow smaller than lower bound, decide if using 0 or lower bound
# by rounding to the closer value:
X_pred[0, 3] = round(X_pred[0, 3] / gas_dv_range[0]) * gas_dv_range[0]
elif X_pred[0, 3] > gas_dv_range[1]:
print(
'\nFluegas volume flow in condensing HEX exceeded. The '
'following value was encountered:'
)
print(X_pred[0, 3])
raise ValueError
# calculate results. but only if NO massflow is 0
if np.all(X_pred[0, 2:] != 0):
dm_water_thresh = 0.1 # threshhold below which no regr. preds. exist
n_samples = 1 # this is always one for this function
# only if water massflow is greater than the 10% threshold, else use the
# quadratic polynomial
if X_pred[0, 2] > dm_water_thresh:
# transform input data to polynome, then to principal components
X_pf = transform_to_poly_nb(
X_pred, int_comb_idx, nvars_per_ftr, n_samples
)
X_PC = transform_pca_nb(X_pf, pca_mean, pca_components)
# predict
T_pred = poly_tranfs_pred(X_PC, lm_intercept, lm_coef)
# save results to temperature array
T[0, 0] = X_pred[0, 0] # t w in
T[1, 0] = T_pred[0, 0] # t w out
T[0, 1] = X_pred[0, 1] # t fg in
T[1, 1] = T_pred[0, 1] # t fg out
else: # for massflow below thresh, use quad polynome
T_pred_below_thresh = condensing_hex_quad_poly(
X_pred, # X vector
int_comb_idx,
nvars_per_ftr, # polynomial transf.
pca_mean,
pca_components, # PCA transformation
lm_intercept,
lm_coef, # linear model transformation
dm_water_thresh=0.1,
dx=0.01,
)
# save results to temperature array
T[0, 0] = X_pred[0, 0] # t w in
T[1, 0] = T_pred_below_thresh[0, 0] # t w out
T[0, 1] = X_pred[0, 1] # t fg in
T[1, 1] = T_pred_below_thresh[0, 1] # t fg out
else: # if ANY massflow is 0, all output temps are equal to input temps
T[:, 0] = X_pred[0, 0] # t w in & t w out = t w in
T[:, 1] = X_pred[0, 1] # t fg in & t fg out = t fg in
# if bypassing the hex with a part of the water flow:
if bypass:
# get heat capacity rates for bypassing, hex traversing and
# outflowing (mixed) water. hcr is dimensionless, since flows have been
# scaled before, thus value is same as leveraged Cp in unit J/kg/K
hcr_bypass = cp_water(X_pred[0, 0]) * water_dm_excess
hcr_hex_out = cp_water(T[1, 0]) * water_dm_range[1]
hcr_out = hcr_bypass + hcr_hex_out
# calculate outflowing (mix of bypass and hex traversing water) temp:
T_out = (hcr_bypass * X_pred[0, 0] + hcr_hex_out * T[1, 0]) / hcr_out
# set to the temperature result array:
T[1, 0] = T_out # t w out
res[stepnum[0]] = T
res_dm[stepnum[0]] = dm_io
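# Hedged illustration (not part of the library API): the bypass mixing in
# condensing_hex_solve above is a heat-capacity-rate weighted mean of the
# bypassed and the HEX-traversing water. A constant cp is assumed here
# instead of the temperature dependent cp_water used above; flows and
# temperatures are example values.
def _example_bypass_mixing():
    cp = 4180.0                         # J/(kg K), assumed constant
    T_w_in, T_w_hex_out = 30.0, 55.0    # degC, assumed
    dm_excess, dm_hex_max = 0.3, 1.0    # scaled flows, assumed
    hcr_bypass = cp * dm_excess
    hcr_hex_out = cp * dm_hex_max
    # mixed outflow temperature of bypass and HEX-traversing water:
    return (hcr_bypass * T_w_in + hcr_hex_out * T_w_hex_out) / (
        hcr_bypass + hcr_hex_out)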
# %% Simulation Env. implicit/explicit specific functions and tests:
@njit(nogil=GLOB_NOGIL, cache=True)
def hexnum_diff_impl(
T_ext,
T_port,
T_amb,
ports_all, # temperatures
dm_io,
dm_top,
dm_bot,
dm_port,
res_dm, # flows
cp_T,
lam_fld,
rho_T,
ny_T,
lam_mean,
cp_port,
lam_port_fld,
mcp,
rhocp,
cp_wll,
lam_wll,
ui, # material properties.
alpha_i, # alpha values
UA_dim1,
UA_dim2,
UA_dim1_wll,
UA_port,
UA_port_wll, # UA values
port_own_idx,
port_link_idx, # indices
grid_spacing,
port_gsp,
d_h,
s_plate,
cell_dist,
dist_min, # lengths
A_channel,
V_cell_fld,
A_plate_eff,
A_p_fld_mean, # areas and vols
channel_divisor,
corr_Re,
process_flows, # bools
stepnum, # step information
dT_cond,
dT_adv,
dT_total, # differentials
):
# generate views needed to make calculations easier:
T_sup = T_ext[1:-1, 1] # view to supply side
T_dmd = T_ext[1:-1, 3] # view to demand side
# T_wll = T_ext[1:-1, 2] # view to wall temperature
dm_sup = dm_io[:1] # view to supply side massflow
dm_dmd = dm_io[1:] # view to demand side massflow
process_flows[0] = _process_flow_multi_flow(
process_flows=process_flows,
dm_io=dm_io,
dm_top=dm_top,
dm_bot=dm_bot,
dm_port=dm_port,
stepnum=stepnum,
res_dm=res_dm,
)
water_mat_props_ext_view( # only pass fluid columns to T_ext
T_ext=T_ext[:, 1::2], cp_T=cp_T, lam_T=lam_fld, rho_T=rho_T, ny_T=ny_T
)
_lambda_mean_view(lam_T=lam_fld, out=lam_mean)
UA_plate_tb_fld( # only pass the fluid columns to out
A_cell=A_channel,
grid_spacing=grid_spacing,
lam_mean=lam_mean,
out=UA_dim1[:, ::2],
)
UA_plate_tb_wll( # only pass the wall column to out
UA_tb_wll=UA_dim1_wll, out=UA_dim1[:, 1]
)
phex_alpha_i_wll_sep_discretized(
dm=dm_sup / channel_divisor[0],
T_fld=T_sup,
T_wll=T_sup,
rho=rho_T[:, 0],
ny=ny_T[:, 0],
lam_fld=lam_fld[:, 0],
A=A_channel,
d_h=d_h,
x=cell_dist,
corr_Re=corr_Re,
alpha=alpha_i[:, 0],
)
phex_alpha_i_wll_sep_discretized(
dm=dm_dmd / channel_divisor[1],
T_fld=T_dmd,
T_wll=T_dmd,
rho=rho_T[:, 1],
ny=ny_T[:, 1],
lam_fld=lam_fld[:, 1],
A=A_channel,
d_h=d_h,
x=cell_dist,
corr_Re=corr_Re,
alpha=alpha_i[:, 1],
)
UA_dim2[:, 1:3] = UA_fld_wll_plate(
A=A_plate_eff, s_wll=s_plate / 2, alpha_fld=alpha_i, lam_wll=lam_wll
)
cell_temp_props_fld(
T_ext_fld=T_ext[:, 1::2],
V_cell=V_cell_fld,
cp_T=cp_T,
rho_T=rho_T,
rhocp_fld=rhocp[:, ::2],
mcp_fld=mcp[:, ::2],
ui_fld=ui[:, ::2],
)
specific_inner_energy_wll(T_wll=T_ext[1:-1, 2], cp_wll=cp_wll, ui=ui[:, 1])
dT_cond_port = _process_ports_collapsed(
ports_all=ports_all,
port_link_idx=port_link_idx,
port_own_idx=port_own_idx,
T=T_ext[1:-1, 1:-1],
mcp=mcp,
UA_port=UA_port,
UA_port_wll=UA_port_wll,
A_p_fld_mean=A_p_fld_mean,
port_gsp=port_gsp,
grid_spacing=grid_spacing,
lam_T=lam_fld,
cp_port=cp_port,
lam_port_fld=lam_port_fld,
T_port=T_port,
)
UA_amb_shell = 0.0
dT_cond[:] = (
# heat conduction in first dimension (axis 0), top -> bottom:
(
+UA_dim1[:-1] * (T_ext[:-2, 1:-1] - T_ext[1:-1, 1:-1])
# heat conduction in first dimension (axis 0), bottom -> top:
+ UA_dim1[1:] * (T_ext[2:, 1:-1] - T_ext[1:-1, 1:-1])
# heat conduction in second dimension (axis 1), left -> right:
+ UA_dim2[:, :-1] * (T_ext[1:-1, :-2] - T_ext[1:-1, 1:-1])
# heat conduction in second dimension (axis 1), right -> left:
+ UA_dim2[:, 1:] * (T_ext[1:-1, 2:] - T_ext[1:-1, 1:-1])
# heat conduction to ambient (currently set to 0):
+ UA_amb_shell * (T_amb - T_ext[1:-1, 1:-1])
)
/ mcp
)
# calculate heat transfer by advection in the fluid channels
dT_adv[:, ::2] = (
# advective heat transport (only axis 0), top -> bottom:
(
+dm_top * (cp_T[:-2] * T_ext[:-2, 1::2] - ui[:, ::2])
# advective heat transport (only axis 0), bottom -> top:
+ dm_bot * (cp_T[2:] * T_ext[2:, 1::2] - ui[:, ::2])
)
/ mcp[:, ::2]
)
# sum up heat conduction and advection for port values:
for i in range(port_own_idx.size):
idx = port_own_idx[i] # idx of port values at temperature/diff array
# conduction
dT_cond.flat[idx] += dT_cond_port[i]
# advection
dT_adv.flat[idx] += (
dm_port[i]
* (cp_port[i] * T_port[i] - ui.flat[idx])
/ mcp.flat[idx]
)
# divide advective transfer by the number of channels:
dT_adv[:, ::2] /= channel_divisor
# sum up the differentials for conduction and advection
dT_total[:] = dT_cond + dT_adv
return dT_total
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def euler_forward(diff, diff_input_args, yprev, _h):
return yprev + _h * diff(*diff_input_args)
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_root_diff(y, yprev, h, input_args):
input_args[0][1:-1, 1:-1] = y.reshape(input_args[0][1:-1, 1:-1].shape)
return y - yprev - h * hexnum_diff_impl(*input_args).ravel()
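# Illustrative sketch only: one way to drive the backward Euler residual
# above to zero with a generic root finder. The simulation environment may
# call it differently; `scipy.optimize.root` with the 'krylov' method is an
# assumption for this demonstration, not the library's actual solver call.
def _example_backward_euler_step(yprev, h, input_args):
    from scipy import optimize  # plain Python sketch, not compiled by numba
    sol = optimize.root(
        hexnum_imp_root_diff,  # residual: y - yprev - h * f(y)
        x0=yprev.ravel(),
        args=(yprev.ravel(), h, input_args),
        method='krylov',
    )
    return sol.x.reshape(yprev.shape)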
@nb.njit
def hexnum_imp_fixedpoint(y, y_prev, h, input_args): # fixed point function
"""
Find fixed point of the hexnum implicit function.
Warning: Fixed point iteration may be several"""
input_args[0][1:-1, 1:-1] = y.reshape(input_args[0][1:-1, 1:-1].shape)
return (
y_prev.reshape(input_args[0][1:-1, 1:-1].shape)
+ h * hexnum_diff_impl(*input_args)
).ravel()
@nb.njit
def fixed_point_to_root(y, fp_fun, y_prev, h, input_args):
return y - fp_fun(y, y_prev, h, input_args)
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_fixedp_diff(y, yprev, h, input_args):
input_args[0][1:-1, 1:-1] = y
return yprev + h * hexnum_diff_impl(*input_args)
# @nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_newt_diff(yprev, _h, input_args, rtol=1e-6):
"""
https://math.stackexchange.com/questions/152159/how-to-correctly-apply-newton-raphson-method-to-backward-euler-method
https://scicomp.stackexchange.com/questions/5042/how-to-implement-newtons-method-for-solving-the-algebraic-equations-in-the-back
"""
input_args[0][1:-1, 1:-1] = yprev
y_lastiter = yprev.copy()
err = 1.0
# initial guess:
diff = hexnum_diff_impl(*input_args)
y = yprev + _h * diff
f = np.zeros_like(y)
while np.any(err > rtol):
# y_lastiter = y.copy()
input_args[0][1:-1, 1:-1] = y
# y = (
# y_lastiter
# + (euler_forward(hexnum_diff_impl, input_args, yprev, _h)
# / hexnum_diff_impl(*input_args))
# )
diff = hexnum_diff_impl(*input_args)
f_lastiter = f
f = y - yprev - _h * diff
nz = f != 0.0 # make a mask with non zero values
slope = (f[nz] - f_lastiter[nz]) / (y[nz] - y_lastiter[nz])
# diff[diff == 0.] = yprev[diff == 0.]
# diff2 = diff * _h
# y = y_lastiter - ((yprev + diff) / (diff))
# y[np.abs(y) == np.inf] = y_lastiter[np.abs(y) == np.inf]
# y = y_lastiter - yprev / diff - 1.
# err = np.sqrt(np.sum((np.abs(y - y_lastiter))**2))
# err = (y - y_lastiter) / y_lastiter
y[nz] = y_lastiter[nz] - f[nz] / slope
        err = np.abs((y - y_lastiter) / y_lastiter)  # relative change as error
y_lastiter = y.copy()
return y
# %% Simulation Env. old (mostly deprecated) solve methods:
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_connector_3w_overload(arglist):
solve_connector_3w(*arglist)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def solve_connector_3w(T, ports_all, cp_T, dm, port_link_idx, res, stepnum):
# depending on the flow conditions this 3w connector acts as a flow
# mixing or splitting device. This state has to be determined by
# checking the direction of the massflows through the ports.
# A negative sign means that the massflow is exiting through the
# respective port, a positive sign is an ingoing massflow.
# get connected port temperatures:
# get port array:
_port_values_to_cont(
ports_all=ports_all, port_link_idx=port_link_idx, out=T
)
# get cp-values of all temperatures:
get_cp_water(T, cp_T)
# save bool indices of massflows greater (in) and less (out) than 0:
# (using dm as massflow array only works since it is a view of _dm_io!)
dm_in = np.greater(dm, 0)
dm_out = np.less(dm, 0)
# if 2 ports > 0 are True, 3w connector is mixer:
if np.sum(dm_in) == 2:
# get cp of outflowing massflow (error of mean temp is <<0.5% compared
# to a heat cap. ratio calculation, thus negligible and ok):
cp_out = cp_water(np.sum(T[dm_in]) / 2)
# calc T_out by mixing the inflowing massflows (*-1 since outgoing
# massflows have a negative sign):
T_out = np.sum(dm[dm_in] * cp_T[dm_in] * T[dm_in]) / (
cp_out * -1 * dm[dm_out]
)
# pass on port values by switching temperatures:
# set old T_out to both in-ports
T[dm_in] = T[dm_out]
# set calculated T_out to out-port
T[dm_out] = T_out
# if 2 ports < 0 are True, 3w connector is splitter:
elif np.sum(dm_out) == 2:
# no real calculation has to be done here, just switching
# temperatures and passing them on to opposite ports
# calc the temp which will be shown at the inflowing port as a mean
# of the temps of outflowing ports (at in port connected part will
# see a mean value of both temps for heat conduction):
T_in = T[dm_out].sum() / 2
# pass inflowing temp to outflowing ports:
T[dm_out] = T[dm_in]
# pass mean out temp to in port:
T[dm_in] = T_in
# if one port has 0 massflow, sum of dm_in == 1:
elif np.sum(dm_in) == 1:
# get port with 0 massflow:
dm0 = np.equal(dm, 0)
# this port 'sees' a mean of the other two temperatures:
T[dm0] = T[~dm0].sum() / 2
# the out ports heat flow is dominated by convection, thus it
# only 'sees' the in flow temperature but not the 0 flow temp:
T[dm_out] = T[dm_in]
# the in ports heat flow is also dominated by convection, but here
# it is easy to implement the 0-flow port influence, since heat
# flow by convection of part connected to in port is not affected
# by connected temperature, thus also get a mean value:
T[dm_in] = T[~dm_in].sum() / 2
# if all ports have 0 massflow:
else:
# here all ports see a mean of the other ports:
# bkp 2 ports
T0 = (T[1] + T[2]) / 2
T1 = (T[0] + T[2]) / 2
# save means to port values:
T[2] = (T[0] + T[1]) / 2
T[0] = T0
T[1] = T1
# save results:
res[stepnum[0]] = T
# @nb.njit((float64[:,:], float64[:,:], float64[:], float64[:], float64[:,:,:],
# float64[:,:], float64[:,:],
# float64[:], float64[:], float64[:], float64[:], float64[:,:],
# float64[:], float64[:], float64[:],
# int32[:], int32[:],
# float64, float64, float64, float64, float64[:],
# int32, int32, int32, float64, int32))
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def solve_platehex(
T,
T_port,
T_mean,
ports_all,
res,
dm_io,
dm_port,
cp_mean,
lam_mean,
rho_mean,
ny_mean,
lam_wll,
alpha_i,
UA_fld_wll,
UA_fld_wll_fld,
port_own_idx,
port_link_idx,
A_plate_eff,
A_channel,
d_h,
s_plate,
cell_dist,
num_A,
num_channels_sup,
num_channels_dmd,
corr_Re,
stepnum,
):
# get temperatures of connected ports:
_port_values_to_cont(
ports_all=ports_all, port_link_idx=port_link_idx, out=T_port
)
# get massflows
# only positive flows for side supply and set entry temperature
# depending on flow direction:
if dm_io[0] >= 0:
dm_sup = dm_io[0] # positive sup side flow
T_sup_in = T_port[0] # entry temp. sup is sup_in
T_sup_out = T_port[1] # out temp. sup is sup_out
dm_port[0] = dm_io[0]
dm_port[1] = 0.0
else:
dm_sup = -dm_io[0] # positive sup side flow
T_sup_in = T_port[1] # entry temp. sup is sup_out
T_sup_out = T_port[0] # out temp. sup is sup_in
dm_port[0] = 0.0
dm_port[1] = -dm_io[0]
# only positive flows for side demand and set entry temperature
# depending on flow direction:
if dm_io[1] >= 0:
dm_dmd = dm_io[1] # positive dmd side flow
T_dmd_in = T_port[2] # entry temp. dmd is dmd_in
T_dmd_out = T_port[3] # out temp. dmd is dmd_out
dm_port[2] = dm_io[1]
dm_port[3] = 0.0
else:
dm_dmd = -dm_io[1] # positive dmd side flow
T_dmd_in = T_port[3] # entry temp. dmd is dmd_out
T_dmd_out = T_port[2] # out temp. dmd is dmd_in
dm_port[2] = 0.0
dm_port[3] = -dm_io[1]
# do all the calculations only if both massflows are not 0
if dm_sup != 0 and dm_dmd != 0:
# get mean temperature of both fluid sides as a mean of the neighboring
# port temperatures which is a good approximation when there is a flow
# through the HEX (without flow no calc. will be done anyways):
T_mean[0] = (T_sup_in + T_sup_out) / 2 # sup side
T_mean[1] = (T_dmd_in + T_dmd_out) / 2 # dmd side
# get thermodynamic properties of water
# for mean cell temp:
water_mat_props(T_mean, cp_mean, lam_mean, rho_mean, ny_mean)
# for conduction between fluid cells and wall:
# get inner alpha value between fluid and wall from nusselt equations:
# supply side:
phex_alpha_i_wll_sep(
dm_sup / num_channels_sup,
T_mean[0],
T_mean[0],
rho_mean[0],
ny_mean[0],
lam_mean[0],
A_channel,
d_h,
cell_dist,
corr_Re,
alpha_i[0:1],
)
# demand side:
phex_alpha_i_wll_sep(
dm_dmd / num_channels_dmd,
T_mean[1],
T_mean[1],
rho_mean[1],
ny_mean[1],
lam_mean[1],
A_channel,
d_h,
cell_dist,
corr_Re,
alpha_i[1:2],
)
# get resulting UA from both fluid sides, assuming same values in all
# channels of one pass, to the midpoint (-> /2) of the separating wall.
# index [1, 1] for lam_wll selects own lam_wll to avoid overwriting by
# _get_port_connections method of simenv.
UA_fld_wll[:] = UA_fld_wll_plate(
A_plate_eff, s_plate / 2, alpha_i, lam_wll[0]
)
# get total UA value from fluid to fluid (in VDI Wärmeatlas this is kA)
# by calculating the series circuit of the UA fluid wall values with
# the number of effective heat transfer areas (num plates - 2)
UA_fld_wll_fld[0] = (
series_circuit_UA(UA_fld_wll[0], UA_fld_wll[1]) * num_A
)
# Heat exchanger dimensionless coefficients:
# heat capacity flows (ok, this is not dimensionless...)
dC_sup = dm_sup * cp_mean[0]
dC_dmd = dm_dmd * cp_mean[1]
# calculate NTU value of the supply side:
if dC_sup != 0:
NTU_sup = UA_fld_wll_fld[0] / dC_sup
else:
NTU_sup = np.inf
# calculate heat capacity flow ratio for the supply to demand side:
if dC_dmd != 0:
R_sup = dC_sup / dC_dmd
else:
R_sup = np.inf
# get dimensionless change in temperature
rs_ntus = (R_sup - 1) * NTU_sup # precalc. for speed
# for the supply side
if (
R_sup != 1 and rs_ntus < 100 # heat cap flow ratio not 1 and valid
): # range for exp
P_sup = (1 - np.exp(rs_ntus)) / (1 - R_sup * np.exp(rs_ntus))
elif rs_ntus > 100: # if exp in not-defined range
P_sup = 1 / R_sup # largely only depending on 1/R
            # above a specific value. For float64 everything above around
            # 50 to 100 is cut off due to float precision and quite exactly
            # equals 1/R.
else: # heat cap flow ratio equal 1
P_sup = NTU_sup / (1 + NTU_sup)
# for the demand side:
P_dmd = P_sup * R_sup
# if P_sup has a NaN value, for example when a flow is zero or very
# close to zero (NaN check is: Number is not equal to itself!):
if P_sup != P_sup:
P_sup = 0
P_dmd = 0
# calculate supply and demand outlet temperatures from this and
# overwrite the estimate value taken from ports:
T_sup_out = T_sup_in - P_sup * ( # supply side outlet temp.
T_sup_in - T_dmd_in
)
T_dmd_out = T_dmd_in + P_dmd * ( # demand side outlet temp.
T_sup_in - T_dmd_in
)
# calculate heat flow from supply fluid to wall and demand fluid:
# dQ = dC_sup * (T_sup_in - T_sup_out)
else:
# else if at least one side is zero.
# fill with the values of connected ports where the flow is 0 (this
# is already done automatically in the beginning where temperature
# values are set depending on the flow direction, so do nothing
# for zero flow).
# pass on the value where the flow is not 0.
if dm_sup != 0: # check supply side for flow not zero
T_sup_out = T_sup_in # pass on if sup flow not 0
        elif dm_dmd != 0:  # check demand side for flow not zero
T_dmd_out = T_dmd_in # pass on if dmd flow not 0
# set new values to array for port interaction with other parts,
# depending on flow direction:
if dm_io[0] >= 0: # sup side normal flow
T[0] = T_sup_in # - 273.15
T[1] = T_sup_out # - 273.15
else: # sup side inversed flow
T[1] = T_sup_in # - 273.15
T[0] = T_sup_out # - 273.15
# only positive flows for side demand and set entry temperature
# depending on flow direction:
if dm_io[1] >= 0: # dmd side normal flow
T[2] = T_dmd_in # - 273.15
T[3] = T_dmd_out # - 273.15
else: # dmd side inversed flow
T[3] = T_dmd_in # - 273.15
T[2] = T_dmd_out # - 273.15
# save results:
res[stepnum[0]] = T
# dT_cond[1, 1] = 0
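# Illustrative sketch (assumed example values): dimensionless temperature
# change P of the supply and demand side for a given NTU and heat capacity
# flow ratio R, using the same correlation branches as solve_platehex() above.
def _example_ntu_effectiveness(NTU_sup=2.0, R_sup=0.8):
    rs_ntus = (R_sup - 1) * NTU_sup
    if R_sup != 1 and rs_ntus < 100:  # general case, exp in valid range
        P_sup = (1 - np.exp(rs_ntus)) / (1 - R_sup * np.exp(rs_ntus))
    elif rs_ntus > 100:  # exp outside defined range
        P_sup = 1 / R_sup
    else:  # heat capacity flow ratio equal 1
        P_sup = NTU_sup / (1 + NTU_sup)
    return P_sup, P_sup * R_sup  # supply and demand side values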
@jit(
(float64[:], int32[:], float64[:]),
nopython=True,
nogil=GLOB_NOGIL,
cache=True,
) # parallel=GLOB_PARALLEL useful
def _get_p_arr_pump(ports_all, port_link_idx, T):
"""
Values of requested ports are saved to temperature array.
"""
T[:] = ports_all[port_link_idx][::-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_mix_overload(arglist):
solve_mix(*arglist)
@nb.jit(
nopython=True, nogil=GLOB_NOGIL, cache=True
) # parallel=GLOB_PARALLEL useful
def solve_mix(port_array, _port_link_idx, dm_io, T):
# get port array:
T[:] = port_array[_port_link_idx]
# calc T_out by mixing A and B if there is a flow through the valve
if dm_io[2] != 0:
# get outlet temperature as mean of both inlet temperatures for cp
# calculation:
T[2] = (T[0] + T[1]) / 2
# get heat capacities:
cp = cp_water(T)
# get outlet temperature by mixing the massflows:
T_AB = (dm_io[0] * cp[0] * T[0] + dm_io[1] * cp[1] * T[1]) / (
dm_io[2] * cp[2]
)
# set mean outlet temp. to both in-ports for heat conduction
T[0:2] = T[2]
# set calculated T_out to out-port
T[2] = T_AB
else:
# else if dm of AB port is zero, the temperatures all are a mean of
# the other ports temperatures to enable heat calculation:
T_AB = (T[0] + T[1]) / 2
T_A = (T[1] + T[2]) / 2
T_B = (T[0] + T[2]) / 2
# set to temperature array:
T[0] = T_A
T[1] = T_B
T[2] = T_AB
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_split_overload(arglist):
solve_split(*arglist)
@nb.jit(
(float64[:], int32[:], float64[:]),
nopython=True,
nogil=GLOB_NOGIL,
cache=True,
)
def solve_split(port_array, _port_link_idx, T):
T[:] = port_array[_port_link_idx]
T_in = T[0:2].sum() / 2
T[0:2] = T[2]
T[2] = T_in
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_pump_overload(arglist):
solve_pump(*arglist)
@nb.njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def solve_pump(ports_all, port_link_idx, T, res, res_dm, dm, stepnum):
"""
Solve method of part pump.
"""
# get and invert temperatures
_get_p_arr_pump(ports_all=ports_all, port_link_idx=port_link_idx, T=T)
# save massflow to massflow result grid
res_dm[stepnum[0], 0] = dm[0]
# save temperatures to temperature result grid
res[stepnum[0]] = T
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def ctrl_deadtime(
deadtime,
timestep,
dt_arr,
pv_arr,
len_dt_arr,
dt_idx,
last_dt_idx,
delayed_pv,
sp,
pv,
):
dt_arr += timestep
# last deadtime index is saved for interpolation if in last
# step a new pv was found, otherwise last deadtime index will
# be increased by one to include roll by one element:
if dt_idx != -1:
last_dt_idx = dt_idx
else:
last_dt_idx += 1
# reset deadtime index with a value which will be kept if no
# new pv value reached (so the old one will be kept):
dt_idx = -1
# loop through deadtime array
for i in range(len_dt_arr):
# if time in deadtime array is equal or greater deadtime
# return index of the position (only the first occurrence
# will be found!)
if dt_arr[i] >= deadtime:
dt_idx = i
break
# calculate delayed pv (will not be overwritten after calc.
# until next step, thus can be reused if no new value is found)
# if a new value has reached deadtime delay in only one step:
if dt_idx == 0:
# interpolate delayed pv from previous pv, new pv and
# expired time and time between prev. and new pv:
delayed_pv = delayed_pv + (pv_arr[0] - delayed_pv) / (deadtime) * (
dt_arr[0]
)
# if a new value has reached deadtime delay after more than
# one step:
elif dt_idx > 0:
# if deadtime is hit exactly (for example with constant
# timesteps):
if dt_arr[dt_idx] == deadtime:
delayed_pv = pv_arr[dt_idx]
else:
            # interpolate value if deadtime is overshot and not hit:
delayed_pv = pv_arr[dt_idx - 1] + (
pv_arr[dt_idx] - pv_arr[dt_idx - 1]
) / (dt_arr[dt_idx] - dt_arr[dt_idx - 1]) * (
deadtime - dt_arr[dt_idx - 1]
)
# if deadtime delay was not reached:
else:
# interpolate delayed pv from previous pv, next pv and
# expired time and time till next pv:
delayed_pv = delayed_pv + (pv_arr[last_dt_idx] - delayed_pv) / (
deadtime - (dt_arr[last_dt_idx] - timestep)
) * (timestep)
# calculate error from delayed pv_value (delayed pv will not
# be overwritten until next step):
error = sp[0] - delayed_pv
# set all time values in deadtime array after found value to 0:
dt_arr[dt_idx:] = 0
# roll deadtime and pv array one step backwards:
dt_arr[1:] = dt_arr[0:-1]
pv_arr[1:] = pv_arr[0:-1]
# insert current pv into first slot of pv_arr:
pv_arr[0] = pv[0]
# set expired time of current pv to zero:
dt_arr[0] = 0
return error, delayed_pv, dt_idx, last_dt_idx
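# Illustrative sketch (assumed values): delaying a process variable by a 2 s
# deadtime over a few steps of 1 s. Buffer sizes and start values are example
# assumptions, not values used by the simulation environment.
def _example_ctrl_deadtime():
    deadtime, timestep = 2.0, 1.0
    dt_arr = np.zeros(5)  # expired time of each stored pv value
    pv_arr = np.full(5, 20.0)  # buffer of past process variable values
    sp = np.array([50.0])  # setpoint
    delayed_pv = 20.0  # initial delayed process variable
    dt_idx, last_dt_idx = -1, 0
    error = 0.0
    for pv_now in (25.0, 30.0, 35.0):  # new pv value of each step
        error, delayed_pv, dt_idx, last_dt_idx = ctrl_deadtime(
            deadtime, timestep, dt_arr, pv_arr, dt_arr.size, dt_idx,
            last_dt_idx, delayed_pv, sp, np.array([pv_now]),
        )
    return error, delayed_pv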
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def _heun_corrector_adapt(
res,
T,
df0,
df1,
trnc_err_cell_weight,
_h,
stepnum,
rtol,
atol,
err,
new_trnc_err,
):
# solve heun method and save to result:
res[stepnum] = res[stepnum - 1] + (_h / 2) * (df0 + df1)
    # GET TRUNCATION ERROR FOR HEUN COMPARED WITH LOWER ORDER EULER
# TO CALC. NEW STEPSIZE:
# truncation error approximation is the difference of the total
# heun result and the euler (predictor) result saved in T. The
# trunc. error is calculated by taking the root mean square
# norm of the differences for each part. This applies a root
# mean square error weighting over the cells.
# To get the systems truncation error, the norms have to be
# added up by taking the root of the sum of the squares.
# get each part's local relative error as euclidean matrix norm
# (sqrt not yet taken to enable summing up the part's errors)
# weighted by the relative and absolute tolerance. tolerance weighting as
# in:
# https://github.com/scipy/scipy/blob/ ...
# 19acfed431060aafaa963f7e530c95e70cd4b85c/scipy/integrate/_ivp/rk.py#L147
trnc_err = (
(
(res[stepnum] - T)
* trnc_err_cell_weight
/ (np.maximum(res[stepnum - 1], res[stepnum]) * rtol + atol)
)
** 2
).sum()
# sum these local relative errors up for all parts:
err += trnc_err
# now get root mean square error for part by dividing part's
# trnc_err by its amount of cells and taking the root:
new_trnc_err = (trnc_err / T.size) ** 0.5
# now also save to T arrays to be able to easily use
# memoryviews in diff functions:
T[:] = res[stepnum]
return err, new_trnc_err
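# Illustrative sketch (assumed values): tolerance weighted truncation error of
# a single two-cell part, built from the difference between the Heun
# (corrector) and Euler (predictor) results as in _heun_corrector_adapt().
def _example_heun_error_weighting():
    heun = np.array([50.0, 60.0])  # Heun corrector result of the step
    euler = np.array([50.1, 59.9])  # Euler predictor result of the step
    prev = np.array([49.5, 60.2])  # result of the previous step
    rtol, atol = 1e-3, 1e-3
    trnc_err_cell_weight = 1.0
    scale = np.maximum(prev, heun) * rtol + atol
    trnc_err = (((heun - euler) * trnc_err_cell_weight / scale) ** 2).sum()
    new_trnc_err = (trnc_err / heun.size) ** 0.5  # part's RMS error
    return trnc_err, new_trnc_err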
# @nb.njit(nogil=GLOB_NOGIL, cache=True)
def _embedded_adapt_stepsize(
err,
sys_trnc_err,
num_cells_tot_nmrc,
step_accepted,
failed_steps,
safety,
order,
solver_state,
min_factor,
max_factor,
min_stepsize,
max_stepsize,
ports_all,
parr_bkp,
vN_max_step,
step_stable,
cnt_instable,
timeframe,
_h,
timestep,
stepnum,
):
# ADAPTIVE TIMESTEP CALCULATION:
# get all part's RMS error by dividing err by the amount of all
# cells in the system and taking the root:
err_rms = (err / num_cells_tot_nmrc) ** 0.5
# save to array to enable stepwise system error lookup:
sys_trnc_err[stepnum] = err_rms
# check for good timesteps:
# err_rms already has the relative and absolute tolerance included,
# thus only checking against its value:
if err_rms < 1:
# error is lower than tolerance, thus step is accepted.
step_accepted = True
# save successful timestep to simulation environment:
timestep = _h
# get new timestep (err_rms is inverted thus negative power):
_h *= min(
max_factor[0], max(1, (safety * err_rms ** (-1 / (order + 1))))
)
# check if step is not above max step:
if _h > max_stepsize:
_h = max_stepsize # reduce to max stepsize
# save to state that max stepsize was reached:
solver_state[stepnum] = 5
else:
# else save to state that error was ok in i steps:
solver_state[stepnum] = 4
elif err_rms == 0.0:
# if no RMS (most probably the step was too small so rounding
# error below machine precision led to cut off of digits) step
# will also be accepted:
step_accepted = True
# save successful timestep to simulation environment:
timestep = _h
# get maximum step increase for next step:
_h *= max_factor[0]
# save to state that machine epsilon was reached:
solver_state[stepnum] = 7
# check if step is not above max step:
if _h > max_stepsize:
_h = max_stepsize # reduce to max stepsize
else:
# else error was too big.
# check if stepsize already is at minimum stepsize. this can
# only be true, if stepsize has already been reduced to min.
# stepsize, thus to avoid infinite loop set step_accepted=True
# and skip the rest of the loop:
if _h == min_stepsize:
step_accepted = True
# save not successful but still accepted timestep to
# simulation environment:
timestep = _h
# save this special event to solver state:
solver_state[stepnum] = 6
else:
# else if stepsize not yet at min stepsize, reduce stepsize
# further by error estimate if this is not less than the
# minimum factor and redo the step.
_h *= max(min_factor, (safety * err_rms ** (-1 / (order + 1))))
# check if step is not below min step:
if _h < min_stepsize:
_h = min_stepsize # increase to min stepsize
# reset ports array for retrying step:
ports_all[:] = parr_bkp
# count failed steps at this step number:
failed_steps[stepnum] += 1
# catch von Neumann stability condition:
if not step_stable[0]:
# if von Neumann stability violated, do not accept step.
# This can happen even though the RMS-error is ok, since single
# non-stable parts can have a too small impact on the RMS. In
# this case _step_accepted will be overwritten.
step_accepted = False # redo the step
        # increase counter for failed loops
cnt_instable += 1
# set new step to maximum von Neumann step (calc. in parts):
_h = vN_max_step[0]
# count failed steps at this step number:
failed_steps[stepnum] += 1
# reset ports array for retrying step:
ports_all[:] = parr_bkp
# break loop if no solution was found after 50 tries:
if cnt_instable == 50:
# set timeframe to 0 to break the outer simulation loop
timeframe = 1e-9
# save error to solver state:
solver_state[stepnum] = 99
"""
        TODO: How do I integrate the break here?
"""
# break
return step_accepted, timestep, timeframe, cnt_instable
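# Illustrative sketch (assumed values): step size scaling for an accepted step
# with the same controller formula as used above in _embedded_adapt_stepsize().
def _example_stepsize_scaling(_h=1.0, err_rms=0.25, safety=0.9, order=1,
                              max_factor=10.0, max_stepsize=5.0):
    # error is below 1, thus the step is accepted and the next step enlarged:
    factor = min(max_factor, max(1.0, safety * err_rms ** (-1 / (order + 1))))
    return min(_h * factor, max_stepsize)  # limit to the maximum step size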
# %% CALCULATE DIMENSIONLESS NUMBERS:
@njit(nogil=GLOB_NOGIL, cache=True)
def rayleigh_number(T_s, T_inf, Pr, ny, Kelvin, flow_length):
"""
Calculate the Rayleigh number for the given parameters [1]_.
Parameters:
-----------
T_s : float, int, np.ndarray
Surface temperature in [°C] or [K].
T_inf : float, int, np.ndarray
Surrounding fluid temperature in [°C] or [K].
Pr : float, int, np.ndarray
Prandtl number of the surrounding fluid at the mean temperature:
$$(T_s + T_{inf}) / 2$$
For (dry) air this can be set to a constant value of ca.:
$$Pr = 0.708$$
ny : float, int, np.ndarray
Kinematic viscosity in [m^2 / s] of the surrounding fluid at the mean
temperature: $$(T_s + T_{inf}) / 2$$
Kelvin : float, int
If temperatures `T_s` and `T_inf` are given in [°C], Kelvin has to be
set to `Kelvin=273.15`. If `T_s` and `T_inf` are given in [K], Kelvin
has to be set to `Kelvin=0`
flow_length : float, int
Specific flow length in [m]. Has to be calculated depending on the part
geometry. See function calc_flow_length() for further information.
Notes:
------
.. [1] VDI Wärmeatlas 2013, VDI-Gesellschaft Verfahrenstechnik und
Chemieingenieurwesen, Düsseldorf, Deutschland, p. 754
"""
# Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
# eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
# with 1/T_inf (F1 eq (2)):
return (
np.abs(T_s - T_inf)
* 9.81
* flow_length ** 3
* Pr
/ ((T_inf + Kelvin) * ny ** 2)
)
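# Illustrative sketch (assumed values): Rayleigh number of a 1 m high vertical
# surface at 60 °C in 20 °C ambient dry air. The kinematic viscosity is taken
# at the mean (film) temperature as required above; `ny_dryair_return` is
# defined further below in this module.
def _example_rayleigh_number():
    T_s, T_inf = 60.0, 20.0  # surface and ambient temperature in °C
    ny = ny_dryair_return((T_s + T_inf) / 2.0)  # ny of dry air at 40 °C
    return rayleigh_number(T_s, T_inf, 0.708, ny, 273.15, 1.0)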
# %% CALCULATE MATERIAL PROPERTIES:
# ---> water
# calc density from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_rho_water(T, rho):
# 4th degree
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
rho[:] = (
999.88785713136213
+ 4.9604454990529602e-02 * T
- 7.4722666453460717e-03 * T ** 2
+ 4.1094484438154484e-05 * T ** 3
- 1.2915789546323614e-07 * T ** 4
)
# 3rd degree
# rho[:] = (1000.0614995891804 + 1.3246507417626112e-02*T
# - 5.8171082149854319e-03*T**2 + 1.5262905345518088e-05*T**3)
# calc density from celsius temperature AND RETURN the result:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def rho_water(T):
# 4th degree
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
999.88785713136213
+ 4.9604454990529602e-02 * T
- 7.4722666453460717e-03 * T ** 2
+ 4.1094484438154484e-05 * T ** 3
- 1.2915789546323614e-07 * T ** 4
)
# calc heat conduction from celsius temperature AND RETURN IT:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def lambda_water(T):
# 3rd degree (4th degree not sufficiently better)
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
5.6987912853229539e-01
+ 1.7878370402545738e-03 * T
- 5.9998217273879795e-06 * T ** 2
- 8.6964577115093407e-09 * T ** 3
)
# calc heat conduction from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_lambda_water(T, lam):
# 3rd degree (4th degree not sufficiently better)
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
lam[:] = (
5.6987912853229539e-01
+ 1.7878370402545738e-03 * T
- 5.9998217273879795e-06 * T ** 2
- 8.6964577115093407e-09 * T ** 3
)
# calc specific heat capacity from celsius temperature (4th degree, about 10%
# slower but a tiny bit more accurate):
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_cp_water(T, cp):
# 4th degree
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
cp[:] = (
4215.4023574179992
- 2.8853943283519348 * T
+ 7.490580684801168e-02 * T ** 2
- 7.7807143441700321e-04 * T ** 3
+ 3.2109328970410432e-06 * T ** 4
)
# 3rd degree
# cp[:] = (4211.0855150125581 - 1.9815167178349438*T
# + 3.375770177242976e-02*T**2 - 1.3588485500876595e-04*T**3)
# calc specific heat capacity from celsius temperature AND RETURN IT
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def cp_water(T):
# 4th degree
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
4215.4023574179992
- 2.8853943283519348 * T
+ 7.490580684801168e-02 * T ** 2
- 7.7807143441700321e-04 * T ** 3
+ 3.2109328970410432e-06 * T ** 4
)
# calc kinematic viscosity from celsius temperature after VDI Wärmeatlas 2013
# table D2.1:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_ny_water(T, ny):
# 4th degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
ny[:] = (
1.7764473380494155e-06
- 5.5640275781265404e-08 * T
+ 1.0243072887494426e-09 * T ** 2
- 9.7954460136981165e-12 * T ** 3
+ 3.6460468745062724e-14 * T ** 4
)
# calc kinematic viscosity from celsius temperature AND RETURN IT, VDI
# Wärmeatlas 2013 table D2.1:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_water(T):
# 4th degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
1.7764473380494155e-06
- 5.5640275781265404e-08 * T
+ 1.0243072887494426e-09 * T ** 2
- 9.7954460136981165e-12 * T ** 3
+ 3.6460468745062724e-14 * T ** 4
)
# calc Prandtl number from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Pr_water(T, Pr):
# 4th degree:
# Pr[:] = (12.909891117064289 - 0.4207372206483363*T
# + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
# + 2.4685760188512201e-07*T**4)
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
Pr[:] = (
12.5780108199379058
- 0.35124680571767508 * T
+ 4.3225480444706085e-03 * T ** 2
- 1.9174193923188898e-05 * T ** 3
)
# calc Prandtl number from celsius temperature AND RETURN IT
# (alot faster for single values):
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def Pr_water_return(T):
# 4th degree:
# Pr[:] = (12.909891117064289 - 0.4207372206483363*T
# + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
# + 2.4685760188512201e-07*T**4)
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
12.5780108199379058
- 0.35124680571767508 * T
+ 4.3225480444706085e-03 * T ** 2
- 1.9174193923188898e-05 * T ** 3
)
# calc isobaric expansion coefficient in [1/K] from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_beta_water(T, beta):
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
beta[:] = (
-5.87985364766666e-05
+ 1.5641955219950547e-05 * T
- 1.3587684743777981e-07 * T ** 2
+ 6.1220503308149086e-10 * T ** 3
)
# calc isobaric expansion coefficient in [1/K] from celsius temperature
# AND RETURN IT:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def beta_water_return(T):
# 3rd degree:
# Tclp = T.copy()
# Tclp[Tclp > 100.] = 100.
return (
-5.87985364766666e-05
+ 1.5641955219950547e-05 * T
- 1.3587684743777981e-07 * T ** 2
+ 6.1220503308149086e-10 * T ** 3
)
# calc Reynolds number
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Re_water(v, L, ny, Re):
Re[:] = np.abs(v) * L / ny
# calc Reynolds number and RETURN the result
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def Re_water_return(v, L, ny):
return np.abs(v) * L / ny
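# Illustrative sketch: the `get_*` variants write into a preallocated output
# array, while the returning variants allocate and return the result. The
# temperatures and flow values below are example assumptions.
def _example_water_properties():
    T = np.array([20.0, 50.0, 80.0])  # temperatures in °C
    cp = np.empty_like(T)
    get_cp_water(T, cp)  # in-place variant
    rho = rho_water(T)  # returning variant
    ny = ny_water(T)
    Re = Re_water_return(0.5, 0.02, ny)  # 0.5 m/s in a 0.02 m channel
    return cp, rho, Re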
# ---> dry air:
# calc density from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_rho_dryair(T, rho):
# 2nd degree
rho[:] = (
1.2767987012987012
- 0.0046968614718614701 * T
+ 1.4296536796536256e-05 * T ** 2
)
# calc heat conductivity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_lam_dryair(T, lam):
# 2nd degree
lam[:] = (
0.024358670995670989
+ 7.6533982683982561e-05 * T
- 4.2099567099572201e-08 * T ** 2
)
# calc heat conductivity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def lam_dryair_return(T):
# 2nd degree
return (
0.024358670995670989
+ 7.6533982683982561e-05 * T
- 4.2099567099572201e-08 * T ** 2
)
# calc kinematic viscosity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_ny_dryair(T, ny):
# 2nd degree
ny[:] = (
1.3500069264069257e-05
+ 8.8810389610389459e-08 * T
+ 1.0974025974025443e-10 * T ** 2
)
# calc kinematic viscosity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_dryair_return(T):
# 2nd degree
return (
1.3500069264069257e-05
+ 8.8810389610389459e-08 * T
+ 1.0974025974025443e-10 * T ** 2
)
# ---> humid air:
# saturation pressure in [Pa] of humid air for total pressures < 2MPa
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_saturation_pressure(T):
# 6th degree
return (
+1.56927617e-09 * T ** 6
+ 2.32760367e-06 * T ** 5
+ 3.19028425e-04 * T ** 4
+ 2.51824584e-02 * T ** 3
+ 1.42489091e00 * T ** 2
+ 4.55277840e01 * T ** 1
+ 5.99770272e02
)
# 10th degree
# return (- 1.30339138e-16*T**10 + 7.49527386e-14*T**9 - 1.59881730e-11*T**8
# + 1.54764869e-09*T**7 - 5.56609536e-08*T**6 + 1.46597641e-06*T**5
# + 4.21883898e-04*T**4 + 2.43290034e-02*T**3 + 1.38204573e+00*T**2
# + 4.58581434e+01*T + 6.02909924e+02)
# mass of water in fully saturated air in [kg H2O / kg Air] for a pressure of
# 0.1 MPa, only valid for -30 <= T <= 80 !
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_sat_water_mass(T):
r"""
Calculate the mass of water in fully saturated air (at 100% relative
humidity) in :math:`[f]= \mathtt{kg_{H_2O}}/\mathtt{kg_{Luft}}`,
valid for a pressure of :math:`0.1\mathtt{\,MPa}` and a temperature range
of :math:`-30\mathtt{\,°C}\leq T \leq 80\mathtt{\,°C}`.
"""
# assert np.all(-30 <= T) and np.all(T <= 80)
# 6th degree
# return (1.56927617e-09*T**6 + 2.32760367e-06*T**5 + 3.19028425e-04*T**4
# + 2.51824584e-02*T**3 + 1.42489091e+00*T**2 + 4.55277840e+01*T
# + 5.99770272e+02)
# 10th degree
return (
+3.47491188e-19 * T ** 10
- 6.50956001e-17 * T ** 9
+ 3.68271647e-15 * T ** 8
+ 2.06252891e-14 * T ** 7
- 7.11474217e-12 * T ** 6
+ 1.29052920e-10 * T ** 5
+ 6.62755505e-09 * T ** 4
+ 8.79652019e-08 * T ** 3
+ 8.16034548e-06 * T ** 2
+ 2.98380899e-04 * T
+ 3.79413965e-03
)
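# Illustrative sketch: saturation pressure and saturated water content of
# humid air at 20 °C. Both polynomials are only valid within the ranges
# stated above.
def _example_humid_air():
    T = 20.0  # air temperature in °C
    p_sat = humid_air_saturation_pressure(T)  # saturation pressure in Pa
    x_sat = humid_air_sat_water_mass(T)  # water mass in kg H2O per kg air
    return p_sat, x_sat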
# %% part shape specific calculations:
def calc_flow_length(*, part_shape, vertical, **kwargs):
"""
Calculate the shape specific flow length of a part for the calculation
of heat-transfer specific numbers, like the Rayleigh number.
"""
err_str = (
'`part_shape=' + str(part_shape) + '` was passed to '
'`calc_flow_length()`. The following shapes are supported:\n'
'\'plane\', \'cylinder\', \'sphere\'.'
)
assert part_shape in ['plane', 'cylinder', 'sphere'], err_str
err_str = (
'`vertical=' + str(vertical) + '` was passed to '
'`calc_flow_length()`. `vertical` must be a bool value, '
'depicting the orientation of the surface of which the flow '
'length shall be calculated. For a sphere this argument will '
'be ignored.'
)
assert type(vertical) == bool, err_str
err_str_len = (
'The part shape specific length parameters to be passed to '
'`calc_flow_length()` depend on the part\'s shape and '
'orientation. The following parameters are needed to calculate '
'the flow length for each shape:\n'
' plane, vertical=True: `height=X`\n'
' plane, vertical=False (horizontal): `width=X`, `depth=Y`. '
'Pass the diameter as value for width and depth for a circular '
'disk.\n'
' cylinder, vertical=True: `height=X`\n'
' cylinder, vertical=False (horizontal): `diameter=X`\n'
' sphere: `diameter=X`'
)
if part_shape in ('plane', 'cylinder') and vertical:
assert 'height' in kwargs and isinstance(
kwargs['height'], (int, float)
), err_str_len
return kwargs['height'] # VDI Wärmeatlas 2013, F2.1
elif part_shape == 'plane' and not vertical:
# VDI Wärmeatlas 2013, F2.3
assert 'width' in kwargs and isinstance(
kwargs['width'], (int, float)
), err_str_len
assert 'depth' in kwargs and isinstance(
kwargs['depth'], (int, float)
), err_str_len
return (kwargs['width'] * kwargs['depth']) / (
2 * (kwargs['width'] + kwargs['depth'])
)
elif part_shape == 'cylinder' and not vertical:
assert 'diameter' in kwargs and isinstance(
kwargs['diameter'], (int, float)
), err_str_len
return kwargs['diameter'] * np.pi / 2 # VDI Wärmeatlas 2013, F2.4.1
else:
assert 'diameter' in kwargs and isinstance(
kwargs['diameter'], (int, float)
), err_str_len
return kwargs['diameter'] # VDI Wärmeatlas 2013, F2.4.2
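# Illustrative sketch: the required keyword arguments of calc_flow_length()
# depend on part shape and orientation, as laid out in the assertion messages
# above. The dimensions below are example assumptions.
def _example_flow_lengths():
    vertical_tes = calc_flow_length(
        part_shape='cylinder', vertical=True, height=2.1)
    horizontal_pipe = calc_flow_length(
        part_shape='cylinder', vertical=False, diameter=0.028)
    horizontal_lid = calc_flow_length(
        part_shape='plane', vertical=False, width=0.6, depth=0.6)
    return vertical_tes, horizontal_pipe, horizontal_lid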
# caller to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re(dm, rho, ny, A, d_i, Re):
get_Re_water(dm / (rho * A), d_i, ny, Re)
# manual inlining function to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re2(dm, rho, ny, A, d_i, Re):
Re[:] = dm * d_i / (rho * A * ny)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_alpha_i(dm, T, rho, ny, lam_fld, A, d_i, x, alpha):
"""
Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
pipe and the pipe wall for each cell of a round pipe or thermal energy
storage of diameter `d_i` and length `Len`.
    In this case, the wall is considered in the same row of the temperature
array as the fluid and thus can't have temperatures different from the
fluid temperature.
Parameters:
-----------
dm : np.ndarray, float, integer
Massflow in the pipe/TES in [kg/s].
rho : np.ndarray
Fluid density in [kg/m**3].
ny : np.ndarray
Fluid kinematic viscosity in [m**2/s].
lam_fld : np.ndarray
Fluid heat conductivity in [W/(mK)].
A : float, integer
Inner pipe cross section in [m**2] for round pipes or hydraulic cross
section for pipes of other shapes.
d_i : float, integer
Inner pipe diameter in [m] for round pipes or hydraulic diameter for
pipes of other shapes.
x : float, integer
Distance of cell from start of the pipe [m]. If the massflow `dm` is
negative, the inverse (the distance from the other end of the pipe) is
taken.
alpha : np.ndarray
Array to save the resulting alpha value in [W/(m**2K)] for all cells
of the pipe/TES.
"""
# save shape:
shape = rho.shape
# shape = (100,)
# preallocate arrays:
Re = np.zeros(shape)
# Pr = np.zeros((100,))
Pr_f = np.zeros(T.shape)
Nu = np.zeros(shape)
# get Reynolds and Prandtl number:
get_Re_water(dm / (rho * A), d_i, ny, Re)
get_Pr_water(T, Pr_f)
# get Peclet number to replace calculations of Re*Pr
Pe = Re * Pr_f
# use reversed x if first cell of dm is negative (this applies only to
# parts where the massflow is the same in all cells, since these are the
# only cells with a cell-specific x-array and a single-cell-massflow. For
# all other parts, this reversing does not change anything):
if dm[0] < 0:
xi = x[::-1] # create reversed view
else:
xi = x[:] # create view
# get a mask for the turbulent flows:
turb = Re > 2300
# equations for laminar Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 3.1.1 equation (3), (1) and (2)
Nu[~turb] = (
49.371 # 49.371 is 3.66**3 of eq (1) + 0.7**3 of eq (3)
+ (1.077 * (Pe[~turb] * d_i / xi[~turb]) ** (1 / 3) - 0.7)
** 3 # eq (2)
) ** (1 / 3)
# equations for turbulent Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 4.1 equations (27) and (28):
f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
Nu[turb] = (
(f / 8 * Pe[turb])
/ (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
* (1 + (d_i / xi[turb]) ** (2 / 3) / 3)
)
# alpha value is Nusselt number * fluid lambda / d_i,
# VDI Wärmeatlas 2013, Chapter G1 - 4.1:
alpha[:] = Nu * lam_fld / d_i
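# Illustrative sketch (assumed geometry and flow): inner heat transfer
# coefficient of a pipe with 0.025 m inner diameter, discretized into 10 cells
# of 0.1 m length, at 0.2 kg/s and 50 °C, using the water property helpers
# defined above.
def _example_pipe_alpha_i():
    n = 10
    d_i = 0.025  # inner diameter in m
    A = np.pi * d_i ** 2 / 4  # inner cross section in m**2
    grid_spacing = 0.1  # cell length in m
    T = np.full(n, 50.0)  # fluid temperature in °C
    dm = np.full(n, 0.2)  # massflow in kg/s
    rho = np.empty(n)
    ny = np.empty(n)
    lam = np.empty(n)
    get_rho_water(T, rho)
    get_ny_water(T, ny)
    get_lambda_water(T, lam)
    x = (np.arange(n) + 1) * grid_spacing  # distance of cells from pipe start
    alpha = np.empty(n)
    pipe_alpha_i(dm, T, rho, ny, lam, A, d_i, x, alpha)
    return alpha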
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_alpha_i_wll_sep(dm, T, rho, ny, lam_fld, A, d_i, x, alpha):
"""
Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
pipe and the pipe wall for each cell of a round pipe or thermal energy
storage of diameter `d_i` and length `Len`.
    In this case, the wall is considered in a separate row of the temperature
array and can thus have temperatures different from the fluid temperature.
Parameters:
-----------
dm : np.ndarray, float, integer
Massflow in the pipe/TES in [kg/s].
rho : np.ndarray
Fluid density in [kg/m**3].
ny : np.ndarray
Fluid kinematic viscosity in [m**2/s].
lam_fld : np.ndarray
Fluid heat conductivity in [W/(mK)].
A : float, integer
Inner pipe cross section in [m**2] for round pipes or hydraulic cross
section for pipes of other shapes.
d_i : float, integer
Inner pipe diameter in [m] for round pipes or hydraulic diameter for
pipes of other shapes.
    x : float, integer
        Distance of cell from start of the pipe [m]. If the massflow `dm` is
        negative, the inverse (the distance from the other end of the pipe) is
        taken.
alpha : np.ndarray
Array to save the resulting alpha value in [W/(m**2K)] for all cells
of the pipe/TES.
"""
# save shape:
shape = rho.shape
# shape = (100,)
# preallocate arrays:
Re = np.zeros(shape)
# Pr = np.zeros((100,))
Pr = np.zeros(T.shape)
Nu = np.zeros(shape)
# get Reynolds and Prandtl number:
get_Re_water(dm / (rho * A), d_i, ny, Re)
get_Pr_water(T, Pr)
# get correction factor for the difference in wall and fluid temperature
# following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
K = (Pr[:, 0] / Pr[:, 1]) ** 0.11
# save Prandtl number of first row (fluid row) to array for fluid Pr number
Pr_f = Pr[:, 0]
# get Peclet number to replace calculations of Re*Pr
Pe = Re * Pr_f
# use reversed x if first cell of dm is negative (this applies only to
# parts where the massflow is the same in all cells, since these are the
# only cells with a cell-specific x-array and a single-cell-massflow. For
# all other parts, this reversing does not change anything):
if dm[0] < 0:
xi = x[::-1] # create reversed view
else:
xi = x[:] # create view
# get a mask for the turbulent flows:
turb = Re > 2300
# equations for laminar Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 3.1.1 equation (3), (1) and (2)
Nu[~turb] = (
49.371 # 49.371 is 3.66**3 of eq (1) + 0.7**3 of eq (3)
+ (1.077 * (Pe[~turb] * d_i / xi[~turb]) ** (1 / 3) - 0.7)
** 3 # eq (2)
) ** (1 / 3)
# equations for turbulent Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 4.1 equations (27) and (28):
f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
Nu[turb] = (
(f / 8 * Pe[turb])
/ (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
* (1 + (d_i / xi[turb]) ** (2 / 3) / 3)
)
# alpha value is Nusselt number * correction factor * fluid lambda / d_i,
# VDI Wärmeatlas 2013, Chapter G1 - 4.1:
alpha[:] = Nu * K * lam_fld / d_i
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def phex_alpha_i_wll_sep(
dm, T_fld, T_wll, rho, ny, lam_fld, A, d_h, x, corr_Re, alpha
):
"""
Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
plate heat exchanger and the (rectangular) heat exchanger channel wall for
each cell of a plate heat exchanger.
    In this case, the wall is considered in a separate row of the temperature
array and can thus have temperatures different from the fluid temperature.
Parameters:
-----------
dm : np.ndarray, float, integer
Massflow in the pipe/TES in [kg/s].
rho : np.ndarray
Fluid density in [kg/m**3].
ny : np.ndarray
Fluid kinematic viscosity in [m**2/s].
lam_fld : np.ndarray
Fluid heat conductivity in [W/(mK)].
A : float, integer
Inner pipe cross section in [m**2] for round pipes or hydraulic cross
section (fluid area perpendicular to the flow direction) for pipes of
other shapes.
    d_h : float, integer
        Hydraulic diameter of the heat exchanger channel in [m].
x : float, integer
Total plate heat exchanger length in [m].
alpha : np.ndarray
Array to save the resulting alpha value in [W/(m**2K)] for all cells
of the pipe/TES.
"""
# save shape:
# shape = rho.shape
shape = alpha.shape
# preallocate arrays:
Re = np.zeros(shape) # not needed, since for a pipe/hex this is a scalar
# Pr = np.zeros((100,))
Nu = np.zeros(shape)
# get Reynolds and Prandtl number:
get_Re_water(dm / (rho * A), d_h, ny, Re) # hydraulic diameter as length!
# Re = Re_water_return(dm / (rho * A), d_h, ny) # hydraulic diameter as len!
Pr_f = Pr_water_return(T_fld)
Pr_wll = Pr_water_return(T_wll)
# apply correction difference for turbulators on Reynolds number:
Re += corr_Re # [0]
# get correction factor for the difference in wall and fluid temperature
# following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
K = (Pr_f / Pr_wll) ** 0.11
# get Peclet number to replace calculations of Re*Pr
Pe = Re * Pr_f
# get a mask for the turbulent flows:
turb = Re > 2300
# equations for mean laminar Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 3.1.2 equation (12) with (4), (5) and (11)
Pe_dx = Pe[~turb] * d_h / x # precalculate this
Nu[~turb] = (
49.371 # 49.371 is 3.66**3 of eq (4) + 0.7**3 of eq (12)
+ (1.615 * (Pe_dx) ** (1 / 3) - 0.7) ** 3 # equation (5)
+ ((2 / (1 + 22 * Pr_f)) ** (1 / 6) * (Pe_dx) ** 0.5) ** 3 # eq(11)
) ** (1 / 3)
# equations for mean turbulent Nusselt number following VDI Wärmeatlas
# 2013 Chapter G1 - 4.1 equations (27) and (26):
f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
Nu[turb] = (
(f / 8 * Pe[turb])
/ (1 + 12.7 * (Pr_f ** (2 / 3) - 1) * (f / 8) ** (0.5))
* (1 + (d_h / x) ** (2 / 3))
)
# alpha value is Nusselt number * correction factor * fluid lambda / d_i,
# VDI Wärmeatlas 2013, Chapter G1 - 4.1:
alpha[:] = Nu * K * lam_fld / d_h
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def phex_alpha_i_wll_sep_discretized(
dm, T_fld, T_wll, rho, ny, lam_fld, A, d_h, x, corr_Re, alpha
):
"""
Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
plate heat exchanger and the (rectangular) heat exchanger channel wall for
each cell of a plate heat exchanger.
    In this case, the wall is considered in a separate row of the temperature
array and can thus have temperatures different from the fluid temperature.
Parameters:
-----------
dm : np.ndarray, float, integer
Massflow in the pipe/TES in [kg/s].
rho : np.ndarray
Fluid density in [kg/m**3].
ny : np.ndarray
Fluid kinematic viscosity in [m**2/s].
lam_fld : np.ndarray
Fluid heat conductivity in [W/(mK)].
A : float, integer
Inner pipe cross section in [m**2] for round pipes or hydraulic cross
section (fluid area perpendicular to the flow direction) for pipes of
other shapes.
    d_h : float, integer
        Hydraulic diameter of the heat exchanger channel in [m].
    x : np.ndarray
        Distance of each cell from the start of the heat exchanger in [m].
alpha : np.ndarray
Array to save the resulting alpha value in [W/(m**2K)] for all cells
of the pipe/TES.
"""
# save shape:
# shape = rho.shape
shape = alpha.shape
# preallocate arrays:
Re = np.zeros(shape) # not needed, since for a pipe/hex this is a scalar
# Pr = np.zeros((100,))
Nu = np.zeros(shape)
# get Reynolds and Prandtl number:
get_Re_water(dm / (rho * A), d_h, ny, Re) # hydraulic diameter as length!
# Re = Re_water_return(dm / (rho * A), d_h, ny) # hydraulic diameter as len!
Pr_f = Pr_water_return(T_fld)
Pr_wll = Pr_water_return(T_wll)
# apply correction difference for turbulators on Reynolds number:
Re += corr_Re # [0]
# get correction factor for the difference in wall and fluid temperature
# following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
K = (Pr_f / Pr_wll) ** 0.11
# get Peclet number to replace calculations of Re*Pr
Pe = Re * Pr_f
# get a mask for the turbulent flows:
turb = Re > 2300
# equations for mean laminar Nusselt number following VDI Wärmeatlas 2013,
# Chapter G1 - 3.1.2 equation (12) with (4), (5) and (11)
Pe_dx = Pe[~turb] * d_h / x[~turb] # precalculate this
Nu[~turb] = (
49.371 # 49.371 is 3.66**3 of eq (4) + 0.7**3 of eq (12)
+ (1.615 * (Pe_dx) ** (1 / 3) - 0.7) ** 3 # equation (5)
+ ((2 / (1 + 22 * Pr_f[~turb])) ** (1 / 6) * (Pe_dx) ** 0.5)
** 3 # eq(11)
) ** (1 / 3)
# equations for mean turbulent Nusselt number following VDI Wärmeatlas
# 2013 Chapter G1 - 4.1 equations (27) and (26):
f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
Nu[turb] = (
(f / 8 * Pe[turb])
/ (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
* (1 + (d_h / x[turb]) ** (2 / 3))
)
# alpha value is Nusselt number * correction factor * fluid lambda / d_i,
# VDI Wärmeatlas 2013, Chapter G1 - 4.1:
alpha[:] = Nu * K * lam_fld / d_h
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def cylinder_alpha_inf(T_s, T_inf, flow_length, vertical, r_total, alpha_inf):
"""
Calculates the outer alpha value in [W/(m**2K)], between the outer cylinder
wall and the fluid of the environment, of a cylinder in a standard
environment on the outer surface.
Parameters:
-----------
r_total : float, int
Total radius of the cylinder including wall and additional material
layer like insulation.
alpha_inf : np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape or
        be a single array cell. This array is used to calculate the new outer
        surface temperature and to get the new alpha_inf value for the
        calculation of the current U*A-value, thus it is also the output array.
T_inf : float, int, np.ndarray
Ambient temperature in [°C] or [K]. If given as array, it must be a
single cell!
flow_length : float, int
        Equivalent flow length of the horizontal pipe or vertical pipe/TES in
        [m].
    vertical : bool
        Giving information if this pipe/TES is vertical or horizontal. If
        vertical, the correlations for a vertical cylinder are used, otherwise
        those for a horizontal cylinder.
"""
# Kelvin temperature:
Kelvin = 273.15
# Prandtl number of DRY air is nearly constant and thus set to:
Pr = 0.708
# f_Pr = 0.347
# get mean temperature of wall and ambient air:
T_mean = (T_inf + T_s) / 2
# get kin. viscosity and lambda for mean temperature:
ny = np.zeros(T_mean.shape)
lam = np.zeros(T_mean.shape)
get_ny_dryair(T_mean, ny)
get_lam_dryair(T_mean, lam)
# get Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
# eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
# with 1/T_inf (F1 eq (2)):
Ra = (
np.abs(T_s - T_inf)
* 9.81
* flow_length ** 3
* Pr
/ ((T_inf + Kelvin) * ny ** 2)
)
# check if the cylinder is vertical or horizontal:
if vertical:
# get Prandtl number influence function for vertical surfaces according
# to VDI Wärmeatlas 2013 chapter F2 equation (2):
# f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
f_Pr = 0.3466023585520853
# get the Nusselt number for a vertical cylinder by use of VDI
# Wärmeatlas 2013 chapter F2.1 eq(1) and eq(3):
Nu = (
0.825 + 0.387 * (Ra * f_Pr) ** (1 / 6)
) ** 2 + 0.435 * flow_length / (2 * r_total)
else:
# get Prandtl number influence function for horizontal cylinders
# according to VDI Wärmeatlas 2013 chapter F2.4 equation (13):
# f_Pr = (1 + (0.559 / Pr)**(9/16))**(-16/9) this is const for const Pr
f_Pr = 0.3269207911296459
# get the Nusselt number for a horizontal cylinder by use of VDI
# Wärmeatlas 2013 chapter F2.4 eq(11):
Nu = (0.752 + 0.387 * (Ra * f_Pr) ** (1 / 6)) ** 2
# get alpha:
alpha_inf[:] = Nu * lam / flow_length
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def plane_alpha_inf(T_s, T_inf, flow_length, vertical, top):
"""
    Calculates the outer alpha value in [W/(m**2K)] between the outer plane
    surface and the fluid of the standard environment.
    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperature in [°C] or [K] per cell.
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    flow_length : float, int
        Equivalent flow length of the plane in [m]. See calc_flow_length()
        for further information.
    vertical : bool
        Giving information if this plane is vertical or horizontal. If
        vertical, the correlations for a vertical plane are used, otherwise
        those for a horizontal plane.
    top : np.ndarray
        Only used for horizontal planes: boolean array stating for each cell
        if the surface is facing upwards (True) or downwards (False).
"""
# check if the plane is vertical or horizontal:
if vertical:
return vert_plane_alpha_inf(T_s, T_inf, flow_length)
else:
return hor_plane_alpha_inf(T_s, T_inf, flow_length, top)
@njit(nogil=GLOB_NOGIL, cache=True)
def vert_plane_alpha_inf(T_s, T_inf, flow_length):
"""
Calculates the outer alpha value in [W/(m**2K)] between the vertical plane
surface wall and the fluid of the standard environment.
"""
# Kelvin temperature:
Kelvin = 273.15
# Prandtl number of DRY air is nearly constant and thus set to:
Pr = 0.708
# f_Pr = 0.347
# get mean temperature of wall and ambient air:
T_mean = (T_inf + T_s) / 2
# get kin. viscosity and lambda for mean temperature:
ny = np.zeros(T_mean.shape)
lam = np.zeros(T_mean.shape)
get_ny_dryair(T_mean, ny)
get_lam_dryair(T_mean, lam)
# # get Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
# # eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
# # with 1/T_inf (F1 eq (2)):
# Ra = ((T_s - T_inf) * 9.81 * flow_length**3 * Pr
# / ((T_inf + Kelvin) * ny**2))
# get Prandtl number influence function for vertical surfaces according
# to VDI Wärmeatlas 2013 chapter F2.1 equation (2):
# f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
f_Pr = 0.3466023585520853
# get the Nusselt number for a vertical cylinder by use of VDI
# Wärmeatlas 2013 chapter F2.1 eq(1):
Nu = (
0.825
+ 0.387
* (rayleigh_number(T_s, T_inf, Pr, ny, Kelvin, flow_length) * f_Pr)
** (1 / 6)
) ** 2
# get alpha:
return Nu * lam / flow_length
# @njit(float64(float64, float64, float64, nb.boolean),
@njit(nogil=GLOB_NOGIL, cache=True)
def hor_plane_alpha_inf(T_s, T_inf, flow_length, top):
"""
Calculates the outer alpha value in [W/(m**2K)] between the plane surface
wall and the fluid of the standard environment of a horizontal plane.
    Expects 1D arrays for `T_s` and `top` (one value per cell).
    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperature in [°C] per cell.
    T_inf : float, int
        Ambient temperature in [°C].
    flow_length : float, int
        Equivalent flow length of the horizontal plane in [m]. See
        calc_flow_length() for further information.
    top : np.ndarray
        Boolean array stating for each cell if the surface is facing upwards
        (True) or downwards (False).
"""
# Kelvin temperature:
Kelvin = 273.15
# Prandtl number of DRY air is nearly constant and thus set to:
Pr = 0.708
# f_Pr = 0.347
# get mean temperature of wall and ambient air:
T_mean = (T_inf + T_s) / 2
# get kin. viscosity and lambda for mean temperature:
ny = ny_dryair_return(T_mean)
lam = lam_dryair_return(T_mean)
Nu = np.empty(T_mean.shape)
Ra = rayleigh_number( # get Rayleigh-number:
T_s, T_inf, Pr, ny, Kelvin, flow_length
)
# calculation following VDI Wärmeatlas 2013
for i in range(T_s.shape[0]):
if (top[i] and T_s[i] >= T_inf) or (not top[i] and T_s[i] < T_inf):
# VDI F2.3.1
# heat conduction from the top of the plate to fluid OR from the fluid
# to the bottom of the plate
# get Prandtl number influence function for hor. surfaces according
# to VDI Wärmeatlas 2013 chapter F2.3.1 equation (9):
# f_Pr = (1 + (0.322 / Pr)**(11/20))**(-20/11)this is const for const Pr
f_Pr = 0.40306002707296223
# Ra_f_Pr = rayleigh_number( # get Ra*f_Pr for turbulence check
# T_s[i], T_inf, Pr, ny[i], Kelvin, flow_length) * f_Pr
Ra_f_Pr = Ra[i] * f_Pr
# get the Nusselt number for a hor. plane, VDI Wärmeatlas 2013:
if Ra_f_Pr <= 7e4: # laminar flow
Nu[i] = 0.766 * (Ra_f_Pr) ** (1 / 5) # VDI F2.3.1 eq (7)
else: # turbulent flow
Nu[i] = 0.15 * (Ra_f_Pr) ** (1 / 3) # VDI F2.3.1 eq (8)
else: # VDI F2.3.2
# heat conduction from the fluid to the top of the plate OR from the
# bottom of the plate to the fluid
# get Prandtl number influence function for vertical surfaces according
# to VDI Wärmeatlas 2013 chapter F2.1 equation (2):
# f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
f_Pr = 0.3466023585520853
# Ra_f_Pr = rayleigh_number( # get Ra*f_Pr
# T_s[i], T_inf, Pr, ny[i], Kelvin, flow_length) * f_Pr
# Ra_f_Pr = Ra[i] * f_Pr
# get Nusselt number, only valid for 1e3 <= Ra*f_Pr <= 1e10, but there
# is no known correlation for turbulent convection!
Nu[i] = 0.6 * (Ra[i] * f_Pr) ** (1 / 5) # VDI F2.3.2 eq (10)
# return alpha:
return Nu * lam / flow_length
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_ins_amb_cyl(
A_i, r_ln_wll, r_ln_ins, r_rins, alpha_i, alpha_inf, lam_wll, lam_ins, out
):
"""
Calculates the U*A-value for the heat flow to or from the fluid inside a
cylinder like a pipe or a round TES to or from the ambient in radial
direction.
Layers which are considered: fluid, wall material, insulation or any
other additional material layer, ambient.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_ln_wll : float, int
Radial thickness factor of the wall heat conductivity referred to the
reference area. Must be pre-calculated with:
r_ln_wll = r_i * np.log(r_o / r_i)
r_ln_ins : float, int
Radial thickness factor of the insulation heat conductivity referred to
the reference area. Must be pre-calculated with:
r_ln_ins = r_i * np.log((r_o + s_ins) / r_o)
r_rins : float, int
Radial thickness factor of the insulation-to-ambient heat transfer
coefficient referred to the reference area. Must be pre-calculated
with:
r_rins = r_i / (r_o + s_ins)
alpha_i : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the fluid inside the
pipe and the wall. The shape must equal the fluid temperature array
shape, if given as array.
alpha_inf : np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape or
        be a single array cell. This array is used to calculate the new outer
        surface temperature and to get the new alpha_inf value for the
        calculation of the current U*A-value.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
lam_ins : int, float, np.ndarray
Outer material layer heat conductivity in [W / (mK)]. The shape must
equal the fluid temperature array shape, if given as array.
out : float, int, np.ndarray
Total heat transfer coefficient in [W/K] result output array.
"""
out[:] = A_i / (
1 / alpha_i
+ r_ln_wll / lam_wll
+ r_ln_ins / lam_ins
+ r_rins / alpha_inf
)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_amb_cyl(A_i, r_ln_wll, r_ro, alpha_i, alpha_inf, lam_wll, UA):
"""
Calculates the U*A-value for the heat flow to or from the fluid inside a
cylinder like a pipe or a round TES to or from the ambient in radial
direction.
Layers which are considered: fluid, wall material, ambient.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_ln_wll : float, int
Radial thickness factor of the wall heat conductivity referred to the
reference area. Must be pre-calculated with:
r_ln_wll = r_i * np.log(r_o / r_i)
r_ro : float, int
Radial thickness factor of the wall-to-ambient heat transfer
coefficient referred to the reference area. Must be pre-calculated
with:
r_ro = r_i / r_o
alpha_i : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the fluid inside the
pipe and the wall. The shape must equal the fluid temperature array
shape, if given as array.
alpha_inf : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape,
if given as array.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
"""
UA[:] = A_i / (1 / alpha_i + r_ln_wll / lam_wll + r_ro / alpha_inf)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_cyl(A_i, r_i, r_o, alpha_i, lam_wll, UA):
"""
Calculates the U*A-value for the heat flow to or from the fluid inside a
cylinder like a pipe or a round TES to or from the wall in radial
direction. The wall is considered as a single finite volume element per
cell, thus the heat flow is calculated to the mid-point (radius wise, not
mass wise, thus r_mid = (r_o + r_i) / 2) of the wall.
Layers which are considered: fluid, wall material.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_i : float, int
Radius in [m] of the fluid-wall-contact-area.
r_o : float, int
Radius in [m] of the outer wall surface.
alpha_i : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the fluid inside the
pipe and the wall. The shape must equal the fluid temperature array
shape, if given as array.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
"""
# the log mean outer diameter is taken for length of lam_wll:
# np.log((r_o / r_i + 1) / 2) = np.log((r_o + r_i)/ 2 / r_i)
# with r_wll = (r_o + r_i) / 2
print('UA_fld_wll -> replace np.log with const!')
UA[:] = A_i / (1 / alpha_i + r_i * np.log((r_o / r_i + 1) / 2) / lam_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_cyl(
A_i, r_i, r_o, r_ln_ins, r_rins, alpha_inf, lam_wll, lam_ins, UA
):
"""
Calculates the U*A-value for the heat flow to or from the wall of a
cylinder like a pipe or a round TES to or from the ambient in radial
direction. The wall is considered as a single finite volume element per
cell, thus the heat flow is calculated from/to the mid-point of the wall.
Layers which are considered: wall material, insulation or any other
additional material layer, ambient.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_i : float, int
Radius in [m] of the fluid-wall-contact-area.
r_o : float, int
Radius in [m] of the outer wall surface.
r_ln_ins : float, int
Radial thickness factor of the insulation heat conductivity referred to
the reference area. Must be pre-calculated with:
r_ln_ins = r_i * np.log((r_o + s_ins) / r_o)
r_rins : float, int
Radial thickness factor of the insulation-to-ambient heat transfer
coefficient referred to the reference area. Must be pre-calculated
with:
r_rins = r_i / (r_o + s_ins)
alpha_inf : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape,
if given as array.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
lam_ins : int, float, np.ndarray
Outer material layer heat conductivity in [W / (mK)]. The shape must
equal the fluid temperature array shape, if given as array.
"""
# the log mean outer diameter is taken for length of lam_wll:
# np.log(2 / (r_i / r_o + 1)) = np.log(r_o * 2 / (r_o + r_i))
# with r_wll = (r_o + r_i) / 2
UA[:] = A_i / (
r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll
+ r_ln_ins / lam_ins
+ r_rins / alpha_inf
)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_amb_cyl(A_i, r_i, r_o, alpha_inf, lam_wll, UA):
"""
Calculates the U*A-value for the heat flow to or from the wall of a
cylinder like a pipe or a round TES to or from the ambient in radial
direction. The wall is considered as a single finite volume element per
cell, thus the heat flow is calculated to the mid-point of the wall.
Layers which are considered: wall material, ambient.
The reference area must always be the fluid-wall-contact-area for
consistency with other calculations.
Parameters:
-----------
A_i : float, int
The fluid-wall-contact area PER CELL. Calculated with:
A_i = np.pi * r_i * 2 * grid_spacing
r_i : float, int
Radius in [m] of the fluid-wall-contact-area.
r_o : float, int
Radius in [m] of the outer wall surface.
alpha_inf : int, float, np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape,
if given as array.
lam_wll : int, float, np.ndarray
Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
wall temperature array shape, if given as array.
"""
# the log mean outer diameter is taken for length of lam_wll:
# np.log(2 / (r_i / r_o + 1)) = np.log(r_o * 2 / (r_o + r_i))
# with r_wll = (r_o + r_i) / 2
UA[:] = A_i / (
r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll + r_i / (r_o * alpha_inf)
)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_plate(A, s_wll, alpha_fld, lam_wll):
"""
Calculates the U*A-value for the heat flow to or from a fluid at a plate
to or from the ambient.
Layers which are considered: fluid, wall material.
The reference area must always be the cross section area.
Parameters:
-----------
A : float, int
The fluid-wall-contact area in [m^2].
s_wll : float, int
Wall thickness in [m].
alpha_fld : int, float
Heat transfer coefficient in [W/(m^2K)] between the fluid and the wall.
lam_wll : int, float
Wall heat conductivity in [W/(mK)].
Returns:
--------
UA : float, np.ndarray
U*A value in [W/K].
"""
return A / (1 / alpha_fld + s_wll / lam_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_ins_amb_plate(
A, s_wll, s_ins, alpha_fld, alpha_inf, lam_wll, lam_ins
):
"""
Calculates the U*A-value for the heat flow to or from a fluid at a plate
with or without insulation to or from the ambient.
Layers which are considered: fluid, wall material, insulation, ambient.
The reference area must always be the cross section area.
Parameters:
-----------
A : float, int
The fluid-wall-contact area in [m^2].
s_wll : float, int
Wall thickness in [m].
s_ins : float, int
Insulation thickness in [m]. Can be zero.
alpha_fld : int, float
Heat transfer coefficient in [W/(m^2K)] between the fluid and the wall.
alpha_inf : int, float
Heat transfer coefficient in [W/(m^2K)] between the outer layer and
the ambient.
lam_wll : int, float
Wall heat conductivity in [W/(mK)].
lam_ins : int, float
Insulation heat conductivity in [W/(mK)].
Returns:
--------
UA : float, np.ndarray
U*A value in [W/K].
"""
return A / (
1 / alpha_fld + s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf
)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_plate(A, s_wll, s_ins, lam_wll, lam_ins, alpha_inf):
"""
Calculates the U*A-value for the heat flow to or from a plate with or
without insulation to or from the ambient.
Layers which are considered: wall material, insulation, ambient.
The reference area must always be the cross section area.
Parameters:
-----------
A : float, int
The fluid-wall-contact area in [m^2].
s_wll : float, int
Wall thickness in [m].
s_ins : float, int
Insulation thickness in [m].
lam_wll : int, float
Wall heat conductivity in [W/(mK)].
lam_ins : int, float
Insulation heat conductivity in [W/(mK)].
alpha_inf : int, float
Heat transfer coefficient in [W/(m^2K)] between the insulation and the
ambient.
Returns:
--------
UA : float, np.ndarray
U*A value in [W/K].
"""
return A / (s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state_inplace(T, T_inf, A_s, alpha_inf, UA, T_s):
"""
Parameters:
-----------
A_s : float, int
The outer surface area (air-contact-area) PER CELL. Calculated with:
A_s = np.pi * r_s * 2 * grid_spacing
alpha_inf : np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape or
be a single array cell. This array is used to calculate the new outer
surface temperature and to get the new alpha_inf value for the
calculation of the current U*A-value.
UA : float, int, np.ndarray
Total heat transfer coefficient in [W/K].
T_inf : float, int, np.ndarray
Ambient temperature in [°C] or [K]. If given as array, it must be a
single cell!
"""
# get outer surface temperature, following WTP Formelsammlung Chapter 3.3
# with sigma = (T-T_inf) / (T_i - T_inf) instead of the constant heat
# production formula. This formula is only for steady state, thus an error
# will be incorporated. To get the outer layer temperature, the heatflow
# from the fluid through the pipe-wall (and insulation) to ambient is set
# equal with the heatflow from the outer surface (index o) to ambient:
# (T_s - T_inf) * alpha_inf * A_s = U * A_s * (T_i - T_inf)
# Since UA already incorporates the inner fluid-wall-contact-surface as
# reference area, alpha_inf needs to be adjusted by its area.
T_s[:] = T_inf + (T - T_inf) * UA / (alpha_inf * A_s)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state(T, T_inf, A_s, alpha_inf, UA):
"""
Parameters:
-----------
A_s : float, int
The outer surface area (air-contact-area) PER CELL. Calculated with:
A_s = np.pi * r_s * 2 * grid_spacing
alpha_inf : np.ndarray
Heat transfer coefficient in [W / (m**2K)] between the outer layer and
the ambient. The shape must equal the fluid temperature array shape or
be a single array cell. This array is used to calculate the new outer
surface temperature and to get the new alpha_inf value for the
calculation of the current U*A-value.
UA : float, int, np.ndarray
Total heat transfer coefficient in [W/K].
T_inf : float, int, np.ndarray
Ambient temperature in [°C] or [K]. If given as array, it must be a
single cell!
"""
# get outer surface temperature, following WTP Formelsammlung Chapter 3.3
# with sigma = (T-T_inf) / (T_i - T_inf) instead of the constant heat
# production formula. This formula is only for steady state, thus an error
# will be incorporated. To get the outer layer temperature, the heatflow
# from the fluid through the pipe-wall (and insulation) to ambient is set
# equal with the heatflow from the outer surface (index o) to ambient:
# (T_s - T_inf) * alpha_inf * A_s = U * A_s * (T_i - T_inf)
# Since UA already incorporates the inner fluid-wall-contact-surface as
# reference area, alpha_inf needs to be adjusted by its area.
return T_inf + (T - T_inf) * UA / (alpha_inf * A_s)
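# Illustrative usage sketch (not part of the original module): estimates the
# steady state outer surface temperature from an already known U*A value.
# All numeric values are assumed example values only.
def _example_surface_temp_steady_state():
    T = np.array([70.0])  # fluid temperature in [degC]
    T_inf = 20.0  # ambient temperature in [degC]
    A_s = np.pi * 0.0465 * 2 * 0.1  # assumed outer surface area per cell
    alpha_inf = np.array([8.0])  # assumed outer heat transfer coefficient
    UA = np.array([0.15])  # assumed total U*A in [W/K]
    return surface_temp_steady_state(T, T_inf, A_s, alpha_inf, UA)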
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def series_circuit_UA(*args):
"""
Calculates the total U*A-value for a series circuit of two or more U*A
values.
Parameters:
-----------
UA : float, int, np.ndarray
U*A value (heat conductivity) in [W/K] for each part of the series
circuit. If given as np.ndarray, all arrays have to be of the same
shape.
Returns:
--------
UA_series : float, np.ndarray
Total U*A value (heat conductivity) in [W/K] of the series.
"""
UA_series = 1 / args[0] # get inverse of first value
arg_iter = iter(args) # make iterator out of args
next(arg_iter) # skip first entry since it is already taken
for arg in arg_iter: # iterate over the rest of args
UA_series += 1 / arg # sum up inverse values
return 1 / UA_series # return inverse of sum
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def parallel_circuit_UA(*args):
"""
Calculates the total U*A-value for a parallel circuit of two or more U*A
values.
Parameters:
-----------
UA : float, int, np.ndarray
U*A value (heat conductivity) in [W/K] for each part of the parallel
circuit. If given as np.ndarray, all arrays have to be of the same
shape.
Returns:
--------
UA_series : float, np.ndarray
Total U*A value (heat conductivity) in [W/K] of the parallel circuit.
"""
UA_parallel = args[0] # get first value
arg_iter = iter(args) # make iterator out of args
next(arg_iter) # skip first entry since it is already taken
for arg in arg_iter: # iterate over the rest of args
UA_parallel += arg # sum up values
return UA_parallel # return sum
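# Illustrative usage sketch (not part of the original module): two assumed
# U*A values combined in series (layers behind each other) and in parallel
# (independent heat flow paths).
def _example_circuit_UA():
    UA_wll = 12.0  # assumed U*A of the wall layer in [W/K]
    UA_ins = 0.8  # assumed U*A of the insulation layer in [W/K]
    UA_series = series_circuit_UA(UA_wll, UA_ins)  # 1/(1/12 + 1/0.8) = 0.75
    UA_parallel = parallel_circuit_UA(UA_wll, UA_ins)  # 12 + 0.8 = 12.8
    return UA_series, UA_parallel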
# ---> GENERAL FUNCTIONS:
# logarithmic mean temperature difference:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def log_mean_temp_diff(T_A_one, T_A_two, T_B_one, T_B_two):
"""
Calculate the logarithmic mean temperature difference (LMTD) of two fluid
streams `one` and `two` of a heat exchanger with two ends `A` and `B`.
Parameters:
-----------
T_A_one : float, int, np.array
Fluid temperature of stream one at end A.
T_A_two : float, int, np.array
Fluid temperature of stream two at end A.
T_B_one : float, int, np.array
Fluid temperature of stream one at end B.
T_B_two : float, int, np.array
Fluid temperature of stream two at end B.
"""
Delta_T_A = T_A_one - T_A_two
Delta_T_B = T_B_one - T_B_two
lmtd = (Delta_T_A - Delta_T_B) / (np.log(Delta_T_A / Delta_T_B))
return lmtd
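# Illustrative usage sketch (not part of the original module): LMTD of an
# assumed counterflow heat exchanger. End A: hot inlet / cold outlet,
# end B: hot outlet / cold inlet, temperatures in [degC].
def _example_log_mean_temp_diff():
    lmtd = log_mean_temp_diff(80.0, 45.0, 50.0, 30.0)
    # Delta_T_A = 35 K, Delta_T_B = 20 K
    # lmtd = (35 - 20) / ln(35 / 20) ~= 26.8 K
    return lmtd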
# get simple moving average of the array x and N cells:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg(x, N):
arr = np.zeros(x.shape[0] + 1)
arr[1:] = x
cumsum = np.cumsum(arr)
return (cumsum[N:] - cumsum[:-N]) / float(N)
# fill the edges of x to new_length, so that input x is placed in the middle
# of the output array. if the number of new cells is not even, the array is
# shifted one cell to the end:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def fill_edges(x, new_length):
old_length = x.shape[0] # get old length
residual = new_length - old_length # get difference in lengths
x_new = np.zeros(new_length) # create new array
start = residual // 2 + residual % 2 # get start point where to insert
x_new[start : start + old_length] = x # fill new array in the middle
x_new[:start] = x[0] # fill before start with first value
x_new[old_length + start :] = x[-1] # fill at end with last value
return x_new
# this function calls simple moving average on array x and N cells AND fills
# the edges with the last and first value:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg_fill_edges(x, N):
return fill_edges(moving_avg(x, N), x.shape[0])
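# Illustrative usage sketch (not part of the original module): moving average
# of a simple ramp signal, once with the reduced output length and once with
# the edges padded to the input shape.
def _example_moving_avg():
    x = np.arange(10, dtype=np.float64)  # assumed test signal
    sma = moving_avg(x, 3)  # shape (8,), i.e. len(x) - N + 1
    sma_padded = moving_avg_fill_edges(x, 3)  # shape (10,), edges repeated
    return sma, sma_padded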
# get window weighted moving average over array x with window weight wght and
# the possibility to fill the edges with the last correct value to get an array
# of the same shape as x:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def weighted_moving_avg(x, wght, fill_edges=False):
# get number of cells to calculate average in each step and get total
# average array length:
N = wght.size
length = x.shape[0] - N + 1
# if edges shall be filled, create an array like the input array and calc.
# new starting point where the "real" moving average is starting:
if fill_edges:
wa_len = x.shape[0] # get length
residual = wa_len - length # calc. remaining edge points to be filled
start = residual // 2 + residual % 2 # calc. starting point
wa = np.zeros(wa_len) # create result array
else:
start = 0 # start at 0
wa = np.zeros(length) # create result array
# loop over array:
for i in range(length):
wa[i + start] = (x[i : i + N] * wght).sum() # calc weighted mean
# fill edges before start with first value and after end with last value
if fill_edges:
wa[:start] = wa[start]
wa[length + start :] = wa[i + start]
return wa
@nb.njit(parallel=GLOB_PARALLEL)
def root_finder(poly_coeff, roots):
"""
Finds the roots of a polynomial for an array of target values `roots`.
This means that the polynomial, given by its coefficient array
`poly_coeff`, is inverted at each value of `roots`. For a polynomial
defining the saturated water mass in air for a given temperature, this
returns the dew point temperature for a given water mass.
Since the results have a shape of n-1 for a polynomial of degree n, the
results have to be filtered. This may be done in the following way:
>>> # set all imaginary dominated values to zero:
>>> rts_arr[np.abs(rts_arr.imag) > 1e-12] = 0.
>>> # set values above an upper and lower boundary to zero:
>>> rts_arr[rts_arr > 85] = 0.
>>> rts_arr[rts_arr < 10] = 0.
>>> # extract all non-zero values:
>>> rts_arr.real[rts_arr.real != 0]
>>> # check if the shape is correct, else use other imaginary and real
>>> # bounds for setting to zero:
>>> assert rts_arr.shape == roots.shape
Parameters:
poly_coeff : np.ndarray
Polynomial coefficients to be reversed. Should be given as
**dtype=np.complex128** to avoid typing errors.
roots : np.ndarray
Roots to solve the polynomial for.
"""
polcoeffs = poly_coeff.copy()
lin_coeff = polcoeffs[-1]
rts_arr = np.zeros(
(roots.shape[0], poly_coeff.shape[0] - 1), dtype=np.complex128
)
for i in nb.prange(roots.shape[0]):
polcoeffs[-1] = lin_coeff - roots[i]
rts_arr[i, :] = np.roots(polcoeffs)
return rts_arr
# %% Empirical relations, polynomials etc. for startup times, regression...
@nb.njit
def lim_growth(x, s, b0, k):
"""
Function for limited growth. Used in several fits, thus it is implemented
here as a raw function, which can be used in closures, inlining etc.
Parameters
----------
x : float, int, np.ndarray
x values of the growth function.
s : float, optional
Limit of the growth function.
b0 : float, optional
Starting value. Values of 0 are **NOT RECOMMENDED**.
k : float, optional
Curvature parameter.
Returns
-------
float, np.ndarray
Value at point `x`.
"""
return s - (s - b0) * k ** x
@nb.njit(cache=True)
def chp_startup_th(
time, s=1.0, b0=4.3715647889609857e-4, k=8.61423130773867e-3
):
"""
Thermal power output and/or efficiency factor during EC Power XRGi20
CHP Startup.
See auswertung_bhkw.chp_fits for generation of the fit.
Parameters
----------
time : float, int, np.ndarray
Time or timepoints in **seconds** [s] at which the startup progress
shall be evaluated. 0 is the CHP start time.
s : float, optional
Maximum power to reach, maximum modulation or efficiency.
If set to 1, will return the result as a fraction of 1, else the
absolute value will be returned. The default is 1..
b0 : float, optional
Starting value. Cannot be set to zero.
The default is 4.3715647889609857e-4.
k : float, optional
Curvature parameter. The default is 8.61423130773867e-3.
Returns
-------
float, np.ndarray
Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
the same type will be returned.
"""
return s / (1 + (s / b0 - 1) * np.e ** (-k * s * time))
@nb.njit(cache=True)
def chp_startup_el(
time,
s=4.6611096613889975,
b0=-3.6832212021813398e-09,
k=0.9997484824090741,
):
"""
Electrical power output and/or efficiency factor during EC Power XRGi20
CHP Startup. Limited growth, close to linear growth, startup to full power
(99% of modulation) within 950 seconds was found to be matching
measurement data.
See auswertung_bhkw.chp_fits for generation of the fit.
Parameters
----------
time : float, int, np.ndarray
Time or timepoints in **seconds** [s] at which the startup progress
shall be evaluated. 0 is the CHP start time.
s : float, optional
Maximum power to reach, maximum modulation or efficiency.
If set to 1, will return the result as a fraction of 1, else the
absolute value will be returned. The default is 4.6611.
b0 : float, optional
Starting value. Cannot be set to zero.
The default is -3.68322e-9.
k : float, optional
Curvature parameter. The default is 0.9997484824090741.
Returns
-------
float, np.ndarray
Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
the same type will be returned.
"""
return lim_growth(time, s, b0, k)
# return s - (s - b0) * k**time
@nb.njit(cache=True)
def chp_startup_gas(time):
"""
Gas power input and/or efficiency factor during EC Power XRGi20
CHP Startup. Compound of thermal and electrical startup factors, scaled by
the efficiencies given in the datasheet. With this, full gas power input
is reached 287 s after startup. The resulting efficiency of a startup from
0 to 100% is 60%; including the extraction of remaining heat during
shutdown, the 0-1-0 efficiency is 69.1%.
Parameters
----------
time : float, int, np.ndarray
Time or timepoints in **seconds** [s] at which the startup progress
shall be evaluated. 0 is the CHP start time.
Returns
-------
float, np.ndarray
Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
the same type will be returned.
"""
return chp_startup_el(time) / 0.32733 + chp_startup_th(time) / 0.6334
@nb.njit(cache=True)
def chp_thermal_power(
modulation, s=0.60275, b0=0.972917, k=3.5990506789130166
):
"""
Thermal power output and/or efficiency factor in dependence of the
electrical power modulation of an EC Power XRGi20 CHP plant.
Limited growth fit was found to be matching measurement data.
See auswertung_bhkw.chp_fits for generation of the fit.
Parameters
----------
modulation : float, int, np.ndarray
Electrical power modulation (0 to 1) at which the thermal power output
shall be evaluated.
s : float, optional
Limit of the growth function. The default is 0.60275.
b0 : float, optional
Starting value. Cannot be set to zero. The default is 0.972917.
k : float, optional
Curvature parameter. The default is 3.5990506789130166.
Returns
-------
float, np.ndarray
Value fraction of `s` at the given `modulation`. If `modulation` is an
np.ndarray, the same type will be returned.
"""
return lim_growth(modulation, s, b0, k)
# return s - (s - b0) * k**modulation
@nb.njit(cache=True)
def chp_gas_power(
modulation, s=-1.17, b0=0.995828402366862, k=1.9507547298681704
):
"""
Gas power input (**lower heating value**) in dependency of the
electrical power modulation of an EC Power XRGi20 CHP plant.
Limited growth fit was found to be matching measurement data.
See auswertung_bhkw.chp_fits for generation of the fit.
Parameters
----------
modulation : float, int, np.ndarray
Electrical power modulation (0 to 1) at which the gas power input shall
be evaluated.
s : float, optional
Limit of the growth function. The default is -1.17.
b0 : float, optional
Starting value. Cannot be set to zero. The default is 0.995828402366862.
k : float, optional
Curvature parameter. The default is 1.9507547298681704.
Returns
-------
float, np.ndarray
Value fraction of `s` at the given `modulation`. If `modulation` is an
np.ndarray, the same type will be returned.
"""
return lim_growth(modulation, s, b0, k)
@nb.njit
def chp_shutdown_th(time, a=-1.2532286835042036e-09, b=927.5198588530006):
"""
Thermal power output/efficiency of a CHP plant.
Thermal power output and/or efficiency factor during EC Power XRGi20
CHP switchoff. Cubic fit chosen for the measurement data.
See auswertung_bhkw.chp_fits for generation of the fit.
Parameters
----------
time : float, int, np.ndarray
Time or timepoints in **seconds** [s] at which the switchoff progress
shall be evaluated. 0 is the CHP shutdown time.
a : float, optional
Scaling parameter of the cubic fit.
The default is -1.2532286835042036e-09.
b : float, optional
Offset parameter in [s] of the cubic fit.
The default is 927.5198588530006.
Returns
-------
float, np.ndarray
Remaining thermal power fraction at the time `time`. If `time` is an
np.ndarray, the same type will be returned.
"""
return a * (time - b) ** 3
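# Illustrative usage sketch (not part of the original module): evaluates the
# CHP startup and part-load curves defined above. The time vector and the
# 75% modulation point are assumed example inputs.
def _example_chp_curves():
    t = np.arange(0.0, 1200.0, 60.0)  # time after CHP start in [s]
    f_th = chp_startup_th(t)  # thermal power fraction during startup
    f_el = chp_startup_el(t)  # electrical power fraction during startup
    f_gas = chp_startup_gas(t)  # gas power fraction during startup
    eta_th_75 = chp_thermal_power(0.75)  # thermal factor at 75% modulation
    p_gas_75 = chp_gas_power(0.75)  # gas power factor at 75% modulation
    return f_th, f_el, f_gas, eta_th_75, p_gas_75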
@nb.njit
def quad_get_c(x, dy, b, c_base):
"""Get c parameter for quad polynome for condensing hex."""
return c_base - dy / (2 * b)
@nb.njit
def quad_get_b(y0, a, c):
"""Get b parameter for quad polynome for condensing hex."""
return (np.expand_dims(y0, -1) - a) / c ** 2
@nb.njit
def condensing_hex_quad_poly(
X_pred,
int_comb_idx,
nvars_per_ftr, # polynomial transf.
pca_mean,
pca_components, # PCA transformation
lm_intercept,
lm_coef, # linear model transformation
dm_water_thresh=0.1,
dx=0.01,
):
"""
Calculate condensing HEX temperatures below the valid massflow threshold.
**ONLY VALID below the valid massflow range**, typically from 0-10% of the
maximum massflow.
Parameters
----------
dx : float, optional
Delta x to determine the slope from. The default is 0.01.
Returns
-------
np.ndarray
Predicted temperatures below the massflow threshold.
"""
# extract n samples:
n_samples = X_pred.shape[0]
# prediction arrays at the boundary and +dx for the slope
# extract and save massflow values:
dm_bkp = X_pred[:, 2:3].copy() # :3 extracts it as 2D arr and avoids resh.
X_pred_bc = np.vstack( # prediction x array at the boundary
(
X_pred[:, 0],
X_pred[:, 1],
np.full((X_pred.shape[0],), dm_water_thresh),
X_pred[:, 3],
)
).T
X_pred_dx = X_pred_bc.copy() # prediction x arr with dx for slope
X_pred_dx[:, 2] += dx
y0 = X_pred[:, 1] # extract fg entry temperature
# make predictions at dm_water_thresh, the boundary of the valid
# region
X_pf_bc = transform_to_poly_nb(
X_pred_bc, int_comb_idx, nvars_per_ftr, n_samples
)
X_PC_bc = transform_pca_nb(X_pf_bc, pca_mean, pca_components)
# predict
y_hat_bc = poly_tranfs_pred(X_PC_bc, lm_intercept, lm_coef)
# make predictions at dm_water_thresh+dx for generating the slope
X_pf_dx = transform_to_poly_nb(
X_pred_dx, int_comb_idx, nvars_per_ftr, n_samples
)
X_PC_dx = transform_pca_nb(X_pf_dx, pca_mean, pca_components)
# predict
y_hat_dx = poly_tranfs_pred(X_PC_dx, lm_intercept, lm_coef)
dy = (y_hat_bc - y_hat_dx) / dx # get the slopes
# set c to dm_water_thresh for the first iteration of both temperatures
c = np.array([[dm_water_thresh, dm_water_thresh]], dtype=np.float64) #
for i in range(1):
b = quad_get_b(y0=y0, a=y_hat_bc, c=c)
c = quad_get_c(x=dm_bkp, dy=dy, b=b, c_base=dm_water_thresh)
T_pred_below_thresh = y_hat_bc + b * (dm_bkp - dm_water_thresh) ** 2
return T_pred_below_thresh
# @njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_chp_core_modulation(
process_flows,
power_modulation,
T,
T_chp_in,
T_chp_in_max,
T_chp_in_max_emrgncy,
mod_lower,
min_on_time,
min_off_time,
max_ramp_el,
startup_in_progress,
shutdown_in_progress,
chp_state,
startup_at_time,
shutdown_at_time,
startup_duration,
shutdown_duration,
chp_on_perc,
remaining_heat,
bin_pow_fac,
startup_factor_th,
startup_factor_el,
shutdown_factor_th,
startuptsteps,
chp_off_perc,
dt_time_temp_exc,
max_temp_exc_time,
stepnum,
time_vec,
timestep,
):
"""
Process the CHP core state for the current timestep.
Handles the on/off decision based on the modulation range, minimum on/off
times, electrical ramp limits and overtemperature shutdown, and evaluates
the startup and shutdown factors for thermal and electrical power.
"""
# process flows is only executed ONCE per timestep, afterwards the bool
# process_flows is set to False.
if process_flows[0]: # only if flows not already processed
# get current elapsed time
curr_time = time_vec[stepnum[0] - 1] + timestep
# get state of the last step
state_last_step = chp_state[stepnum[0] - 1]
# check for modulation range and set on-off-integer:
if power_modulation[0] < mod_lower:
# binary power multiplication factor to enable off-state
# for modulations < mod_lower, f.i. to avoid modulations below
# 50%.
bin_pow_fac = 0.0
chp_on = False # chp is off
else:
bin_pow_fac = 1.0
chp_on = True # chp is on
# detect changes in the state to save start/stop times
if (state_last_step != 0.0) != chp_on:
if not chp_on: # if last step chp was on and now off
# assert that minimum run time is fullfilled. if not,
# avoid switching off by keeping min. modulation
if min_on_time > (curr_time - startup_at_time):
# if minimum run time not reached, set chp to on
bin_pow_fac = 1.0
chp_on = True
power_modulation[0] = mod_lower
else: # else allow shutdown
shutdown_at_time = curr_time # chp was shutdown
shutdown_in_progress = True
# print('shutdown at {0:.3f} s'.format(curr_time))
else: # if last step chp was off and now it is on
# assert that minimum off time is fulfilled AND inlet temp. is
# not exceeding max temp. (checked as an OR over the violation
# conditions). If any is True, avoid CHP startup
if (
(min_off_time > (curr_time - shutdown_at_time))
or (T_chp_in[0] > T_chp_in_max)
or np.any(T > T_chp_in_max_emrgncy)
):
# if minimum off time not reached or temperature too
# high, set chp to off
bin_pow_fac = 0.0
chp_on = False
power_modulation[0] = 0.0
else: # else allow switching on
startup_at_time = curr_time # chp was started
startup_in_progress = True
# print('start at {0:.3f} s'.format(curr_time))
elif chp_on:
# if CHP was on last step AND is on now, check for ramps
# get difference of modulation and absolute ramp per second
mod_diff = state_last_step - power_modulation[0]
mod_ramp_abs = np.abs(mod_diff) / timestep
# if absolute ramp is higher than max ramp, limit change to
# ramp
if mod_ramp_abs > max_ramp_el:
if mod_diff <= 0.0: # ramp up too fast
power_modulation[0] = ( # set ramp to max ramp
state_last_step + max_ramp_el * timestep
)
else: # ramp down too fast
power_modulation[0] = ( # set ramp to max ramp
state_last_step - max_ramp_el * timestep
)
# if chp is on, check if inlet temperature was exceeded or any
# temperature is above emergency shutdown temp., then shutdown
if chp_on and (
(T_chp_in[0] > T_chp_in_max) or np.any(T > T_chp_in_max_emrgncy)
):
# if max inlet temp. is exceeded, check max. allowed time for
# exceeding and if too large, shutdown CHP due to overtemp.,
# independend of min. run times and other parameters.
# also if inlet temp. is above an emergency threshold.
if (dt_time_temp_exc > max_temp_exc_time) or np.any(
T > T_chp_in_max_emrgncy
):
power_modulation[0] = 0.0
bin_pow_fac = 0.0
chp_on = False
shutdown_at_time = curr_time
shutdown_in_progress = True
# emergeny_shutdown = True
# print('emergency shutdown at {0:.3f} s'.format(curr_time))
else: # if timer not exceeded
# delta t how long the temp. has been exceeded. after the
# if-else check, since +timestep is at the end of the
# step, thus relevant for the next step.
dt_time_temp_exc += timestep
else: # else if temp. not exceeded, reset timer
dt_time_temp_exc = 0.0
# save chp state:
chp_state[stepnum[0]] = bin_pow_fac * power_modulation[0]
# process startup and shutdown procedure
# is the CHP switched on? If yes, startup time is larger than
# shutdown time.
if startup_at_time > shutdown_at_time:
# if chp shutdown was quite recent, thus heat is remaining
# -> shorten startup procedure
if shutdown_factor_th > chp_off_perc:
# if shutdown was recent, take the shutdown factor and
# look where in startup can be found. then add this
# timestep where it was found to the startup time
# (=increase startup duration) to account for remaining
# heat in the system
remaining_heat = np.argmin(
np.abs(startuptsteps - shutdown_factor_th)
)
# and reset shutdown factor to zero and set shutdown in
# progress False to avoid doing this twice:
shutdown_factor_th = 0.0
shutdown_in_progress = False
# get startup duration:
startup_duration = ( # on since
curr_time - startup_at_time + remaining_heat
)
# do factor calculations only, if startup not yet finished,
# else do nothing, since factors are already set to 1
if startup_in_progress:
# power multiplication factors:
startup_factor_th = chp_startup_th(startup_duration)
startup_factor_el = chp_startup_el(startup_duration)
# limit values to 0<=x<=1
startup_factor_th = (
0.0
if startup_factor_th < 0.0
else 1.0
if startup_factor_th > 1.0
else startup_factor_th
)
startup_factor_el = (
0.0
if startup_factor_el < 0.0
else 1.0
if startup_factor_el > 1.0
else startup_factor_el
)
# check if thermal startup is completed, else go on
if startup_factor_th > chp_on_perc:
# if thermal startup is completed, set all startups as
# completed
startup_in_progress = False
startup_factor_th = 1.0
startup_factor_el = 1.0
remaining_heat = 0.0
else: # if shutdown was more recent
shutdown_duration = curr_time - shutdown_at_time # off since
if shutdown_in_progress:
shutdown_factor_th = chp_shutdown_th(shutdown_duration)
if shutdown_factor_th < chp_off_perc:
# shutdown finished. reset values
shutdown_in_progress = False
shutdown_factor_th = 0.0
# return process flows bool to disable processing flows until next step
return (
bin_pow_fac,
startup_at_time,
shutdown_at_time,
startup_in_progress,
shutdown_in_progress,
startup_factor_th,
startup_factor_el,
shutdown_factor_th,
dt_time_temp_exc,
)
# %% Simulation environment functions:
@nb.njit
def predictor_step(diff_fun, args, h, y_prev):
return y_prev + h * diff_fun(*args, h)
@nb.njit
def solve_pred_loop(h, diff_funs, all_args, all_y_prev, interm_res):
ndiffs = len(diff_funs)
for n in range(ndiffs):
interm_res[n][:] = predictor_step(
diff_funs[n], all_args[n], h, all_y_prev[n]
).ravel()
return interm_res
@nb.jit(parallel=GLOB_PARALLEL)
def heun_predictor(_h, solve_num, parts, stepnum, i):
for part in solve_num:
# if first try, add last step's part truncation error to part:
if i == 0:
parts[part]._trnc_err += parts[part].__new_trnc_err
# get results from last timestep and pass them to
# current-timestep-temperature-array:
parts[part].T[:] = parts[part].res[stepnum - 1]
# calculate differential at result:
parts[part]._df0 = solve_num[part](_h)
# calculate and save predictor step:
parts[part].T[:] = parts[part].T + _h * parts[part]._df0
# %% regression based functions
def make_poly_transf_combs_for_nb(
mdl, n_features=None, pipeline=True, poly_step='polynomialfeatures'
):
"""Construct regressor combinations for polynomial."""
if pipeline:
# get polynomial features from pipeline
poly_feats = mdl.named_steps[poly_step]
else:
poly_feats = mdl
if n_features is None:
n_features = getattr(poly_feats, 'n_input_features_', None)
if n_features is None:
raise ValueError
# extract combinations as persistent tuple
cmbntns = tuple(
poly_feats._combinations(
n_features,
poly_feats.degree,
poly_feats.interaction_only,
poly_feats.include_bias,
)
)
# make to integer indexing array and fill with dummy false value
int_cmb_idx = (
np.zeros((len(cmbntns), len(cmbntns[-1])), dtype=np.int64) - 99
)
# create tuple with number of variables per combination
nvars_per_ftr = tuple([len(combo) for combo in cmbntns])
# make combinations tuple to integer index array, leaving blank cells
# filled with dummy value (which is ought to raise an error if called)
for i, c in enumerate(cmbntns):
int_cmb_idx[i, : nvars_per_ftr[i]] = c
return int_cmb_idx, nvars_per_ftr
def extract_pca_results(mdl, pipeline=True, pca_step='pca'):
"""Extract PCA result vectors/matrices for transformation."""
if pipeline:
# get polynomial features from pipeline
pca_mdl = mdl.named_steps[pca_step]
else:
pca_mdl = mdl
return pca_mdl.mean_, pca_mdl.components_
# the following 3 functions are numba compatible:
@nb.njit
def transform_to_poly_nb(X, int_comb_idx, nvars_per_ftr, n_samples):
"""Transform X vector to polynomial for predictions."""
XP = np.ones((n_samples, int_comb_idx.shape[0]), dtype=np.float64)
for n in range(XP.shape[0]):
for i in range(XP.shape[1]):
XP[n, i] = (
XP[n, i] * X[n][int_comb_idx[i, : nvars_per_ftr[i]]].prod()
)
return XP
@nb.njit
def transform_pca_nb(XP, pca_mean, pca_components):
"""Generate PCA transformation matrix."""
return np.dot(XP - pca_mean, pca_components.T)
@nb.njit
def poly_tranfs_pred(XP_pca_transf, intercept, coef):
"""Predict PCA transformed polynomial."""
return intercept + np.dot(XP_pca_transf, coef.T)
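# Illustrative sketch (not part of the original module) of the numba
# compatible prediction chain. In the real workflow `int_comb_idx` and
# `nvars_per_ftr` come from make_poly_transf_combs_for_nb and `pca_mean`/
# `pca_components` from extract_pca_results of a fitted sklearn pipeline;
# the small arrays below are made-up stand-ins just to show the data flow.
def _example_poly_pca_prediction():
    X = np.array([[0.5, 1.0], [1.5, 2.0]], dtype=np.float64)  # 2 samples
    # degree-2 combinations of 2 features without bias term:
    # x0, x1, x0*x0, x0*x1, x1*x1 (dummy index -99 marks unused slots)
    int_comb_idx = np.array(
        [[0, -99], [1, -99], [0, 0], [0, 1], [1, 1]], dtype=np.int64
    )
    nvars_per_ftr = (1, 1, 2, 2, 2)
    XP = transform_to_poly_nb(X, int_comb_idx, nvars_per_ftr, X.shape[0])
    # made-up PCA projection onto 2 components and linear model coefficients
    pca_mean = XP.mean(axis=0)
    pca_components = np.array(
        [[0.5, 0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.5, 0.5, 0.0]]
    )
    XP_pca = transform_pca_nb(XP, pca_mean, pca_components)
    intercept = np.array([0.5])
    coef = np.array([[0.3, -0.2]])
    return poly_tranfs_pred(XP_pca, intercept, coef)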
# %% root solvers to use in/with numba funtions
@nb.njit
def root_array_secant(func, x0, args, tol, maxiter):
"""
A vectorized version of Newton, Halley, and secant methods for arrays.
Do not use this method directly. This method is called from `newton`
when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
Taken and adapted from scipy.optimize.newton
This solver may be slower than the explicit secant solver, but it is
stable and has a higher precision.
**This is the preferred solver for solving implicit differential
equations.**
"""
# Explicitly copy `x0` as `p` will be modified inplace, but the
# user's array should not be altered.
p = x0.copy()
# failures = np.ones_like(p).astype(bool)
# nz_der = np.ones_like(failures)
failures = p != -1234.4321 # bool array creation for numba
nz_der = failures.copy()
# print('using secant method')
# Secant method
dx = np.finfo(np.float64).eps ** 0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
q0 = func(p, *args)
q1 = func(p1, *args)
# active = np.ones_like(p, dtype=bool)
active = failures.copy()
for _ in range(maxiter):
nz_der = q1 != q0
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
failures[nz_der] = np.abs(dp) >= tol # not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
p1, p = p, p1
q0 = q1
q1 = func(p1, *args)
return p
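# Illustrative usage sketch (not part of the original module): solving
# x**2 - a = 0 elementwise with the vectorized secant solver. `_sqrt_resid`
# is a made-up residual function; passing it as an argument assumes numba's
# first-class function support for jitted functions.
@nb.njit
def _sqrt_resid(x, a):
    return x ** 2 - a

def _example_root_array_secant():
    a = np.array([2.0, 9.0, 16.0])
    x0 = np.ones_like(a)  # initial guesses
    # expected result: approx. [1.41421356, 3.0, 4.0]
    return root_array_secant(_sqrt_resid, x0, (a,), 1e-10, 100)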
@nb.njit
def root_array_newton(func, x0, fprime, args, tol, maxiter):
"""
A vectorized version of Newton, Halley, and secant methods for arrays.
Do not use this method directly. This method is called from `newton`
when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
Taken from scipy.optimize.newton.
Also accepts a derivative function in `fprime`.
"""
# Explicitly copy `x0` as `p` will be modified inplace, but the
# user's array should not be altered.
p = x0.copy()
# failures = np.ones_like(p).astype(bool)
# nz_der = np.ones_like(failures)
failures = p != -1234.4321 # bool array creation for numba
nz_der = failures.copy()
if fprime is not None:
# print('using newton raphson method')
# Newton-Raphson method
for iteration in range(maxiter):
# first evaluate fval
fval = func(p, *args)
# If all fval are 0, all roots have been found, then terminate
if not fval.any():
failures = fval.astype(bool)
break
fder = fprime(p, *args)
nz_der = fder != 0
# stop iterating if all derivatives are zero
if not nz_der.any():
break
# Newton step
dp = fval[nz_der] / fder[nz_der]
# only update nonzero derivatives
p[nz_der] -= dp
failures[nz_der] = np.abs(dp) >= tol # items not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
else:
# print('using secant method')
# Secant method
dx = np.finfo(np.float64).eps ** 0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
q0 = func(p, *args)
q1 = func(p1, *args)
# active = np.ones_like(p, dtype=bool)
active = failures.copy()
for iteration in range(maxiter):
nz_der = q1 != q0
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
failures[nz_der] = np.abs(dp) >= tol # not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failures[nz_der].any():
break
p1, p = p, p1
q0 = q1
q1 = func(p1, *args)
return p
@nb.njit
def root_array_newton_fast(func, x0, fprime, args, tol, maxiter):
"""
A vectorized version of Newton, Halley, and secant methods for arrays.
Do not use this method directly. This method is called from `newton`
when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
**ONLY USE THIS WHEN ACCURACY IS NOT IMPORTANT!!**
"""
# Explicitly copy `x0` as `p` will be modified inplace, but the
# user's array should not be altered.
p = x0.copy()
# failures = np.ones_like(p).astype(bool)
# nz_der = np.ones_like(failures)
nz_der = p != -1234.4321 # bool array creation for numba
if fprime is not None:
# print('using newton raphson method')
# Newton-Raphson method
for iteration in range(maxiter):
# first evaluate fval
fval = func(p, *args)
# If all fval are 0, all roots have been found, then terminate
if not fval.any():
failure = False
break
fder = fprime(p, *args)
nz_der = fder != 0
# stop iterating if all derivatives are zero
if not nz_der.any():
break
# Newton step
dp = fval[nz_der] / fder[nz_der]
# only update nonzero derivatives
p[nz_der] -= dp
failure = ((dp - tol) ** 2).mean() > tol # items not yet converged
# stop iterating if there aren't any failures, not incl zero der
if not failure:
break
else:
# print('using secant method')
# Secant method
dx = np.finfo(np.float64).eps ** 0.33
p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
q0 = func(p, *args)
q1 = func(p1, *args)
# active = np.ones_like(p, dtype=bool)
active = nz_der.copy()
for iteration in range(maxiter):
nz_der = q1 != q0
# stop iterating if all derivatives are zero
if not nz_der.any():
p = (p1 + p) / 2.0
break
# Secant Step
dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
# only update nonzero derivatives
p[nz_der] = p1[nz_der] - dp
active_zero_der = ~nz_der & active
p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
active &= nz_der # don't assign zero derivatives again
# failures[nz_der] = np.abs(dp) >= tol # not yet converged
failure = ((dp - tol) ** 2).mean() > tol
# stop iterating if there aren't any failures, not incl zero der
if not failure:
break
p1, p = p, p1
q0 = q1
q1 = func(p1, *args)
return p, iteration, q1
@nb.njit(cache=True)
def root_secant(f, x0, h, yprev, input_args, tol=1e-3, max_iter=100):
"""
Solve for root using the secant method.
This is a pure basic secant method without approximation of the Jacobian.
Parameters
----------
f : callable
Residual function of the implicit equation, called as
``f(x, yprev, h, input_args)``.
x0 : np.ndarray
Initial guess for the root.
h : float
Step size of the underlying implicit integration step.
yprev : np.ndarray
Solution of the previous step, passed through to `f`.
input_args : tuple
Additional arguments passed through to `f`.
tol : float, optional
Convergence tolerance (MSE of the residual). The default is 1e-3.
max_iter : int, optional
Maximum number of iterations. The default is 100.
Returns
-------
np.ndarray
Approximated root, also returned if not converged after `max_iter`.
"""
# set bracket to +-10% of starting point
p0 = x0 * (1 + 1e-1)
p1 = x0 * (1 - 1e-1)
# store y values instead of recomputing them
fp0 = f(p0, yprev, h, input_args)
fp1 = f(p1, yprev, h, input_args)
false_mask = fp0 * fp1 >= 0
p0[false_mask], p1[false_mask] = p1[false_mask], p0[false_mask]
# gt_eps = np.ones_like(false_mask, dtype=np.bool)
eps = 1e-8 # np.finfo(np.float64).eps * 1e4
# succesful vars:
# tol_ok = np.abs(fp1) <= tol
# iterate up to maximum number of times
for _ in range(max_iter):
# see whether the answer has converged (MSE)
if ((fp1 - tol) ** 2).mean() < tol:
return p1
# check if epsilon is reached or no diff
gt_eps = (np.abs(fp1) > eps) | (np.abs(fp0) > eps) | (fp0 != fp1)
# do calculation
p2 = (p0 * fp1 - p1 * fp0) / (fp1 - fp0)
# shift variables (prepare for next loop) and except lower eps values
p0[gt_eps], p1[gt_eps] = p1[gt_eps], p2[gt_eps]
# shift for next step
fp0, fp1 = fp1, f(p1, yprev, h, input_args)
return p1 # return if not converged
```
#### File: multisim/_utility/plotting.py
```python
import copy as _copy
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable as _mal
import numpy as _np
import pandas as _pd
import scipy.stats as _sst
from . import stat_error_measures as _sem
def heatmap_from_df(
df,
ax=None,
figsize=(16 / 2.54, 10 / 2.54),
cbar=True,
cbar_ax=None,
cmap='plasma',
ylabel=None,
cbar_label=None,
label_style='SI',
vmin=None,
vmax=None,
linewidth=0,
limit_to_valid_data=True,
log_cbar=False,
plt_kwds={},
cbar_kwds={},
extend_over=None,
extend_under=None,
):
"""
Plot heatmap from dataframe.
Parameters
----------
df : pandas DataFrame
DESCRIPTION.
ax : matplotlib.axes, optional
Axes to plot on. If not provided, a new figure will be created. The
default is None.
figsize : tuple, optional
Figure size in inches. The default is (16 / 2.54, 10 / 2.54).
cbar : bool, optional
Plot colorbar? The default is True.
cbar_ax : matplotlib.axes, optional
Axes to plot colorbar on. The default is None.
cmap : str, optional
Colormap to use. The default is 'plasma'.
ylabel : None, str, optional
String to use as ylabel. The default is None.
cbar_label : None, str, optional
String to use as colorbar label. The default is None.
label_style : str, optional
Label formatting style to use with `si_axlabel`. The default is 'SI'.
vmin : None, int, float, optional
vmin to pass to matplotlib.pcolormesh. The default is None.
vmax : None, int ,float, optional
vmax to pass to matplotlib.pcolormesh. The default is None.
linewidth : int, float, optional
linewidth (lines between filled areas) to pass to
matplotlib.pcolormesh. The default is 0.
limit_to_valid_data : bool, optional
Find first valid indices for x and y axis. Cuts out np.nan areas.
The default is True.
log_cbar : bool, optional
Logarithmic scaling for colorbar. The default is False.
plt_kwds : dict, optional
Additional arguments to pass on to matplotlib.pcolormesh. The default
is {}.
cbar_kwds : dict, optional
Additional arguments to pass on to the colorbar. The default is {}.
extend_over : None, str, tuple, optional
Set color for out-of-bound values larger than vmax. Will be applied to
`cmap.set_over()`. The default is None.
extend_under : None, str, tuple, optional
Set color for out-of-bound values lower than vmin. Will be applied to
`cmap.set_under()`. The default is None.
Returns
-------
fig : matplotlib.figure
Figure containing the plot.
ax : matplotlib.axes
Axes containing the plot.
"""
assert isinstance(
df, _pd.DataFrame
), '`df` must be a pandas DataFrame.' # assert that df is a pd dataframe
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.gca()
else:
fig = ax.get_figure()
# cut to non-nan data range
if limit_to_valid_data:
# transposed plotting, thus transposed index finding
fvxi = df.T.first_valid_index() # first valid x entry
fvyi = df.first_valid_index() # first valid y entry
lvxi = df.T.last_valid_index() # last valid x entry
lvyi = df.last_valid_index() # last valid y entry
df = df.copy().loc[fvyi:lvyi, fvxi:lvxi]
# set ax frequency if period index data is given
if isinstance(df.index, _pd.PeriodIndex):
ax.xaxis.freq = df.index.freq.rule_code
if isinstance(df.columns, _pd.PeriodIndex):
ax.yaxis.freq = df.columns.freq.rule_code
X, Y = _np.meshgrid(df.index, df.columns)
# make log colorbar
if log_cbar:
plt_kwds['norm'] = mpl.colors.LogNorm()
# extend colorbar
# get cmap and copy it only for extension to avoid altering other
# figures' cmaps
if extend_over is not None or extend_under is not None:
cmap = _copy.copy(plt.cm.get_cmap(cmap))
if extend_over is not None:
if extend_over == 'cmap':
extend_over = cmap.colors[-1]
cmap.set_over(extend_over)
cbar_kwds['extend'] = 'max' if extend_under is None else 'both'
if extend_under is not None:
if extend_under == 'cmap':
extend_under = cmap.colors[0]
cmap.set_under(extend_under)
cbar_kwds['extend'] = 'min' if extend_over is None else 'both'
cax = ax.pcolormesh(
X,
Y,
df.T,
vmax=vmax,
vmin=vmin,
cmap=cmap,
linewidth=linewidth,
antialiased=False,
**plt_kwds
)
if ylabel is not None:
assert isinstance(ylabel, (tuple, list)) and len(ylabel) == 2
si_axlabel(
ax=ax,
label=ylabel[0],
unit=ylabel[1],
which='y',
style=label_style,
)
if cbar:
if cbar_ax is None:
cbar = fig.colorbar(cax, ax=ax, **cbar_kwds)
else:
cbar = fig.colorbar(mappable=cax, cax=cbar_ax, **cbar_kwds)
if cbar_label is not None:
assert (
isinstance(cbar_label, (tuple, list)) and len(cbar_label) == 2
)
si_axlabel(
ax=cbar,
label=cbar_label[0],
unit=cbar_label[1],
which='cbar',
style=label_style,
)
ax.autoscale(tight=True)
return fig, ax
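# Illustrative usage sketch (not part of the original module): plots a random
# hour-of-day vs. day-of-year matrix. Data, labels and units are assumed
# example values only.
def _example_heatmap_from_df():
    data = _np.random.default_rng(0).random((24, 365))
    df = _pd.DataFrame(data, index=_np.arange(24), columns=_np.arange(365))
    return heatmap_from_df(
        df,
        ylabel=('Day of year', 'd'),
        cbar_label=('Heat load', 'kW'),
    )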
def prediction_realization_scatter(
y,
y_hat,
ax=None,
aspect='equal',
ax_scaling_tight=False,
errors=('R2', 'MSE', 'CV(RMSE)', 'NMBE'),
scttr_kwds=dict(c='C0', s=6, fc='none', ec='C0', alpha=1.0),
diag_kwds=dict(color='k', ls='-', lw=1),
plt_err_range='RMSE',
err_rng_kwds=dict(color='k', ls='--', lw=1),
err_kwds=dict(
bbox=dict(boxstyle="round", fc="w", ec="k", alpha=0.5, pad=0.2)
),
err_loc='bottom right',
legend_kwds=dict(),
auto_label=True,
language='eng',
plot_every=1,
fig_kwds=dict(figsize=(8 / 2.54, 8 / 2.54)),
err_vals=None,
):
"""
Plot prediction-realization (PR) scatter plot.
Parameters
----------
y : np.array, pd.Series, pd.DataFrame
Realization/measurement/observed data.
y_hat : np.array, pd.Series, pd.DataFrame
Predicted/forecast/simulated data.
ax : matplotlib.axes, optional
Axes to plot on. If not provided, a new figure will be created. The
default is None.
aspect : str, int, float, optional
Aspect ratio to use for axes. The default is 'equal'.
ax_scaling_tight : bool, optional
Use tight scaling for the axes. The default is False.
errors : tuple, optional
Statistical error measures to print in the plot. The default is
('R2', 'MSE', 'CV(RMSE)', 'NMBE').
scttr_kwds : dict, optional
Additional arguments to pass on to matplotlib.scatter. The default
is dict(c='C0', s=6, fc='none', ec='C0', alpha=1.0).
diag_kwds : dict, optional
Additional arguments to pass on to plotting the halfing diagonal. The
default is dict(color='k', ls='-', lw=1).
plt_err_range : str, optional
Plot error range around the diagonal. The default is 'RMSE'.
err_rng_kwds : dict, optional
Additional arguments to pass on to plotting the error range. The
default is dict(color='k', ls='--', lw=1).
err_kwds : dict, optional
Additional arguments to define the box-style around the error measures.
The default is dict(
bbox=dict(boxstyle="round", fc="w", ec="k", alpha=0.5, pad=0.2)).
err_loc : str, optional
Where to place the box with the error measures. The default is
'bottom right'.
legend_kwds : dict, optional
Additional arguments to pass on to legend creation. The default is
dict().
auto_label : bool, optional
Label x- and y-axis automatically using SI-style.
language : str, optional
Language to use for labeling. The default is 'eng'.
plot_every : int, optional
Plot every n points to reduce plot size. The default is 1.
fig_kwds : dict, optional
Additional arguments to pass on to figure creation. The
default is dict(figsize=(8 / 2.54, 8 / 2.54)).
err_vals : None, tuple, optional
Error values to use for annotation. The default is None.
Returns
-------
fig : matplotlib.figure
Figure containing the plot.
ax : matplotlib.axes
Axes containing the plot.
"""
if ax is None:
fig, ax = plt.subplots(1, 1, **fig_kwds)
else:
fig = ax.get_figure()
pr_min, pr_max = (
_np.min([y.min(), y_hat.min()]),
_np.max([y.max(), y_hat.max()]),
)
minmax_range = pr_max - pr_min
assert language in ('eng', 'de')
if language == 'eng':
datap = 'Data points'
bsline = 'Bisecting line'
xlabel = 'Measurement'
ylabel = 'Prediction'
elif language == 'de':
datap = 'Datenpunkte'
bsline = '$y = x$'
xlabel = 'Messwerte'
ylabel = 'Vorhersage'
ax.scatter(y[::plot_every], y_hat[::plot_every], label=datap, **scttr_kwds)
ax.plot(
[pr_min - 0.05 * minmax_range, pr_max + 0.05 * minmax_range],
[pr_min - 0.05 * minmax_range, pr_max + 0.05 * minmax_range],
label=bsline,
**diag_kwds
)
annotate_errors(
ax=ax,
y=y,
y_hat=y_hat,
errors=errors,
err_loc=err_loc,
err_vals=err_vals,
err_kwds=err_kwds,
)
ax.set_aspect(aspect)
if ax_scaling_tight:
ax.autoscale(tight=True)
else:
lims = (pr_min - 0.05 * minmax_range, pr_max + 0.05 * minmax_range)
ax.set_xlim(lims)
ax.set_ylim(lims)
ax.set_ylim(ax.get_xlim())
if auto_label:
si_axlabel(ax, label=ylabel)
si_axlabel(ax, label=xlabel, which='x')
ax.grid(True)
ax.legend(**legend_kwds)
fig.tight_layout(pad=0)
return fig, ax
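# Illustrative usage sketch (not part of the original module): PR scatter of
# synthetic measurement vs. prediction data. The noise level is an assumed
# example value; error annotation relies on the module's annotate_errors
# helper.
def _example_prediction_realization_scatter():
    rng = _np.random.default_rng(0)
    y = rng.normal(50.0, 10.0, 500)  # synthetic "measured" values
    y_hat = y + rng.normal(0.0, 2.0, 500)  # synthetic "predicted" values
    return prediction_realization_scatter(y, y_hat, language='eng')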
def prediction_realization_2d_kde(
y,
y_hat,
steps=100,
ax=None,
contour=True,
cmap='Blues',
norm=True,
vmin=None,
vmax=None,
cont_lines=True,
cbar=True,
line_color='k',
fontsize=8,
aspect='equal',
extend='both',
cm_under='w',
cm_over='k',
errors=('R2', 'CV(RMSE)', 'NMBE'),
plt_kwds={},
**err_kwds
):
"""
Plot 2-dimensional prediction-realization (PR) KDE plot.
Uses gaussian kernel density estimate
Parameters
----------
y : np.array, pd.Series, pd.DataFrame
Realization/measurement/observed data.
y_hat : np.array, pd.Series, pd.DataFrame
Predicted/forecast/simulated data.
steps : int, optional
Steps to use for calculation the gaussian kernel density estimate. The
default is 100.
ax : matplotlib.axes, optional
Axes to plot on. If not provided, a new figure will be created. The
default is None.
contour : bool, optional
Plot as a contour plot. The default is True.
cmap : str, optional
Colormap to use. The default is 'Blues'.
norm : bool, optional
Normalize linearly to vmin-vmax range **OR** set to 'log' for
logarithmic normalization between vmin and vmax. The default is True.
vmin : None, int, float, optional
Minimum value to display. The default is None.
vmax : None, int ,float, optional
Maximum value to display. The default is None.
cont_lines : bool, optional
Display contour lines if plotting as a contour plot. The default
is True.
cbar : bool, optional
Plot colorbar. The default is True.
line_color : str, optional
Color for halfing diagonal. The default is 'k'.
fontsize : int, optional
Font size for error measures. The default is 8.
aspect : str, int, float, optional
Aspect ratio to use for axes. The default is 'equal'.
extend : str, optional
Extend values or clip under vmin, over vmax or both. The default
is 'both'.
cm_under : str, optional
Color to use for values clipped under vmin. The default is 'w'.
cm_over : str, optional
Color to use for values clipped over vmax. The default is 'k'.
errors : tuple, optional
Statistical error measures to put into axis. The default
is ('R2', 'CV(RMSE)', 'NMBE').
plt_kwds : dict, optional
Additional arguments to pass on to the plotting methods (either
matplotlib.imshow, matplotlib.contour or matplotlib.contourf). The
default is dict().
**err_kwds : keyword arguments, optional
Additional keyword arguments to pass to error calculation
Returns
-------
{'fig': fig, 'ax': ax, 'mappable': im, 'cbar': cbar} : dict
Dictionary of created plotting objects.
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
assert isinstance(steps, (int, complex))
# calculate gaussian kernel
steps = steps * 1j if isinstance(steps, int) else steps  # int -> complex
m1, m2 = y.copy(), y_hat.copy()
xmin, xmax, ymin, ymax = m1.min(), m1.max(), m2.min(), m2.max()
X, Y = _np.mgrid[xmin:xmax:steps, ymin:ymax:steps]
positions = _np.vstack([X.ravel(), Y.ravel()])
values = _np.vstack([m1, m2])
kernel = _sst.gaussian_kde(values)
Z = _np.reshape(kernel(positions).T, X.shape)
# copy cmap and extend if requested
_cmap = _copy.copy(mpl.cm.get_cmap(cmap))
if extend in ('both', 'max'):
_cmap.set_over(cm_over)
if extend in ('both', 'min'):
_cmap.set_under(cm_under)
if norm and norm != 'log':
vmin_, vmax_ = Z.min(), Z.max()
# overwrite norm values if given explicitly
vmin = vmin if vmin is not None else vmin_
vmax = vmax if vmax is not None else vmax_
cont_kwds = {}
elif norm == 'log':
if _np.any(Z < 0.0):
raise ValueError('Values below 0 not supported for log scale')
# add min val to allow log sampling, since 0 is also not supported
Z += 1e-9
vmin_, vmax_ = Z.min(), Z.max()
vmin = vmin if vmin is not None else vmin_
vmax = vmax if vmax is not None else vmax_
cont_kwds = {
'norm': mpl.colors.LogNorm(vmin=vmin, vmax=vmax),
'locator': mpl.ticker.LogLocator(),
}
if not contour: # just plot the kde as an image/pcolormesh
imkwds = (
{ # imshow does not support locator, thus only norm
'norm': mpl.colors.LogNorm(vmin, vmax)
}
if norm == 'log'
else {}
)
im = ax.imshow(
_np.rot90(Z),
cmap=_cmap,
**plt_kwds,
extent=[xmin, xmax, ymin, ymax],
vmin=vmin,
vmax=vmax,
**imkwds
)
else: # plot it as a filled contour
try: # This has many problems with log, thus also try tricont
im = ax.contourf(
X,
Y,
Z,
cmap=_cmap,
extend=extend,
vmin=vmin,
vmax=vmax,
**cont_kwds
)
except ValueError:
im = ax.tricontourf(
X.reshape(-1),
Y.reshape(-1),
Z.reshape(-1),
cmap=_cmap,
extend=extend,
vmin=vmin,
vmax=vmax,
**cont_kwds
)
if cont_lines: # add contour lines
clines = ax.contour(
X,
Y,
Z,
**plt_kwds,
extend=extend,
vmin=vmin,
vmax=vmax,
colors=line_color,
**cont_kwds
)
ax.clabel(clines, inline=1, fontsize=fontsize)
# Plot colorbar
if cbar:
cax_div = _mal(ax).append_axes('right', size='5%', pad=0.05)
cbar = plt.colorbar(mappable=im, cax=cax_div)
# cbar.update_bruteforce(im)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_aspect(aspect)
ax.grid(True)
annotate_errors(ax=ax, y=y_hat, y_hat=y, errors=errors, err_kwds=err_kwds)
return {'fig': fig, 'ax': ax, 'mappable': im, 'cbar': cbar}
def si_axlabel(
ax,
label,
unit=None,
which='y',
style='SI',
spacing=r'\;',
invert_position=False,
):
r"""
Generate axlabels which conform to several naming conventions.
    Generates an SI- and DIN-conform label for the axis. For example,
    `label='Temperature'` and `unit='°C'` will produce the string
    **`r'$Temperature\;/\;\mathrm{°C}$'`**.
Supported styles
----------------
**`style='SI'`** (default)
**`r'$Temperature\\;/\\;\\mathrm{°C}$'`**
**`style='in'`**
**`r'$Temperature\\;in\\;\\mathrm{°C}$'`**,
**`style='parentheses'`**
**`r'$Temperature,\\;(\\mathrm{°C})$'`** (not encouraged),
    **`style='IEEE'`**
**`r'$Temperature\\;(\\mathrm{°C})$'`**, recommended for IEEE articles.
    If multiple quantities are to be labeled, e.g.
    ``Temperature / °C, Power / kW``, `label` and `unit` must be tuples or
lists of the same length, containing the required labels/units.
The spacing between label, unit and, if any, other signs, is set with
`spacing='...'`, defaulting to `'\;'`.
Parameters
----------
ax : ax reference
Ax on which the labels has to be placed.
label : str, tuple, list
Ax label. Can contain LaTeX equations, but the LaTeX equation
identifiers `$equation...$` must be omitted. F.i.
`label='\dot{Q}_{TWE}'`.
unit : str, tuple, list, optional
        SI unit of the quantity to label, f.i. `unit='kWh'`. If not given, the
divisor sign will also be dropped.
which : str
Which axis to decorate. Can be `'y'`, `'x'`, `'z'` or `'cbar'`. For
`'cbar'`, the cbar reference has to be passed with the `ax` parameter.
style : str
Formatting style to apply to the label. Defaults to 'SI' (recommended).
Other allowed formats are 'in', f.i. Power in kW, or 'parentheses',
Power in (kW), (not recommended).
For IEEE publications the style 'IEEE' is recommended, producing
`'Power (kW)'` (like 'parentheses' but without the word `'in'`).
spacing : str
Spacing to apply to the label. Refer to LaTeX equation spacing
manual for a list of options. Default is '\;', which is a full space.
"""
assert style in ('SI', 'in', 'parentheses', 'IEEE')
if isinstance(label, str):
label = (label,)
assert isinstance(unit, str) or unit in ('', 'none', None)
unit = (unit,)
elif isinstance(label, (tuple, list)):
assert (unit in ('', 'none', None)) or isinstance(unit, (tuple, list))
if isinstance(unit, (tuple, list)):
assert len(unit) == len(label)
else:
unit = (unit,) * len(label)
full_axlabel = ''
for lbl, unt in zip(label, unit):
        assert '$' not in lbl and (unt is None or '$' not in unt), (
'Do not pass a latex equation sign ($) to the labeling function. '
'It will be added automatically. If equation output does not '
'work, instead pass a raw string with `r\'label/unit\'`.'
)
unit_ = None if unt in ('', 'none') else unt
# replace spaces in label string with latex space string
label_ = lbl.replace(' ', spacing)
# replace raw percentage signs with \%
if unit_ is not None:
unit_ = unit_.replace(r'\%', '%').replace('%', r'\%')
# construct string
if style == 'SI':
if unit_ is not None:
axlabel = r'${0}{3}{2}{3}\mathrm{{{1}}}$'.format(
label_, unit_, '/', spacing
)
else:
axlabel = r'${0}$'.format(label_)
elif style == 'in':
if unit_ is not None:
axlabel = r'${0}{2}in{2}\mathrm{{{1}}}$'.format(
label_, unit_, spacing
)
else:
axlabel = r'${0}$'.format(label_)
elif style == 'parentheses':
if unit_ is not None:
axlabel = r'${0}{2}in{2}(\mathrm{{{1}}})$'.format(
label_, unit_, spacing
)
else:
axlabel = r'${0}$'.format(label_)
elif style == 'IEEE':
if unit_ is not None:
axlabel = r'${0}{2}(\mathrm{{{1}}})$'.format(
label_, unit_, spacing
)
else:
axlabel = r'${0}$'.format(label_)
full_axlabel += axlabel
full_axlabel = full_axlabel.replace('$$', ',' + spacing)
# set to axis label
if which == 'y':
ax.set_ylabel(full_axlabel)
if invert_position:
ax.yaxis.set_label_position('right')
ax.tick_params(
left=False, right=True, labelleft=False, labelright=True
)
elif which == 'x':
ax.set_xlabel(full_axlabel)
if invert_position:
ax.xaxis.set_label_position('top')
ax.tick_params(
bottom=False, top=True, labelbottom=False, labeltop=True
)
elif which == 'z':
ax.set_zlabel(full_axlabel)
elif which == 'cbar':
ax.set_label(full_axlabel)
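# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Assumes matplotlib is importable; `si_axlabel` is the function defined above.
def _si_axlabel_example():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [20.0, 21.5, 23.0])
    # SI style renders e.g. "$Temperature\;/\;\mathrm{°C}$" on the y axis.
    si_axlabel(ax, label='Time', unit='h', which='x', style='SI')
    si_axlabel(ax, label='Temperature', unit='°C', which='y', style='SI')
    return fig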
def sinum_frmt(x):
"""
Generate SIunitx style formatted number output for plotting tables to TeX.
This function can be used as a formatter for printing DataFrames to Latex
tabes when using the Latex package SIunitx.
Parameters
----------
x : int, float
Value to format.
Returns
-------
str
Formatted value.
"""
if isinstance(x, (int, float)):
if x < 1e3:
return r'\num{' + '{0:.3G}'.format(x) + '}'
elif x >= 1e3:
return r'\num{' + '{0:.0f}'.format(x) + '}'
else: # x is nan
return '-'
else:
return x
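# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Shows how `sinum_frmt` can be passed to pandas' `DataFrame.to_latex` so each cell
# is wrapped in \num{...} for the SIunitx package. The column names are made up.
def _sinum_frmt_example():
    import pandas as pd
    df = pd.DataFrame({'power': [1.234, 5678.9], 'cost': [42.0, float('nan')]})
    return df.to_latex(formatters=[sinum_frmt] * df.shape[1], escape=False)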
def annotate_errors(
ax,
y,
y_hat,
errors,
err_loc='bottom right',
err_vals=None,
fontsize=8,
err_kwds=dict(
bbox=dict(boxstyle="round", fc="w", ec="k", alpha=0.5, pad=0.2)
),
):
"""
Annotate statistical error measures in a plot.
Parameters
----------
ax : matplotlib.axes
Axes to add error measures to.
y : np.array, pd.Series, pd.DataFrame
        Realization/measurement/observed data.
y_hat : np.array, pd.Series, pd.DataFrame
Predicted/forecast/simulated data.
errors : tuple
Errors to calculate.
err_loc : str, optional
Location where to print errors in the axes. The default
is 'bottom right'.
err_vals : None, dict, optional
Instead of calculating errors, use these values. The default is None.
fontsize : int, optional
Fontsize to use for printing errors. The default is 8.
err_kwds : dict, optional
Box style to use for printing errors. The default is
dict(bbox=dict(boxstyle="round", fc="w", ec="k", alpha=0.5, pad=0.2)).
Returns
-------
None.
"""
annot_str = ''
if 'R2' in errors:
if err_vals is None or 'R2' not in err_vals:
r2 = _sem.r_squared(y, y_hat)
else:
r2 = err_vals['R2']
annot_str += r'$R^2={0:.3f}$'.format(r2) + '\n'
if 'MSE' in errors:
if err_vals is None or 'MSE' not in err_vals:
mse = _sem.mean_squared_err(y, y_hat)
else:
mse = err_vals['MSE']
annot_str += r'$MSE={0:.3G}$'.format(mse) + '\n'
if 'CV(RMSE)' in errors:
if err_vals is None or 'CV(RMSE)' not in err_vals:
cvrmse = _sem.cv_rmse(y, y_hat)
else:
cvrmse = err_vals['CV(RMSE)']
annot_str += r'$CV(RMSE)={0:.3f}$'.format(cvrmse) + '\n'
if 'NMBE' in errors:
if err_vals is None or 'NMBE' not in err_vals:
nmbe = _sem.normalized_err(y, y_hat, err_method='MSD', norm='mean')
else:
nmbe = err_vals['NMBE']
annot_str += r'$NMBE={0:.3f}$'.format(nmbe)
if annot_str[-1:] == '\n':
annot_str = annot_str[:-1]
annotate_axes(
ax, annotations=[annot_str], loc=err_loc, fontsize=fontsize, **err_kwds
)
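# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Assumes `y_meas` and `y_pred` are equal-length arrays and that the module-level
# error helpers (`_sem.*`) used above are importable.
def _annotate_errors_example(ax, y_meas, y_pred):
    annotate_errors(ax, y=y_meas, y_hat=y_pred, errors=('R2', 'CV(RMSE)'),
                    err_loc='top left')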
def annotate_axes(
axes,
fig=None,
annotations=None,
loc='top left', # xy=[(.05, .95)],
bbox=dict(boxstyle="round", fc="w", ec="k", alpha=0.8, pad=0.3),
txt_offset=8,
xy_offset=(0, 0),
fontsize=9,
**kwds
):
"""
Create axes annotations like (a), (b), (c)...
Parameters
----------
axes : tuple, matplotlib.Axes
Axes to print annotations to. If tuple, label each consequently.
fig : matplotlib.Figure, optional
Figure of which axis should be labeled. The default is None.
annotations : list, tuple, string, optional
Annotations to use. If None, the lower case alphabet will be used.
The default is None.
loc : string, optional
Location in each axes to print annotations to. The default
is 'top left'.
bbox : dict, optional
Boxstyle to use for annotations. The default is
dict(boxstyle="round", fc="w", ec="k", alpha=.8, pad=0.35).
txt_offset : int, float, optional
Text offset from axes border in points. The default is 8.
xy_offset : tuple, optional
Additional xy-offset from axes border in points. The default is (0, 0).
fontsize : int, optional
Fontsize for annotating the axes. The default is 9.
**kwds : keyword arguments
Additional alignment arguments to pass on the matplotlib.annotate.
Returns
-------
None.
"""
if isinstance(fig, mpl.figure.Figure):
assert axes is None, (
'If a figure is given with `fig`, axes need to be set to None '
'with `axes=None`.'
)
axes = fig.get_axes()
# if single axes given, store in list for convenience
if not isinstance(axes, (list, tuple)):
axes = [axes]
# check if annotations are given and if not, use the alphabet:
if annotations is None:
annt = ['{0})'.format(ch) for ch in 'abcdefghijklmnopqrstuvwxyz']
else:
annt = annotations
# check if annotations are given and if not, calculate positions:
# if len(xy) == 1:
# xy_perc = xy[0] # backup percentag values
# xy = []
# for ax in axes:
# xlim, ylim = ax.get_xlim(), ax.get_ylim()
# xy.append(
# ((xlim[1] - xlim[0]) * xy_perc[0] + xlim[0],
# (ylim[1] - ylim[0]) * xy_perc[1] + ylim[0]))
# catch other args:
xycoords = kwds['xycoords'] if 'xycoords' in kwds else 'axes fraction'
ha = kwds['ha'] if 'ha' in kwds else 'center'
va = kwds['va'] if 'va' in kwds else 'center'
zorder = kwds['zorder'] if 'zorder' in kwds else 500
assert loc in (
'top left',
'top right',
'bottom left',
'bottom right',
'top center',
'lower center',
'center left',
'center right',
)
if loc == 'top left':
xy = (0, 1)
xytext = (txt_offset + xy_offset[0], -txt_offset + xy_offset[1])
ha, va = 'left', 'top'
elif loc == 'top right':
xy = (1, 1)
xytext = (-txt_offset + xy_offset[0], -txt_offset + xy_offset[1])
ha, va = 'right', 'top'
elif loc == 'bottom left':
xy = (0, 0)
xytext = (txt_offset + xy_offset[0], txt_offset + xy_offset[1])
ha, va = 'left', 'bottom'
elif loc == 'bottom right':
xy = (1, 0)
xytext = (-txt_offset + xy_offset[0], txt_offset + xy_offset[1])
ha, va = 'right', 'bottom'
elif loc == 'top center':
xy = (0.5, 1)
xytext = (0 + xy_offset[0], -txt_offset + xy_offset[1])
ha, va = 'center', 'top'
elif loc == 'lower center':
xy = (0.5, 0)
xytext = (0 + xy_offset[0], txt_offset + xy_offset[1])
ha, va = 'center', 'bottom'
elif loc == 'center left':
xy = (0, 0.5)
xytext = (txt_offset + xy_offset[0], 0 + xy_offset[1])
ha, va = 'left', 'center'
elif loc == 'center right':
xy = (1, 0.5)
xytext = (-txt_offset + xy_offset[0], 0 + xy_offset[1])
ha, va = 'right', 'center'
# overwrite align if given:
ha = kwds['ha'] if 'ha' in kwds else ha
va = kwds['va'] if 'va' in kwds else va
# iterate over axes:
for i, ax in enumerate(axes):
ax.annotate(
text=annt[i],
xy=xy,
xycoords=xycoords,
xytext=xytext,
textcoords='offset points',
ha=ha,
va=va,
zorder=zorder,
bbox=bbox,
fontsize=fontsize,
)
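# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Labels the panels of a 2x2 grid as "a)", "b)", "c)", "d)" in their top left corners.
def _annotate_axes_example():
    import matplotlib.pyplot as plt
    fig, axs = plt.subplots(2, 2)
    annotate_axes(list(axs.flatten()), loc='top left')
    return fig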
```
#### File: JoElfner/multisim/setup.py
```python
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'MultiSim'
DESCRIPTION = 'Thermal simulation tool for heating appliances.'
URL = 'https://github.com/JoElfner/multisim'
download_url = 'https://github.com/JoElfner/multisim/archive/v{0}.tar.gz'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.7.9'
VERSION = '0.11.0'
download_url = download_url.format(VERSION)
# What packages are required for this module to be executed?
REQUIRED = [
'matplotlib (>=3.3.2)',
'numba (>=0.51.2)',
'numpy (>=1.19.2)',
'pandas (>=1.0.5)',
'scipy (>=1.5)',
'scikit-learn (>=0.23.1)',
]
# What packages are optional?
EXTRAS = {
# 'fancy feature': ['django'],
}
def write_version_py(version, filename='multisim/version.py'):
"""Write version to file for module wide access."""
cnt = """# THIS FILE IS GENERATED FROM MultiSim SETUP.PY
version = '%(version)s'
"""
with open(filename, 'w') as f:
f.write(cnt % {'version': version})
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for it!
base_dir = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# same with changelog:
try:
with open(os.path.join(base_dir, "CHANGELOG.rst")) as f:
# Remove :issue:`ddd` tags that breaks the description rendering
changelog = f.read()
except FileNotFoundError:
changelog = 'changelog not found'
long_description = "\n\n".join([long_description, changelog])
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(base_dir, project_slug, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
write_version_py(about['__version__'])
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(base_dir, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --python-tag py{1}'.format(
sys.executable, REQUIRES_PYTHON.replace('.', '')[-3:-1]
)
)
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload --config-file .pypirc dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
download_url=download_url,
packages=find_packages(
exclude=["tests", "*.tests", "*.tests.*", "tests.*"]
),
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
# $ setup.py publish support.
cmdclass={'upload': UploadCommand},
)
```
#### File: multisim/tests/not_included_test_sim_add_part.py
```python
import os
import pytest
import sys
# from .context import multisim as ms
import multisim as ms
# disable writing bytecode to avoid creating pycache
sys.dont_write_bytecode = True
# %% Init sim model as isolated fixture
# make the tested state a fixture for further testing
@pytest.fixture(
scope='module',
autouse=True,
params=[
dict(
set_tf=dict(timeframe=500, adaptive_steps=True),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=True,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=True),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=True),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=False,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=True),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=False, timestep=1.0),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=False,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=True),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=False, timestep=1.0),
set_ds=dict(
save=False,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=True,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=True),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=True),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=True,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=False),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=True),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=False,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=False),
),
dict(
set_tf=dict(timeframe=500, adaptive_steps=False, timestep=1.0),
set_ds=dict(
save=True,
save_every_n_steps=50,
start_date='2020-01-01',
resample_final=False,
resample_freq='1s',
),
set_slvr=dict(solver='heun', allow_implicit=False),
),
],
)
def make_sim(request):
# init sim
sim = ms.Models(suppress_printing=False)
# set timeframe:
sim.set_timeframe(
**request.param['set_tf'],
min_stepsize=1e-3,
max_stepsize=10.0,
rtol=1e-3,
atol=1e-3,
max_factor=10,
min_factor=1e-2
)
# set disksaving:
sim.set_disksaving(
**request.param['set_ds'],
path=r'./test_output_files//',
create_new_folder=True,
overwrite=True,
complevel=5,
complib='zlib'
)
# set solver:
    sim.set_solver(**request.param['set_slvr'])
    return sim
# and isolate it to allow reverting for each new test
@pytest.fixture(autouse=True)
def isolation(fn_isolation):
    pass
# %% Test adding parts
# test adding TES
# =============================================================================
# @pytest.mark.parametrize(
# ('volume,gridpoints,tes_do,tes_new_ports,tes_init,store_results'), [
# (.5, 10, 2., newpor, tinit, stres8, ),
# (2.1, 100, 1., newpor, tinit, stres8, ),
# (.5, 10, 2., newpor, tinit, stres8, ),
# (2.1, 100, 1., newpor, tinit, stres8, ),
# (.5, 10, 2., newpor, tinit, stres8, ),
# (2.1, 100, 1., newpor, tinit, stres8, ),])
# def test_add_tes(
# make_sim, volume, gridpoints, tes_do, tes_new_ports,
# tes_init, store_results):
# make_sim.add_part(
# ms.ap.Tes, name='tes', volume=volume,
# grid_points=gridpoints, outer_diameter=tes_do,
# shell_thickness=5e-3, new_ports=tes_new_ports,
# insulation_thickness=0.2, insulation_lambda=.035,
# T_init=tes_init, T_amb=25, material='carbon_steel',
# pipe_specs={
# 'all': {'pipe_type': 'EN10255_medium', 'DN': 'DN40'}},
# store_results=store_results)
# =============================================================================
# # test disk saving init
# @pytest.mark.parametrize(
# ('save,evry_step,name,startdate,resample,freq,new_folder,overwrite,'
# 'complvl,complib'), [
# (True, 1., 'simmi', '2020-01-01', True, '1s',
# True, False, 9, 'zlib'),
# (True, 1000, None, '2020-01-01', True, '100s',
# False, True, 0, 'gzip'),
# (True, 100, 25., 1970, True, 25,
# True, False, 9, 'zlib'),
# (False, 10000, 'simmi', '2020-01-01', True, '1s',
# True, False, 9, 'zlib')])
# def test_diskaving(
# make_sim, save, evry_step, name, startdate, resample, freq,
# new_folder, overwrite, complvl, complib):
# make_sim.set_disksaving(
# save=save, save_every_n_steps=evry_step, sim_name=name,
# path=r'./test_output_files//',
# start_date=startdate, resample_final=resample, resample_freq=freq,
# create_new_folder=new_folder,
# overwrite=overwrite, complevel=complvl, complib=complib
# )
# %% run test
# pytest.main()
# %% remove all files created by test:
# remove files created by simulation
fls_tof = os.listdir('./test_output_files')
for f in fls_tof:
os.remove(os.path.join('./test_output_files', f))
``` |
{
"source": "JoelForamitti/ipysimulate",
"score": 2
} |
#### File: ipysimulate/ipysimulate/control.py
```python
import traitlets
import ipywidgets
import threading
import time
import ipysimulate
from .tools import make_list
from .parameters import Range, IntRange, Values
# See js/lib/control.js for the frontend counterpart to this file.
semver_range = "~" + ipysimulate.__version__ # Retrieve version
# Prepare parameter classes
range_types = (Range, )
intrange_types = (IntRange, )
value_types = (Values, )
try:
import agentpy as ap
if ap.__version__ >= '0.0.8':
range_types += (ap.Range, )
intrange_types += (ap.IntRange, )
value_types += (ap.Values, )
except ImportError as e:
pass
@ipywidgets.register
class Control(ipywidgets.DOMWidget):
""" Control panel widget for an interactive simulation.
Arguments:
model:
A :ref:`simulation model <simulation_model>` with discrete steps.
parameters (dict, optional):
Dictionary of parameter names and values (default None).
Entries of type :class:`Range`, :class:`IntRange`,
and :class:`Values` will be displayed as interactive widgets.
        variables (str or list of str, optional):
Model attributes to display in the control panel (default None).
"""
# Traitlet declarations ------------------------------------------------- #
_view_name = traitlets.Unicode('ControlView').tag(sync=True)
_view_module = traitlets.Unicode('ipysimulate').tag(sync=True)
_view_module_version = traitlets.Unicode(semver_range).tag(sync=True)
_model_name = traitlets.Unicode('ControlModel').tag(sync=True)
_model_module = traitlets.Unicode('ipysimulate').tag(sync=True)
_model_module_version = traitlets.Unicode(semver_range).tag(sync=True)
is_running = traitlets.Bool(False).tag(sync=True)
do_reset = traitlets.Bool(False).tag(sync=True)
_variables = traitlets.Dict().tag(sync=True)
parameters = traitlets.Dict().tag(sync=True)
data_paths = traitlets.List().tag(sync=True)
_pwidgets = traitlets.List().tag(sync=True)
t = traitlets.Integer(0).tag(sync=True)
name = traitlets.Unicode().tag(sync=True)
# Initiation - Don't start any threads here ----------------------------- #
def __init__(self, model, parameters=None, variables=None):
super().__init__() # Initiate front-end
self.on_msg(self._handle_button_msg) # Handle front-end messages
self.thread = None # Placeholder for simulation threads
self._pre_pwidgets = []
self._pdtypes = {}
self.parameters = {}
if parameters:
for k, v in parameters.items():
if isinstance(v, value_types):
self._create_select(k, v)
self.parameters[k] = v.vdef
elif isinstance(v, intrange_types):
self._create_slider(k, v, int_slider=True)
self.parameters[k] = v.vdef
elif isinstance(v, range_types):
self._create_slider(k, v)
self.parameters[k] = v.vdef
else:
self.parameters[k] = v
self._pwidgets = self._pre_pwidgets
self.model = model
self.model.set_parameters(self.parameters)
self._callbacks = []
self._var_keys = make_list(variables)
self._variables = {k: None for k in self._var_keys}
self.charts = []
# Callbacks ------------------------------------------------------------- #
def add_callback(self, func, *args, **kwargs):
self._callbacks.append((func, args, kwargs))
# Parameter widgets ----------------------------------------------------- #
def _create_slider(self, k, v, int_slider=False):
pwidget = {
'name': k,
'type': 'slider',
'vmin': v.vmin,
'vmax': v.vmax,
'vdef': v.vdef
}
if int_slider:
pwidget['step'] = max([1, int((v.vmax - v.vmin) / 100)])
self._pdtypes[k] = int
else:
pwidget['step'] = (v.vmax - v.vmin) / 100
self._pdtypes[k] = float
self._pre_pwidgets.append(pwidget)
def _create_select(self, k, v):
pwidget = {
'name': k,
'type': 'select',
'values': v.values,
'vdef': v.vdef
}
# TODO Better way to infer dtypes
self._pdtypes[k] = type(v.values[0])
self._pre_pwidgets.append(pwidget)
# Methods to be called from the front-end ------------------------------- #
def _handle_button_msg(self, _, content, buffers):
""" Handles messages from the front-end
by calling method of same name as msg. """
getattr(self, content.get('event', ''))(**content)
def update_parameter(self, k, v):
self.model.p[k] = self._pdtypes[k](v)
def setup_simulation(self, **kwargs):
""" Call model setup. """
self.thread = threading.Thread(target=self.run_setup)
self.thread.start()
def continue_simulation(self, **kwargs):
""" Start background thread that runs simulation. """
self.thread = threading.Thread(target=self.run_simulation)
self.thread.start()
def increment_simulation(self, **kwargs):
""" Do a single simulation step. """
self.thread = threading.Thread(target=self.run_step)
self.thread.start()
def reset_simulation(self, **kwargs):
""" Reset graphs and simulation. """
self.thread = threading.Thread(target=self.reset)
self.thread.start()
# Methods to be called only within threads ------------------------------ #
def sync_data(self):
""" Retrieve new data from simulation and send it to front-end. """
self._variables = {k: getattr(self.model, k) for k in self._var_keys}
for chart in self.charts:
chart.sync_data()
for callback, args, kwargs in self._callbacks:
callback(*args, **kwargs)
def reset(self):
""" Reset simulation by clearing front-end data,
calling `model.sim_reset()`, and sending initial data to front-end."""
for chart in self.charts:
chart.reset_data()
self.run_setup() # Reset backend model by calling setup again
def run_setup(self):
""" Initiate simulation by calling `model.sim_setup()`
and sending initial data to front-end. """
self.model.sim_setup()
self.t = self.model.t
self.sync_data()
def run_step(self):
""" Run a single simulation step by calling `model.sim_step()`,
and sending new data to front-end. """
self.model.sim_step()
self.t = self.model.t
self.sync_data()
def run_simulation(self):
""" Start or continue the simulation by repeatedly calling
:func:`Control.run_single_step` as long as `model.active` is True. """
self.is_running = True
if 'fps' in self.model.p:
while self.model.running:
start = time.time()
self.run_step()
wait = 1 / self.model.p.fps + start - time.time()
if wait > 0:
time.sleep(wait)
if not self.is_running:
break
else:
while self.model.running:
self.run_step()
if not self.is_running:
break
self.is_running = False
if self.do_reset:
self.reset_simulation()
self.do_reset = False
```
#### File: ipysimulate/ipysimulate/simulation.py
```python
class Simulation:
""" An example simulation for ipysimulate. """
@property
def is_running(self):
return False
def set_parameters(self):
pass
def run_setup(self):
pass
def run_step(self):
pass
def reset(self):
pass
```
#### File: ipysimulate/ipysimulate/tools.py
```python
def make_list(element, keep_none=False):
""" Turns element into a list of itself
if it is not of type list or tuple. """
if element is None and not keep_none:
element = [] # Convert none to empty list
if not isinstance(element, (list, tuple, set)):
element = [element]
elif isinstance(element, (tuple, set)):
element = list(element)
return element
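# --- Illustrative usage sketch (added for clarity; not part of the original module).
if __name__ == '__main__':
    assert make_list('a') == ['a']
    assert make_list(('a', 'b')) == ['a', 'b']
    assert make_list(None) == []
    assert make_list(None, keep_none=True) == [None]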
``` |
{
"source": "JoelForamitti/UvsD_ABM",
"score": 2
} |
#### File: JoelForamitti/UvsD_ABM/UvsD_ABM_Parameters.py
```python
import numpy as np
# Simulation Settings
scenarios = ["No_Policy","Downtax","Downmarket","DownDR","Uptax","Upmarket","UpDR"] # Which scenarios should be compared?
fixed_seed = True # Use fixed seed for random values?
glob_seed = 13 # Seed for random parameters
print_errors = True # Display error messages?
## 000 Parameters
if fixed_seed == True: np.random.seed(glob_seed)
param_range = {
'num_vars': 15,
'names': ['$λ$', '$Δθ$','$ΔA_0$,$ΔB_0$', 'Δμ', 'Δη','θ','ϑ','$χ^M$','μ','η','γ','β','$t^*$','ϕ','$χ^S$'],
'bounds': [[0.5 , 0.9 ], #0 - λ - Abatement potential
[0.1 , 0.5 ], #1 - Δθ - Abatement Cost Heterogeneity
[0.1 , 0.5 ], #2 - ΔA_0,ΔB_0 - Heterogeneity of production factors
[0.1 , 0.5 ], #3 - Δμ - Heterogeneity of emission price adaption rate
[0.1 , 0.5 ], #4 - Δη - Heterogeneity of profitability target
[15 , 20 ], #5 - θ - Abatement cost factor
[0.1 , 0.5 ], #6 - ϑ (vartheta) - Dosi Mark-up adaptation rate
[0.1 , 0.5 ], #7 - χ - Market share adaptation rate
[0.05 , 0.1 ], #8 - μ - Price adaption rate
[0.1 , 0.5 ], #9 - η - Profitability rate
[0.4 , 0.5 ], #10 - γ - Demand sensitivity
[0.01 , 0.1 ], #11 - β - Production cost increase upstream
[3 , 10 ], #12 - t* - supply forecast
[0.3 , 0.7 ], #13 - ϕ - coverage factor
               [0.1  , 0.5  ]] #14 - χ^S - Market share adaptation rate upstream
}
class c_parameters:
def __init__(self,variable_parameters,open_economy):
# Toggle model dynamics
self.calibrate = True
self.multipl_adapt = False
self.open_economy = open_economy
# Fixed parameters
self.T = 200 # Number of rounds
self.Np = self.Nf = 30 # Number of final goods producers
self.t_start= 50 # Delay until policy starts
self.t_impl = 100 # Number of implementation periods
self.D0 = 1 # Maximum demand
self.A00 = 1 # Fuel and Emission intensity
self.B00 = 1 # Downstream Production costs
self.B00_up = 1 # Upstream Production costs
self.qI_d = 0.1 # Desired inventory
self.ω = [1, 1] # Market-share evolution weights
self.λ_n = 20 # Number of technological steps
self.α0 = 0 # Abatement cost ground level
self.E_max = 0.1 # Emission Target
self.m0 = 0.1 # Dosi mark-up rate in the beginning
#self.eta = 0.1 # Profitability target
self.pe0 = 0.1 # Initial trading price for permits
# Calibration parameters
self.calibration_treshold = 10**(-2) * 0.2
self.calibration_max_runs = 30
self.tax = 100 # Upper bound for tax
# Variable parameters
self.λ_max, self.Δα, self.ΔAB, self.Δδ, self.Δeta, self.α, self.ϑ,self.χ,self.δ,self.eta, self.γ, self.B_up_incr, self.supply_forecast, self.η_c,self.χup = variable_parameters
self.δ_up = self.δ
self.Nf = int(self.Nf)
# Implement limited coverage
if self.open_economy == False: self.η_c = 0
self.Nf_c = int(round(self.Nf * (1 - self.η_c)))
self.Np_c = int(round(self.Np * (1 - self.η_c)))
# Error log
self.error = False
self.log = []
def generate_random_par(self):
a = [self.δ * ( 1 + self.Δδ * ( np.random.random() - 0.5 ) ) for i in range(self.Np)]
b = [self.A00 * ( 1 + self.ΔAB * ( np.random.random() - 0.5 ) ) for i in range(self.Np)]
c = [self.B00 * ( 1 + self.ΔAB * ( np.random.random() - 0.5 ) ) for i in range(self.Np)]
d = [self.α * ( 1 + self.Δα * ( np.random.random() - 0.5 ) ) for i in range(self.Np)]
e = [self.B00_up * ( 1 + self.ΔAB * ( np.random.random() - 0.5 ) ) for i in range(self.Nf)]
f = [self.δ_up * ( 1 + self.Δδ * ( np.random.random() - 0.5 ) ) for i in range(self.Nf)]
g = [self.eta * ( 1 + self.Δeta * ( np.random.random() - 0.5 ) ) for i in range(self.Np)]
return [a,b,c,d,e,f,g]
def load_random_par(self,random_par):
self.δ,self.A0,self.B0,self.α,self.B0_up,self.δ_up,self.eta = random_par
self.λ = [] # Abatement List
for i in range(self.Np): self.λ.append( self.generate_λ(self.α[i],self.A0[i]) )
# Abatement cost curve
def generate_λ(self,α,A0):
λ =[]
for i in range(self.λ_n):
a=(A0*self.λ_max)/self.λ_n
MAC=a*α*(i+1) + self.α0
b=a*MAC
λ.append([a,b])
return λ
# Manage errors
def report_error(self,statement):
self.error = True
if statement not in self.log:
self.log.append(statement)
# Scenarios
def load_sc(self,scenario):
# Load Scenario
getattr(self,scenario)()
def No_Policy(self):
self.mode = "No Policy"
self.label = "No Policy"
self.regpoint = "None"
def UpDR(self):
self.mode = "Direct_Regulation"
self.label = "Upstream direct regulation"
self.regpoint = "upstream"
def DownDR(self):
self.mode = "Direct_Regulation"
self.label = "Downstream direct regulation"
self.regpoint = "downstream"
def Upmarket(self):
self.mode = "Permits"
self.label = "Upstream permit market"
self.regpoint = "upstream"
def Downmarket(self):
self.mode = "Permits"
self.label = "Downstream permit market"
self.regpoint = "downstream"
def Uptax(self):
self.mode = "Tax"
self.label = "Upstream tax"
self.regpoint = "upstream"
def Downtax(self):
self.mode = "Tax"
self.label = "Downstream tax"
self.regpoint = "downstream"
``` |
{
"source": "joelfrederico/bprof",
"score": 3
} |
#### File: bprof/bprof/profile.py
```python
class BaseFunction:
def __init__(self, name, n_calls, internal_ns):
self._name = name
self._n_calls = n_calls
self._internal_ns = internal_ns
@property
def name(self):
return self._name
@property
def n_calls(self):
return self._n_calls
@property
def internal_ns(self):
return self._internal_ns
class Lines:
def __init__(self, line_str, n_calls, internal, external):
self._line_str = line_str
self._n_calls = n_calls
self._internal = internal
self._external = external
@property
def text(self):
return self._line_str
@property
def n_calls(self):
return self._n_calls
@property
def internal(self):
return self._internal
@property
def external(self):
return self._external
@property
def total(self):
return self.internal + self.external
class Function(BaseFunction):
def __init__(self, name, lines, n_calls, internal_ns):
self._name = name
self._lines = lines
self._n_calls = n_calls
self._internal_ns = internal_ns
@property
def lines(self):
return self._lines
@property
def name(self):
return self._name
@property
def n_calls(self):
return self._n_calls
@property
def internal_ns(self):
return self._internal_ns
@property
def total(self):
tot = 0
for line in self.lines:
tot += line.total
return tot + self.internal_ns
class Profile:
@staticmethod
def from_data(data):
profile = Profile()
profile._functions = []
for key, fdata in data['functions'].items():
lines = []
for line in fdata['lines']:
line = Lines(line['line_str'], line['n_calls'],
line['internal_ns'], line['external_ns'])
lines.append(line)
func = Function(lines=lines, name=fdata['name'],
n_calls=fdata['n_calls'],
internal_ns=fdata['internal_ns'])
profile._functions.append(func)
return profile
@property
def functions(self):
return self._functions
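# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The `data` layout below is inferred from `Profile.from_data` above; the field
# names are assumptions based on that method, not a documented schema.
def _profile_example():
    data = {
        'functions': {
            'f': {
                'name': 'f',
                'n_calls': 1,
                'internal_ns': 100,
                'lines': [
                    {'line_str': 'x = 1', 'n_calls': 1,
                     'internal_ns': 50, 'external_ns': 0},
                ],
            },
        },
    }
    profile = Profile.from_data(data)
    return [(fn.name, fn.total) for fn in profile.functions]  # [('f', 150)]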
``` |
{
"source": "joelfrederico/coronalyzer",
"score": 3
} |
#### File: joelfrederico/coronalyzer/app.py
```python
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import scipy.optimize as spopt
import json
app = dash.Dash(__name__)
server = app.server
world_df = pd.read_csv('data/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
def get_label(row):
label = row.loc['Country/Region']
state = row.loc['Province/State']
if pd.notna(state):
label += f" ({state})"
return label
with open('data/los_angeles.json') as f:
data = json.load(f)
new_case_data = data['New Cases'][1:]
indices = data['Date'][1:]
total_dat = []
total = 0
for (i, val) in enumerate(new_case_data):
total += val
total_dat.append(total)
indices.extend(['Province/State', 'Country/Region'])
total_dat.extend(['Los Angeles', 'US'])
la_df = pd.DataFrame(data=[total_dat], columns=indices)
world_df = world_df.append(la_df)
world_df.reset_index(inplace=True)
dropdown = []
for (i, row) in world_df.iterrows():
label = get_label(row)
dropdown.append({'label': label, 'value': i})
total_fig = go.Figure()
total_fig.update_layout(title='Total Cases', yaxis_title="Cases")
def resid(x, N, t):
return N - 2**((t - x[1])/x[0])
def get_jac(x, N, t):
t_sub_t0 = t - x[1]
exp_part = 2**(t_sub_t0/x[0])
res = np.array(
[np.log(2)*t_sub_t0*exp_part/x[0]**2, exp_part*np.log(2)/x[0]])
return res.transpose()
def get_rate(series):
t = series.index - pd.to_datetime('1/1/2020')
t = t.values / pd.Timedelta('1d')
result = spopt.least_squares(
resid, [3, t[0]], jac=get_jac,
kwargs={'N': series, 't': t})
return 1/result.x[0]
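# --- Added explanatory note (comments only; not part of the original app).
# The fit above assumes exponential growth of the form N(t) = 2**((t - t0) / Td),
# where x[0] = Td is the doubling time in days and x[1] = t0 is a time offset;
# get_rate therefore returns 1/Td, i.e. the number of doublings per day.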
rate_fig = go.Figure()
rate_fig.update_layout(
title='Exponential Growth: 3 Day Fit',
yaxis_title=r'Doubling Growth Factor',
showlegend=True)
rate_fig_7 = go.Figure()
rate_fig_7.update_layout(
    title='Exponential Growth: 7 Day Fit',
    yaxis_title=r'Doubling Growth Factor',
showlegend=True)
app.layout = html.Div([
html.H2('Coronalyzer'),
html.Div([
dcc.Dropdown(
id='dropdown',
options=dropdown,
value=[226, 137, 62, 238],
multi=True)
]),
dcc.Graph(
id='total-cases-1',
figure=total_fig
),
dcc.Graph(
id='rate',
figure=rate_fig
),
dcc.Graph(
id='rate7',
figure=rate_fig_7
),
html.Div(id='output')
])
def row_to_series(row_df):
series = row_df[5:]
series.index = pd.DatetimeIndex(series.index)
series = series[series > 0]
return series
def create_fig_data(row):
row_df = world_df.loc[row]
series = row_to_series(row_df)
return dict(x=series.index, y=series,
type='scatter', name=get_label(row_df))
def create_rate_data(row, window):
row_df = world_df.loc[row]
series = row_to_series(row_df)
series = series.rolling(window).apply(get_rate)
return dict(x=series.index, y=series,
type='scatter', name=get_label(row_df))
@app.callback(
Output('total-cases-1', 'figure'),
[Input('dropdown', 'value')],
[State('total-cases-1', 'figure')])
def update_output(values, fig):
data = []
for value in values:
data.append(create_fig_data(value))
fig['data'] = data
return fig
@app.callback(
Output('rate', 'figure'),
[Input('dropdown', 'value')],
[State('rate', 'figure')])
def update_rate(values, fig):
data = []
for value in values:
data.append(create_rate_data(value, '3d'))
fig['data'] = data
return fig
@app.callback(
Output('rate7', 'figure'),
[Input('dropdown', 'value')],
[State('rate7', 'figure')])
def update_rate7(values, fig):
data = []
for value in values:
data.append(create_rate_data(value, '7d'))
fig['data'] = data
return fig
if __name__ == '__main__':
app.run_server(debug=True)
``` |
{
"source": "joelfrederico/mytools",
"score": 2
} |
#### File: joelfrederico/mytools/nosetest.py
```python
import sys
import os
import nose
import unittest.mock as mock
import builtins
pyqt4 = mock.Mock()
plt = mock.Mock()
figcanv = mock.Mock()
navtool = mock.Mock()
orig_import = __import__
def import_mock(name, globals=None, locals=None, fromlist=(), level=0):
if name == 'matplotlib.backends.backend_qt4agg':
print(fromlist)
if name == 'PyQt4':
return pyqt4
elif name == 'matplotlib.pyplot':
return plt
elif name == 'matplotlib.backends.backend_qt4agg' and fromlist == ('FigureCanvasQTAgg',):
return figcanv
elif name == 'matplotlib.backends.backend_qt4' and fromlist == ('NavigationToolbar2QT',):
return navtool
return orig_import(name, globals, locals, fromlist, level)
with mock.patch('builtins.__import__', import_mock) as mc:
nose.main()
```
#### File: scisalt/logging/mylogger.py
```python
import logging as _logging
import inspect as _inspect
__all__ = ['mylogger', 'log']
def mylogger(name=None, filename=None, indent_offset=7, level=_logging.DEBUG, stream_level=_logging.WARN, file_level=_logging.INFO):
"""
    Sets up logging to *filename*_debug.log, *filename*.log, and the terminal. *indent_offset* attempts to line up the lowest indent level to 0. Custom levels:
* *level*: Parent logging level.
* *stream_level*: Logging level for console stream.
* *file_level*: Logging level for general file log.
"""
if name is not None:
logger = _logging.getLogger(name)
else:
logger = _logging.getLogger()
logger.setLevel(level)
fmtr = IndentFormatter(indent_offset=indent_offset)
fmtr_msgonly = IndentFormatter('%(funcName)s:%(lineno)d: %(message)s')
ch = _logging.StreamHandler()
ch.setLevel(stream_level)
ch.setFormatter(fmtr_msgonly)
logger.addHandler(ch)
if filename is not None:
debugh = _logging.FileHandler(filename='{}_debug.log'.format(filename), mode='w')
debugh.setLevel(_logging.DEBUG)
debugh.setFormatter(fmtr_msgonly)
logger.addHandler(debugh)
fh = _logging.FileHandler(filename='{}.log'.format(filename), mode='w')
fh.setLevel(file_level)
fh.setFormatter(fmtr)
logger.addHandler(fh)
return logger
def log(logger, level):
def log(msg):
return logger.log(level=level, msg=msg)
return log
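# --- Illustrative usage sketch (added for clarity; not part of the original module).
def _mylogger_example():
    logger = mylogger(name='example', filename='example_run')
    logger.info('goes to example_run.log and example_run_debug.log')
    logger.warning('additionally echoed to the console')
    debug = log(logger, _logging.DEBUG)
    debug('goes to example_run_debug.log only')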
class IndentFormatter(_logging.Formatter):
def __init__( self, fmt=None, datefmt=None, indent_offset=6):
if fmt is None:
fmt = '%(indent)s==========================================================\n%(indent)s%(levelname)s - %(name)s:%(funcName)s:%(lineno)d\n%(indent)s%(message)s'
super(IndentFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
self.baseline = len(_inspect.stack()) + indent_offset
def format( self, rec ):
stack = _inspect.stack()
stackdepth = len(stack)
stackdiff = stackdepth - self.baseline
rec.indent = '\t' * stackdiff
# rec.function = stack[8][3]
out = _logging.Formatter.format(self, rec)
del rec.indent
# del rec.function
return out
```
#### File: scisalt/matplotlib/Imshow_Slider_Array_mod.py
```python
import os as _os
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import matplotlib.widgets as _wdg
import matplotlib as _mpl
import numpy as _np
import matplotlib.pyplot as _plt
from .setup_figure import setup_figure # noqa
from .imshow import imshow
from .imshow import scaled_figsize
from .colorbar import colorbar
class Imshow_Slider_Array(object):
"""
Convenience class for viewing images.
Plots images to an instance of :class:`matplotlib.axes.Axes`, with sliders for controlling bounds, with *\*\*kwargs* passed through to :meth:`matplotlib.axes.Axes.imshow`.
Parameters
----------
images :
An array of images.
usecbar : bool
Determines if colorbar is shown. Color bars can slow down the viewer significantly.
kwargs :
Passed through to :meth:`matplotlib.axes.Axes.imshow`.
"""
def __init__(self, images, usecbar=False, **kwargs):
# ======================================
# Save input info
# ======================================
self._images = images
self._num_imgs = len(images)
self._image_ind = 0
self._kwargs = kwargs
self._usecbar = usecbar
# ======================================
# Create figure
# ======================================
# self.fig, self.gs = setup_figure(20, 10, figsize=scaled_figsize(self.image))
self.fig, self.gs = setup_figure(20, 10)
self._ax_img = self.fig.add_subplot(self.gs[0:-4, :])
self.ax_min = self.fig.add_subplot(self.gs[-3, 1:-1])
self.ax_max = self.fig.add_subplot(self.gs[-2, 1:-1])
self.ax_img = self.fig.add_subplot(self.gs[-1, 1:-1])
self._reset(**kwargs)
_plt.connect('key_press_event', self._keypress)
def _keypress(self, event):
self._event = event
newval = None
if event.key == 'right':
newval = self.imageslider.val + 1
if newval > self.num_imgs:
newval = self.num_imgs
elif event.key == 'left':
newval = self.imageslider.val - 1
if newval < 0:
newval = 0
if newval is not None:
self.imageslider.set_val(newval)
# if event.key in ['A', 'a']:
# self.RectangleSelector.set_active(not self.RectangleSelector.active)
# elif event.key in ['D', 'd']:
# try:
# self._rect.remove()
# _plt.draw()
# self._eclick = None
# self._erelease = None
# self._selfunc_results = None
# except:
# pass
def _fix_axes(self):
shape_x, shape_y = self.image.shape
ratio = shape_x / shape_y
figsize = self._kwargs.get('figsize', _mpl.rcParams['figure.figsize'])
figsize_ratio = figsize[0]/figsize[1]
if ratio > figsize_ratio:
x_lim = [0, shape_x]
y_lim = shape_x / figsize_ratio
y_lim = [(shape_y-y_lim)/2, (shape_y+y_lim)/2]
else:
x_lim = shape_y * figsize_ratio
x_lim = [(shape_x-x_lim)/2, (shape_x+x_lim)/2]
y_lim = [0, shape_y]
self.ax.set_xlim(x_lim)
self.ax.set_ylim(y_lim)
def _reset(self, **kwargs):
# ======================================
# Strip kwargs for vmin, vmax
# in order to set sliders correctly
# ======================================
minslide = kwargs.get('vmin', self.imgmin)
maxslide = kwargs.get('vmax', self.imgmax)
# ======================================
# Slider Logic
# ======================================
if self.imgmin > 0:
slidermin = kwargs.pop('smin', 0)
else:
slidermin = kwargs.pop('smin', self.imgmin)
if self.imgmax < 0:
slidermax = kwargs.pop('smax', 0)
else:
slidermax = kwargs.pop('smax', self.imgmax)
# ======================================
# Imshow
# ======================================
# self._AxesImage = self.ax.imshow(self.image, **kwargs)
self._AxesImage = imshow(self.image, ax=self.ax, add_cbar=False, **kwargs)
self._cax, self._cb = colorbar(self.ax, self._AxesImage)
self._fix_axes()
# ======================================
# Add minimum slider
# ======================================
self.minslider = _wdg.Slider(self.ax_min, 'Min', slidermin, slidermax, minslide)
# ======================================
# Add maximum slider
# ======================================
self.maxslider = _wdg.Slider(self.ax_max, 'Max', slidermin, slidermax, maxslide, slidermin=self.minslider)
self.minslider.slidermax = self.maxslider
self.minslider.on_changed(self._update_clim)
self.maxslider.on_changed(self._update_clim)
# ======================================
# Add image slider
# ======================================
self.imageslider = _wdg.Slider(ax=self.ax_img, label='Image', valmin=1, valmax=self.num_imgs, valinit=0, valfmt=u'%d')
self.imageslider.on_changed(self._update_image)
# if self.usecbar:
# self.fig.colorbar(self.AxesImage, ax=self.ax, use_gridspec=True)
# self.fig.tight_layout()
def _update_image(self, value):
        ind = int(_np.round(value) - 1)
self._image_ind = ind
# ======================================
# Get old axes settings
# ======================================
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
# ======================================
# Clear old axes
# ======================================
self.ax.cla()
self._cax.cla()
# ======================================
# Plot new data
# ======================================
self._AxesImage = imshow(self.image, ax=self.ax, add_cbar=False, **self._kwargs)
self._cb = _plt.colorbar(self._AxesImage, cax=self._cax)
# ======================================
# Fix the axes
# ======================================
self._fix_axes()
# ======================================
# Keep the colorbar to previous settings
# ======================================
self._update_clim(0)
# ======================================
# Get old axes settings
# ======================================
self.ax.set_xlim(xlim)
self.ax.set_ylim(ylim)
def set_cmap(self, cmap):
"""
Sets color map to *cmap*.
"""
self.AxesImage.set_cmap(cmap)
@property
def num_imgs(self):
"""
The number of images.
"""
return self._num_imgs
@property
def AxesImage(self):
"""
The :class:`matplotlib.image.AxesImage` from :meth:`matplotlib.axes.Axes.imshow`.
"""
return self._AxesImage
@property
def ax(self):
"""
The :class:`matplotlib.axes.Axes` used for :meth:`matplotlib.axes.Axes.imshow`.
"""
return self._ax_img
# ======================================
# Get max of image
# ======================================
@property
def imgmax(self):
"""
Highest value of input image.
"""
if not hasattr(self, '_imgmax'):
imgmax = _np.max(self.images[0])
for img in self.images:
imax = _np.max(img)
if imax > imgmax:
imgmax = imax
self._imgmax = imgmax
return self._imgmax
# ======================================
# Get min of image
# ======================================
@property
def imgmin(self):
"""
Lowest value of input image.
"""
if not hasattr(self, '_imgmin'):
imgmin = _np.min(self.images[0])
for img in self.images:
imin = _np.min(img)
                if imin < imgmin:
                    imgmin = imin
            self._imgmin = imgmin
        return self._imgmin
# ======================================
# Update the clims
# ======================================
def _update_clim(self, val):
cmin = self.minslider.val
cmax = self.maxslider.val
# print('Cmin: {}, Cmax: {}'.format(cmin, cmax))
self.AxesImage.set_clim(cmin, cmax)
# ======================================
# Easily get and set slider
# ======================================
@property
def clim_min(self):
"""
Slider value for minimum
"""
return self.minslider.val
@clim_min.setter
def clim_min(self, val):
self.minslider.set_val(val)
@property
def clim_max(self):
"""
Slider value for maximum
"""
return self.maxslider.val
@clim_max.setter
def clim_max(self, val):
self.maxslider.set_val(val)
@property
def images(self):
"""
The array of images.
"""
return self._images
@property
def image(self):
"""
The image loaded.
"""
return self._images[self._image_ind]
```
#### File: scisalt/matplotlib/NonUniformImage_axes.py
```python
import os as _os
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import numpy as _np
def NonUniformImage_axes(img):
"""
Returns axes *x, y* for a given image *img* to be used with :func:`scisalt.matplotlib.NonUniformImage`.
Returns
-------
    x, y : :class:`numpy.ndarray`
"""
xmin = 0
xmax = img.shape[1]-1
ymin = 0
ymax = img.shape[0]-1
x = _np.linspace(xmin, xmax, img.shape[1])
y = _np.linspace(ymin, ymax, img.shape[0])
return x, y
```
#### File: scisalt/matplotlib/NonUniformImage.py
```python
import os as _os
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import matplotlib.image as _mplim
import matplotlib.cm as _cm
import numpy as _np
from .colorbar import colorbar as _cb
from .setup_axes import setup_axes as _setup_axes
from .SciImage import SciImage as _SI
def NonUniformImage(x, y, z, ax=None, fig=None, cmap=None, alpha=None, scalex=True, scaley=True, add_cbar=True, **kwargs):
"""
Used to plot a set of coordinates.
Parameters
----------
x, y : :class:`numpy.ndarray`
1-D ndarrays of lengths N and M, respectively, specifying pixel centers
z : :class:`numpy.ndarray`
An (M, N) ndarray or masked array of values to be colormapped, or a (M, N, 3) RGB array, or a (M, N, 4) RGBA array.
ax : :class:`matplotlib.axes.Axes`, optional
The axis to plot to.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
cmap : :class:`matplotlib.colors.Colormap`, optional
The colormap to use.
alpha : float, optional
The transparency to use.
scalex : bool, optional
To set the x limits to available data
scaley : bool, optional
To set the y limits to available data
add_cbar : bool, optional
Whether ot add a colorbar or not.
Returns
-------
img : :class:`matplotlib.image.NonUniformImage`
Object representing the :class:`matplotlib.image.NonUniformImage`.
"""
if ax is None and fig is None:
fig, ax = _setup_axes()
elif ax is None:
ax = fig.gca()
elif fig is None:
fig = ax.get_figure()
norm = kwargs.get('norm', None)
im = _mplim.NonUniformImage(ax, **kwargs)
vmin = kwargs.pop('vmin', _np.min(z))
vmax = kwargs.pop('vmax', _np.max(z))
# im.set_clim(vmin=vmin, vmax=vmax)
if cmap is not None:
im.set_cmap(cmap)
m = _cm.ScalarMappable(cmap=im.get_cmap(), norm=norm)
m.set_array(z)
if add_cbar:
cax, cb = _cb(ax=ax, im=m, fig=fig)
if alpha is not None:
im.set_alpha(alpha)
im.set_data(x, y, z)
ax.images.append(im)
if scalex:
xmin = min(x)
xmax = max(x)
ax.set_xlim(xmin, xmax)
if scaley:
ymin = min(y)
ymax = max(y)
ax.set_ylim(ymin, ymax)
return _SI(im=im, cb=cb, cax=cax)
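# --- Illustrative usage sketch (added for clarity; not part of the original module).
# Builds an (M, N) array on non-uniformly spaced pixel centers; x has length N,
# y has length M, matching the docstring above.
def _nonuniformimage_example():
    x = _np.linspace(0, 1, 50) ** 2       # non-uniform pixel centers, length N = 50
    y = _np.linspace(0, 2, 80)            # length M = 80
    z = _np.outer(_np.sin(y * _np.pi), _np.cos(x * _np.pi))  # shape (M, N)
    return NonUniformImage(x, y, z, add_cbar=True, interpolation='nearest')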
```
#### File: scisalt/matplotlib/pcolor_axes.py
```python
import os as _os
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import numpy as _np
def px_to_units(xpx, ypx):
return xpx, ypx
def pcolor_axes(array, px_to_units=px_to_units):
"""
    Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.pcolor`.
*px_to_units* is a function to convert pixels to units. By default, returns pixels.
"""
# ======================================
# Coords need to be +1 larger than array
# ======================================
x_size = array.shape[0]+1
y_size = array.shape[1]+1
x = _np.empty((x_size, y_size))
y = _np.empty((x_size, y_size))
for i in range(x_size):
for j in range(y_size):
x[i, j], y[i, j] = px_to_units(i-0.5, j-0.5)
return x, y
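# --- Illustrative usage sketch (added for clarity; not part of the original module).
# The returned coordinate grids are one element larger than the image in each
# dimension, as pcolor/pcolormesh expect cell corners rather than centers.
# `image` stands in for any 2D numpy array.
def _pcolor_axes_example(image):
    import matplotlib.pyplot as plt
    x, y = pcolor_axes(image)
    fig, ax = plt.subplots()
    ax.pcolormesh(x, y, image)
    return fig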
```
#### File: scisalt/matplotlib/SciImage.py
```python
class SciImage(object):
def __init__(self, im, cb, cax):
self._im = im
self._cb = cb
self._cax = cax
@property
def im(self):
return self._im
@property
def cb(self):
return self._cb
@property
def cax(self):
return self._cax
```
#### File: scisalt/qt/mplwidget.py
```python
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as _FigureCanvas
from matplotlib.backends.backend_qt4 import NavigationToolbar2QT as _NavigationToolbar
import matplotlib as _mpl
import numpy as _np
from .Rectangle import Rectangle
import pdb
import traceback
import logging
loggerlevel = logging.DEBUG
logger = logging.getLogger(__name__)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Slider_and_Text(QtGui.QWidget):
valueChanged = QtCore.pyqtSignal(int)
sliderReleased = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
QtGui.QWidget.__init__(self)
self.setMaximumHeight(40)
# Enable tracking by default
self._tracking = True
self.hLayout = QtGui.QHBoxLayout()
self.slider = QtGui.QSlider()
self.leftbutton = QtGui.QPushButton()
self.leftbutton.setText("<")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.leftbutton.sizePolicy().hasHeightForWidth())
# self.leftbutton.setSizePolicy(sizePolicy)
self.leftbutton.clicked.connect(self._subone)
self.rightbutton = QtGui.QPushButton()
self.rightbutton.setText(">")
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rightbutton.sizePolicy().hasHeightForWidth())
# self.rightbutton.setSizePolicy(sizePolicy)
self.rightbutton.clicked.connect(self._addone)
self.v = QtGui.QIntValidator()
self.box = QtGui.QLineEdit()
self.box.setValidator(self.v)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.box.sizePolicy().hasHeightForWidth())
# self.box.setSizePolicy(sizePolicy)
self.hLayout.addWidget(self.leftbutton)
self.hLayout.addWidget(self.slider)
self.hLayout.addWidget(self.box)
self.hLayout.addWidget(self.rightbutton)
self.setLayout(self.hLayout)
self.slider.valueChanged.connect(self._sliderChanged)
self.box.editingFinished.connect(self._textChanged)
self.setOrientation(QtCore.Qt.Horizontal)
# Connect release so tracking works as expected
self.slider.sliderReleased.connect(self._sliderReleased)
def _addone(self):
self.value = self.value + 1
self.valueChanged.emit(self.value)
def _subone(self):
self.value = self.value - 1
self.valueChanged.emit(self.value)
def _sliderReleased(self):
print('Released')
        self.sliderReleased.emit(self.slider.value())
def setTracking(self, val):
print('Tracking set to {}'.format(val))
self._tracking = val
def setMaximum(self, val):
self.slider.setMaximum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def setMinimum(self, val):
self.slider.setMinimum(val)
self.v.setRange(self.slider.minimum(), self.slider.maximum())
self.box.setValidator(self.v)
def _sliderChanged(self, val):
self.box.setText(str(val))
if self._tracking:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.valueChanged.emit(val)
else:
try:
self.slider.sliderReleased.disconnect()
except:
pass
self.slider.sliderReleased.connect(self._sliderChanged_notracking)
def _sliderChanged_notracking(self):
val = self.slider.value()
# print('Value to be emitted is {}'.format(val))
self.valueChanged.emit(val)
def _textChanged(self):
val = self.box.text()
self.slider.setValue(int(val))
self._sliderChanged_notracking()
def setOrientation(self, *args, **kwargs):
self.slider.setOrientation(*args, **kwargs)
def _getValue(self):
return self.slider.value()
def _setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
value = property(_getValue, _setValue)
def setValue(self, val):
self.slider.setValue(val)
self.box.setText(str(val))
# self.valueChanged.emit(val)
class Mpl_Plot(_FigureCanvas):
def __init__(self, parent=None):
# Initialize things
self.fig = _mpl.figure.Figure()
_FigureCanvas.__init__(self, self.fig)
_FigureCanvas.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self)
# Create axes
self.ax = self.fig.add_subplot(111)
def plot(self, *args, **kwargs):
self.ax.clear()
self.ax.plot(*args, **kwargs)
self.ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
self.ax.figure.canvas.draw()
class Mpl_Image(QtGui.QWidget):
# Signal for when the rectangle is changed
rectChanged = QtCore.pyqtSignal(Rectangle)
def __init__(self, parent=None, rectbool = True, toolbarbool=False, image=None):
# Initialize things
QtGui.QWidget.__init__(self)
self.rectbool = rectbool
self._clim_min = 0
self._clim_max = 3600
self._pressed = False
# Add a vertical layout
self.vLayout = QtGui.QVBoxLayout()
# Add a figure
self.fig = _mpl.figure.Figure()
# Add a canvas containing the fig
self.canvas = _FigureCanvas(self.fig)
_FigureCanvas.setSizePolicy(self.canvas, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
_FigureCanvas.updateGeometry(self.canvas)
# Setup the layout
if toolbarbool:
self.toolbar = _NavigationToolbar(self.canvas, self)
self.toolbar.setMaximumHeight(20)
self.vLayout.addWidget(self.toolbar)
self.vLayout.addWidget(self.canvas)
self.setLayout(self.vLayout)
# Create axes
self.ax = self.fig.add_subplot(111)
# Include rectangle functionality
if rectbool:
self.fig.canvas.mpl_connect('button_press_event', self.on_press)
self.fig.canvas.mpl_connect('button_release_event', self.on_release)
self.Rectangle = Rectangle(
x = -10 ,
y = 0 ,
width = 0 ,
height = 3 ,
axes = self.ax
)
# Add image
self.image = image
def _get_img(self):
return self._image
def _set_img(self, image):
self.ax.clear()
self._image = image
if image is not None:
self._imgplot = self.ax.imshow(image, interpolation='none')
if self.rectbool:
self.ax.add_patch(self.Rectangle.get_rect())
# imagemax = _np.max(_np.max(image))
self.set_clim(self._clim_min, self._clim_max)
image = property(_get_img, _set_img)
def set_clim(self, clim_min, clim_max):
if self.image is not None:
self._clim_min = clim_min
self._clim_max = clim_max
self._imgplot.set_clim(clim_min, clim_max)
self.ax.figure.canvas.draw()
def on_press(self, event):
if self.toolbar._active is None:
self._pressed = True
self.x0 = event.xdata
self.y0 = event.ydata
logger.log(level=loggerlevel, msg='Pressed: x0: {}, y0: {}'.format(self.x0, self.y0))
def on_release(self, event):
if self._pressed:
self._pressed = False
print('release')
self.x1 = event.xdata
self.y1 = event.ydata
width = self.x1 - self.x0
height = self.y1 - self.y0
logger.log(level=loggerlevel, msg='Released: x0: {}, y0: {}, x1: {}, y1: {}, width: {}, height: {}'.format(
self.x0 ,
self.y0 ,
self.x1 ,
self.y1 ,
width ,
height
)
)
self.Rectangle.set_xy((self.x0, self.y0))
self.Rectangle.set_width(width)
self.Rectangle.set_height(height)
self.ax.figure.canvas.draw()
self.rectChanged.emit(self.Rectangle)
# print(self.rect)
def zoom_rect(self, border=None, border_px=None):
# ======================================
# Get x coordinates
# ======================================
x0 = self.Rectangle.get_x()
width = self.Rectangle.get_width()
x1 = x0+width
# ======================================
# Get y coordinates
# ======================================
y0 = self.Rectangle.get_y()
height = self.Rectangle.get_height()
y1 = y0+height
# ======================================
# Validate borders
# ======================================
if (border_px is None) and (border is not None):
xborder = border[0]*width
yborder = border[1]*height
elif (border_px is not None) and (border is None):
xborder = border_px[0]
yborder = border_px[1]
elif (border_px is None) and (border is None):
raise IOError('No border info specified!')
elif (border_px is not None) and (border is not None):
raise IOError('Too much border info specified, both border_px and border!')
else:
raise IOError('End of the line!')
# ======================================
# Add borders
# ======================================
x0 = x0 - xborder
x1 = x1 + xborder
y0 = y0 - yborder
y1 = y1 + yborder
# ======================================
# Validate coordinates to prevent
# unPythonic crash
# ======================================
if not ((0 <= x0 and x0 <= self.image.shape[1]) and (0 <= x1 and x1 <= self.image.shape[1])):
print('X issue')
print('Requested: x=({}, {})'.format(x0, x1))
x0 = 0
x1 = self.image.shape[1]
if not ((0 <= y0 and y0 <= self.image.shape[0]) and (0 <= y1 and y1 <= self.image.shape[0])):
print('y issue')
print('Requested: y=({}, {})'.format(y0, y1))
y0 = 0
y1 = self.image.shape[0]
# ======================================
# Set viewable area
# ======================================
self.ax.set_xlim(x0, x1)
self.ax.set_ylim(y0, y1)
# ======================================
# Redraw canvas to show updates
# ======================================
self.ax.figure.canvas.draw()
class Mpl_Image_Plus_Slider(QtGui.QWidget):
# def __init__(self, parent=None, **kwargs):
def __init__(self, parent=None, **kwargs):
# Initialize self as a widget
QtGui.QWidget.__init__(self, parent)
# Add a vertical layout with parent self
self.vLayout = QtGui.QVBoxLayout(self)
self.vLayout.setObjectName(_fromUtf8("vLayout"))
# Add an Mpl_Image widget to vLayout,
# save it to self._img
# Pass arguments through to Mpl_Image.
self._img = Mpl_Image(parent=parent, toolbarbool=True, **kwargs)
self._img.setObjectName(_fromUtf8("_img"))
self.vLayout.addWidget(self._img)
# Add a slider to vLayout,
# save it to self.max_slider
# self.max_slider = QtGui.QSlider(self)
self.max_slider = Slider_and_Text(self)
self.max_slider.setObjectName(_fromUtf8("max_slider"))
self.max_slider.setOrientation(QtCore.Qt.Horizontal)
self.vLayout.addWidget(self.max_slider)
# Setup slider to work with _img's clims
self.max_slider.valueChanged.connect(lambda val: self.set_clim(0, val))
def _get_image(self):
return self._img.image
def _set_image(self, image):
self._img.image = image
maximage = _np.max(_np.max(image))
self.max_slider.setMaximum(maximage)
image = property(_get_image, _set_image)
def _get_ax(self):
return self._img.ax
ax = property(_get_ax)
def _get_Rectangle(self):
return self._img.Rectangle
# def _set_rect(self, rect):
# self._img.rect(rect)
Rectangle = property(_get_Rectangle)
def zoom_rect(self, border=None, border_px=None):
self._img.zoom_rect(border, border_px)
def set_clim(self, *args, **kwargs):
self._img.set_clim(*args, **kwargs)
def setSliderValue(self, val):
self.max_slider.setValue(val)
```
#### File: scisalt/scipy/fill_missing_timestamps.py
```python
import os as _os
on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
import numpy as _np
import scipy as _sp
import logging as _logging
logger = _logging.getLogger(__name__)
def fill_missing_timestamps(timestamp, values):
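    # Hedged summary: timestamps are assumed to be (roughly) evenly spaced; gaps
    # larger than the modal spacing are filled with synthetic timestamps, and the
    # corresponding values are drawn from a normal fit of the observed values.
    # Illustrative call (names/values are only an example):
    #   ts, vals = fill_missing_timestamps(_np.array([0, 1, 3]), _np.array([5., 6., 8.]))
    # which should insert one synthetic sample at t=2.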
# ======================================
# Find the stats of the values
# ======================================
values_mean = _np.mean(values)
values_std = _np.std(values)
# ======================================
# Find timestamp interval between each
# step
# ======================================
offsets = timestamp[1:]-timestamp[:-1]
mode_res = _sp.stats.mstats.mode(offsets)
dt = mode_res[0][0]
# ======================================
# Start the arrays to fill
# ======================================
ts_new = _np.array([timestamp[0]])
values_new = _np.array([values[0]])
for ts_i, val_i in zip(timestamp[1:], values[1:]):
# ======================================
# Find gap from last time
# ======================================
gap = ts_i-ts_new[-1]
# ======================================
# Find number of intervals the gap is
# ======================================
n_dt = _np.round(gap/dt)
# ======================================
# Shots are missing if the gap is > 1*dt
# ======================================
if n_dt > 1:
            n_fill = int(n_dt - 1)  # cast to int: numpy round() yields a float, but array sizes must be integers
logger.warn('{} missing shot(s) after timestamp: {}'.format(n_fill, ts_new[-1]))
# ======================================
# Fill time info
# ======================================
t_fill = ts_new[-1] + dt * _np.linspace(1, n_fill, n_fill)
logger.warn('Filling time: {}'.format(t_fill))
ts_new = _np.append(ts_new, t_fill)
# ======================================
# Fill values with random values
# ======================================
val_fill = _np.random.normal(values_mean, values_std, n_fill)
logger.warn('Filling values: {}'.format(val_fill))
values_new = _np.append(values_new, val_fill)
# ======================================
# Append next shot
# ======================================
ts_new = _np.append(ts_new, ts_i)
values_new = _np.append(values_new, val_i)
return (ts_new, values_new)
```
#### File: scisalt/utils/githubtunnel.py
```python
import os as _os
import argparse as _argparse
import shlex
from subprocess import call
__all__ = ['githubtunnel']
def githubtunnel(user1, server1, user2, server2, port, verbose, stanford=False):
"""
Opens a nested tunnel, first to *user1*@*server1*, then to *user2*@*server2*, for accessing on *port*.
If *verbose* is true, prints various ssh commands.
If *stanford* is true, shifts ports up by 1.
    Attempts to get *user1*, *user2* from the environment variable ``PHYSICS_USER`` if called from the command line.
"""
if stanford:
port_shift = 1
else:
port_shift = 0
# command1 = 'ssh -nNf -L {}:quickpicmac3.slac.stanford.edu:22 {}@{}'.format(port, user, server)
command1 = 'ssh -nNf -L {}:{}:22 {}@{}'.format(port-1-port_shift, server2, user1, server1)
command2 = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -nNf -L {}:cardinal.stanford.edu:22 -p {} {}@localhost'.format(port-port_shift, port-port_shift-1, user2)
command3 = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -nNf -L {}:github.com:22 -p {} {}@localhost'.format(port, port-1, user2)
if verbose:
print(command1)
if stanford:
print(command2)
print(command3)
try:
call(shlex.split(command1))
if stanford:
call(shlex.split(command2))
call(shlex.split(command3))
except:
print('Failure!')
pass
# ================================
# Get info one way or another
# ================================
def environvar(prompt, val):
if val is None:
val = input(prompt + ' ')
return val
def _script():
# ================================
# Access command line arguments
# ================================
parser = _argparse.ArgumentParser(description=
'Creates a tunnel primarily for Git.')
parser.add_argument('-V', action='version', version='%(prog)s v0.1')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose mode.')
parser.add_argument('-p', '--port', default=7777, type=int,
help='Local port to listen on.')
parser.add_argument('-s1', '--server1', default='mcclogin',
help='First server hop.')
parser.add_argument('-s2', '--server2', default='iris.slac.stanford.edu',
help='Second server hop.')
parser.add_argument('-su', '--stanford', action='store_true',
help='Tunnel through Stanford')
parser.add_argument('-u1', '--user1', default=_os.environ.get('PHYSICS_USER'),
help='User name to use for login.')
parser.add_argument('-u2', '--user2', default=_os.environ.get('PHYSICS_USER'),
help='User name to use for login.')
arg = parser.parse_args()
# ================================
# Ask user for logins if needed
# ================================
prompt1 = 'User name for server {}?'.format(arg.server1)
prompt2 = 'User name for server {}?'.format(arg.server2)
user1 = environvar(prompt1, arg.user1)
user2 = environvar(prompt2, arg.user2)
# ================================
# Run with command line arguments
# ================================
githubtunnel(user1, arg.server1, user2, arg.server2, arg.port, arg.verbose, stanford=arg.stanford)
if __name__ == '__main__':
_script()
```
#### File: tests/facettools/test_print2elog.py
```python
import mytools.facettools as mtft
import datetime as dt
def test_print2elog():
mtft.print2elog(author='<NAME>', title='Test', text='This is a test')
``` |
{
"source": "joelfrederico/VCheck",
"score": 2
} |
#### File: VCheck/tests/base.py
```python
import unittest as _unittest
import unittest.mock as _mock
import git
import vcheck
import vcheck.versionerror as _mod2check
# ================================
# Hexsha for repo head
# ================================
current_hexsha = 'b39035318052f36e8347c54b2dba4195a03c7847'
# ================================
# Hexsha for repo tags
# ================================
current_hexshas = [ 'b56a895c7a5996f13341023033ab324ada6ee2bc',
'093f93188ce93e2ab5e2453c1423bcf87542c08b',
'1109ccbc8ffa750db7f0a71523d18833d54904a5'
]
# ================================
# Hexsha guaranteed not present
# ================================
unpresent_hexsha = '0ff92d0c2b192ffcc136108d6c339d742da3d5f0'
# ================================
# Versions for repo tags
# ================================
current_versions = [ 'v0.0.0', 'v0.0.1', 'v1.0.0' ]
# ================================
# Version guaranteed not present
# ================================
unpresent_version = 'v2.0.0'
# ================================
# Module to check
# ================================
class base(_unittest.TestCase):
def setUp(self):
# ================================
# Create patchers
# ================================
self.gitRepo_patcher = _mock.patch('git.Repo', autospec=True)
self.gitTagRef_patcher = _mock.patch('git.TagReference', autospec=True)
# ================================
# Start patchers
# ================================
self.gitRepo_patcher.start()
self.gitTagRef_patcher.start()
# ================================
# Add cleanup
# ================================
self.addCleanup(self.gitRepo_patcher.stop)
self.addCleanup(self.gitTagRef_patcher.stop)
self.addCleanup(self._clearcmod)
self.gitRepoInst = git.Repo()
self.mockrepo_real()
def tearDown(self):
pass
def mockrepo_real(self, is_dirty=False, on_version_ind=None, current_hexshas=current_hexshas, current_versions=current_versions):
inst = self.gitRepoInst
# ================================
# Set whether dirty or real
# ================================
inst.is_dirty.return_value = is_dirty
# ================================
# Mock repo has versions/tags
# ================================
if on_version_ind is not None:
inst.head.object.hexsha = current_hexshas[on_version_ind]
else:
inst.head.object.hexsha = current_hexsha
inst.tags = []
for i in current_versions:
inst.tags.append(git.TagReference('a', 'b'))
for i, tag in enumerate(inst.tags):
tag.object.hexsha = current_hexshas[i]
tag.name = current_versions[i]
# ================================
# Reset self.cmod instance
# ================================
self._cmod = None
@property
def cmod(self):
if self._cmod is None:
self._cmod = vcheck.CheckMod(self.mod2check)
return self._cmod
def _clearcmod(self):
self._cmod = None
@property
def mod2check(self):
return _mod2check
```
#### File: VCheck/vcheck/scripts.py
```python
import sys as _sys
import argparse as _argparse
import vcheck
from .versionmod import version, hexsha
import builtins as _builtins
# from .checkmod import CheckMod as _CheckMod
def _vcheck():
# ================================
# Access command line arguments
# ================================
parser = _argparse.ArgumentParser(description=
'Returns the hash and version of a git repository.')
parser.add_argument('-V', action='version', version='%(prog)s v{}'.format(vcheck.__version__))
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose mode.')
parser.add_argument('module',
help='Module to check.')
arg = parser.parse_args()
mod = _builtins.__import__(arg.module)
def printerr():
e = _sys.exc_info()
print(e[1].args[0])
try:
print('Hexsha is: {}'.format(hexsha(mod)))
except:
printerr()
return
try:
print(version(mod))
except:
printerr()
if __name__ == '__main__':
_vcheck()
```
#### File: VCheck/vcheck/versionerror.py
```python
class VersionError(Exception):
"""
Custom exceptions specific to different errors while version-checking.
While version-checking, there are quite a few errors that can arise.
Most are related to the state of the git repository not reliably
reproducing a specific version. Different exceptions with varying
`errno` codes can be raised. Codes are used to programmatically
identify different types of failures.
Parameters
----------
msg : str
A short error message explaining the nature of the error.
errno : int
A preset number taken from the :class:`vcheck.VersionError` class.
Attributes
----------
VERSION_UNMATCHED : code
Failure because the git repository's version was not found.
DIRTY : code
Failure because the git repository was dirty.
NO_GIT : code
Failure because the module is not contained in a git repository.
NO_TAGS : code
Failure because the git repository has no tags.
NOT_AT_TAG : code
Failure because the git repository is not at any tag.
"""
VERSION_UNMATCHED = 1
DIRTY = 2
NO_GIT = 3
NO_TAGS = 4
NOT_AT_TAG = 5
def __init__(self, msg, errno=None):
redmsg = '\033[31m{}\033[0m'.format(msg)
super().__init__(redmsg)
self._msg = redmsg
self._errno = errno
@property
def msg(self):
"""
A short error message explaining the nature of the error.
"""
return self._msg
@property
def errno(self):
"""
A preset number taken from the :class:`vcheck.VersionError` class.
"""
return self._errno
``` |
{
"source": "joelfrederico/WatchMendeley",
"score": 3
} |
#### File: WatchMendeley/watchmendeley/scripts.py
```python
import argparse as _argparse
import re
def fix_higgs_paper(path, output='/Users/joelfrederico/Thesis/overleaf/Dissertation.bib'):
print('Path: {}'.format(path))
print('Output: {}'.format(output))
punctuation = {0x2018: 0x27, 0x2019: 0x27, 0x201C: 0x22, 0x201D: 0x22}
# ================================
# Open files for reading, writing
# ================================
with open(path, encoding='utf-8') as f_in:
with open(output, mode='w', encoding='utf-8') as f_out:
# ================================
# Regex for replacing title
# ================================
prog_title = re.compile('^title = ')
prog_url = re.compile('^url = ')
sub_paren = re.compile('(^title = )({{)(.*)(}})(,\n)')
wrap_parens_list = {'TeV', 'SDDS', 'Higgs', 'Gaussian', 'Euler-Mascheroni'}
wrap_parens_compiled = []
for wrap_parens_target in wrap_parens_list:
wrap_parens_compiled.append(re.compile(wrap_parens_target))
url_fix_und = re.compile(r'{\\_}')
url_fix_tilde = re.compile(r'{~}')
math_mode_amp = re.compile(r'{\\\$}')
math_mode_backslash = re.compile(r'\\backslash')
math_mode_parens_open = re.compile(r'{\\{}')
math_mode_parens_closed = re.compile(r'{\\}}')
# ================================
# Run through each line.
# ================================
for i, line in enumerate(f_in):
m_title = prog_title.match(line)
m_url = prog_url.match(line)
# ================================
# Fix title parentheses
# ================================
m = sub_paren.match(line)
if m:
line = sub_paren.sub('\g<1>{\g<3>}\g<5>', line)
if m_title:
# ================================
# Fix math mode
# ================================
line = math_mode_amp.sub('$', line)
line = math_mode_backslash.sub(r'\\', line)
line = math_mode_parens_open.sub(r'{', line)
line = math_mode_parens_closed.sub(r'}', line)
# ================================
# Fix words that should be wrapped
# in parentheses
# ================================
wrap_parens_repl = '{\g<0>}'
for wrap_parens in wrap_parens_compiled:
line = wrap_parens.sub(wrap_parens_repl, line)
# ================================
# Fix url underscores
# ================================
if m_url:
line = url_fix_und.sub('_', line)
line = url_fix_tilde.sub('~', line)
f_out.writelines(line.translate(punctuation))
def _mendeleysync():
# ================================
# Access command line arguments
# ================================
parser = _argparse.ArgumentParser(description=
'Copies Mendeley\'s BibTeX and fixes it.')
parser.add_argument('-V', action='version', version='%(prog)s v0.1')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose mode.')
parser.add_argument('-i', '--input',
help='Path to BibTeX input file.')
parser.add_argument('-o', '--output',
help='Path to BibTeX input file.')
arg = parser.parse_args()
# ================================
# Run with command line arguments
# ================================
fix_higgs_paper(arg.input, arg.output)
if __name__ == '__main__':
# ================================
# Access command line arguments
# ================================
parser = _argparse.ArgumentParser(description=
'Copies Mendeley\'s BibTeX and fixes it.')
parser.add_argument('-V', action='version', version='%(prog)s v0.1')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose mode.')
parser.add_argument('-p', '--path',
help='Path to BibTeX file.')
arg = parser.parse_args()
# ================================
# Run with command line arguments
# ================================
fix_higgs_paper(arg.path)
``` |
{
"source": "joelgarde/flax",
"score": 2
} |
#### File: examples/linen_design_test/mlp_explicit.py
```python
import jax
from jax import numpy as jnp, random, lax
from flax import nn
from flax.nn import initializers
from typing import Any, Callable, Iterable, List, Optional, Tuple, Type, Union
from flax.linen import Module
import numpy as np
from pprint import pprint
from dense import Dense
# Require JAX omnistaging mode.
jax.config.enable_omnistaging()
# Add `in_features` to the built-in Dense layer that normally works
# via shape inference.
class DenseExplicit(Dense):
in_features: Optional[int] = None
def setup(self):
# We feed a fake batch through the module, which initialized parameters.
# Assuming we're in a jit, should use no FLOPs -- "just shape inference".
self.__call__(jnp.zeros((1, self.in_features, )))
class MLP(Module):
def setup(self):
self.dense1 = DenseExplicit(in_features=3, features=2)
self.dense2 = DenseExplicit(in_features=2, features=1)
# explicit instances are materialized immediately at init
pprint(self.dense2.variables)
# {'params': {'bias': DeviceArray([0.], dtype=float32),
# 'kernel': DeviceArray([[ 0.6704609 ],
# [-0.90477365]], dtype=float32)}}
def __call__(self, x):
return self.dense2(nn.relu(self.dense1(x)))
# Return an initialized instance of MLP by only calling `setup`.
rngkey = jax.random.PRNGKey(10)
init_variables = MLP().init({'params': rngkey}, jnp.ones((1, 3)))
pprint(init_variables)
# {'params': {'dense1': {'bias': DeviceArray([0., 0.], dtype=float32),
# 'kernel': DeviceArray([[ 0.18307537, -0.38739476],
# [-0.902451 , -0.5190721 ],
# [ 0.51552075, 1.1169153 ]], dtype=float32)},
# 'dense2': {'bias': DeviceArray([0.], dtype=float32),
# 'kernel': DeviceArray([[ 0.6704609 ],
# [-0.90477365]], dtype=float32)}}}
```
#### File: flax/linen/normalization.py
```python
from typing import (Any, Callable, Optional, Tuple)
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
from flax.linen.module import Module, compact, merge_param
PRNGKey = Any
Array = Any
Shape = Tuple[int]
Dtype = Any # this could be a real type?
_no_init = lambda rng, shape: ()
def _absolute_dims(rank, dims):
return tuple([rank + dim if dim < 0 else dim for dim in dims])
class BatchNorm(Module):
"""BatchNorm Module.
Attributes:
use_running_average: if True, the statistics stored in batch_stats
will be used instead of computing the batch statistics on the input.
axis: the feature or non-batch axis of the input.
momentum: decay rate for the exponential moving average of
the batch statistics.
epsilon: a small float added to variance to avoid dividing by zero.
dtype: the dtype of the computation (default: float32).
use_bias: if True, bias (beta) is added.
use_scale: if True, multiply by scale (gamma).
When the next layer is linear (also e.g. nn.relu), this can be disabled
since the scaling will be done by the next layer.
bias_init: initializer for bias, by default, zero.
scale_init: initializer for scale, by default, one.
axis_name: the axis name used to combine batch statistics from multiple
devices. See `jax.pmap` for a description of axis names (default: None).
axis_index_groups: groups of axis indices within that named axis
representing subsets of devices to reduce over (default: None). For
example, `[[0, 1], [2, 3]]` would independently batch-normalize over
the examples on the first two and last two devices. See `jax.lax.psum`
for more details.
"""
use_running_average: Optional[bool] = None
axis: int = -1
momentum: float = 0.99
epsilon: float = 1e-5
dtype: Dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
axis_name: Optional[str] = None
axis_index_groups: Any = None
@compact
def __call__(self, x, use_running_average: Optional[bool] = None):
"""Normalizes the input using batch statistics.
Args:
x: the input to be normalized.
use_running_average: if true, the statistics stored in batch_stats
will be used instead of computing the batch statistics on the input.
Returns:
Normalized inputs (the same shape as inputs).
"""
use_running_average = merge_param(
'use_running_average', self.use_running_average, use_running_average)
x = jnp.asarray(x, jnp.float32)
axis = self.axis if isinstance(self.axis, tuple) else (self.axis,)
axis = _absolute_dims(x.ndim, axis)
feature_shape = tuple(d if i in axis else 1 for i, d in enumerate(x.shape))
reduced_feature_shape = tuple(d for i, d in enumerate(x.shape) if i in axis)
reduction_axis = tuple(i for i in range(x.ndim) if i not in axis)
# we detect if we're in initialization via empty variable tree.
initializing = not self.has_variable('batch_stats', 'mean')
ra_mean = self.variable('batch_stats', 'mean',
lambda s: jnp.zeros(s, jnp.float32),
reduced_feature_shape)
ra_var = self.variable('batch_stats', 'var',
lambda s: jnp.ones(s, jnp.float32),
reduced_feature_shape)
if use_running_average:
mean, var = ra_mean.value, ra_var.value
else:
mean = jnp.mean(x, axis=reduction_axis, keepdims=False)
mean2 = jnp.mean(lax.square(x), axis=reduction_axis, keepdims=False)
if self.axis_name is not None and not initializing:
concatenated_mean = jnp.concatenate([mean, mean2])
mean, mean2 = jnp.split(
lax.pmean(
concatenated_mean,
axis_name=self.axis_name,
axis_index_groups=self.axis_index_groups), 2)
var = mean2 - lax.square(mean)
if not initializing:
ra_mean.value = self.momentum * ra_mean.value + (1 - self.momentum) * mean
ra_var.value = self.momentum * ra_var.value + (1 - self.momentum) * var
y = x - mean.reshape(feature_shape)
mul = lax.rsqrt(var + self.epsilon)
if self.use_scale:
scale = self.param('scale',
self.scale_init,
reduced_feature_shape).reshape(feature_shape)
mul = mul * scale
y = y * mul
if self.use_bias:
bias = self.param('bias',
self.bias_init,
reduced_feature_shape).reshape(feature_shape)
y = y + bias
return jnp.asarray(y, self.dtype)
class LayerNorm(Module):
"""Layer normalization (https://arxiv.org/abs/1607.06450).
Operates on the last axis of the input data.
It normalizes the activations of the layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within
each example close to 0 and the activation standard deviation close to 1.
Attributes:
epsilon: A small float added to variance to avoid dividing by zero.
dtype: the dtype of the computation (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma). When the next layer is linear
(also e.g. nn.relu), this can be disabled since the scaling will be done
by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
"""
epsilon: float = 1e-6
dtype: Any = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
@compact
def __call__(self, x):
"""Applies layer normalization on the input.
Args:
x: the inputs
Returns:
Normalized inputs (the same shape as inputs).
"""
x = jnp.asarray(x, jnp.float32)
features = x.shape[-1]
mean = jnp.mean(x, axis=-1, keepdims=True)
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
var = mean2 - lax.square(mean)
mul = lax.rsqrt(var + self.epsilon)
if self.use_scale:
mul = mul * jnp.asarray(
self.param('scale', self.scale_init, (features,)),
self.dtype)
y = (x - mean) * mul
if self.use_bias:
y = y + jnp.asarray(
self.param('bias', self.bias_init, (features,)),
self.dtype)
return jnp.asarray(y, self.dtype)
class GroupNorm(Module):
"""Group normalization (arxiv.org/abs/1803.08494).
This op is similar to batch normalization, but statistics are shared across
equally-sized groups of channels and not shared across batch dimension.
Thus, group normalization does not depend on the batch composition and does
not require maintaining internal state for storing statistics.
The user should either specify the total number of channel groups or the
number of channels per group.
Attributes:
num_groups: the total number of channel groups. The default value of 32 is
proposed by the original group normalization paper.
group_size: the number of channels in a group.
epsilon: A small float added to variance to avoid dividing by zero.
dtype: the dtype of the computation (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma). When the next layer is linear
(also e.g. nn.relu), this can be disabled since the scaling will be done
by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
"""
num_groups: int = 32
group_size: Optional[int] = None
epsilon: float = 1e-6
dtype: Any = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
@compact
def __call__(self, x):
"""Applies group normalization to the input (arxiv.org/abs/1803.08494).
Args:
x: the input of shape N...C, where N is a batch dimension and C is a
channels dimensions. `...` represents an arbitrary number of extra
dimensions that are used to accumulate statistics over.
Returns:
Normalized inputs (the same shape as inputs).
"""
x = jnp.asarray(x, jnp.float32)
if ((self.num_groups is None and self.group_size is None) or
(self.num_groups is not None and self.group_size is not None)):
raise ValueError('Either `num_groups` or `group_size` should be '
'specified, but not both of them.')
num_groups = self.num_groups
if self.group_size is not None:
channels = x.shape[-1]
if channels % self.group_size != 0:
raise ValueError('Number of channels ({}) is not multiple of the '
'group size ({}).'.format(channels, self.group_size))
num_groups = channels // self.group_size
input_shape = x.shape
group_shape = x.shape[:-1] + (num_groups, x.shape[-1] // num_groups)
x = x.reshape(group_shape)
reduction_axis = [d for d in range(1, x.ndim - 2)] + [x.ndim - 1]
mean = jnp.mean(x, axis=reduction_axis, keepdims=True)
mean_of_squares = jnp.mean(jnp.square(x), axis=reduction_axis,
keepdims=True)
var = mean_of_squares - jnp.square(mean)
x = (x - mean) * lax.rsqrt(var + self.epsilon)
x = x.reshape(input_shape)
feature_shape = tuple([1 for d in input_shape[:-1]] + [input_shape[-1]])
if self.use_scale:
x = x * self.param('scale', self.scale_init, feature_shape)
if self.use_bias:
x = x + self.param('bias', self.bias_init, feature_shape)
return x.astype(self.dtype)
``` |
{
"source": "joel-g/azure-reddit-tweeter",
"score": 3
} |
#### File: joel-g/azure-reddit-tweeter/azr.py
```python
import tweepy, praw, time
with open('config.ini','r') as config:
tokens = config.readlines()
TW_CONSUMER_KEY = tokens[0].rstrip()
TW_CONSUMER_SECRET = tokens[1].rstrip()
TW_ACCESS_KEY = tokens[2].rstrip()
TW_ACCESS_SECRET = tokens[3].rstrip()
REDDIT_APP = tokens[4].rstrip()
REDDIT_USER = tokens[5].rstrip()
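# config.ini is assumed to hold one credential per line, in this order:
# Twitter consumer key, consumer secret, access key, access secret,
# Reddit praw site/app name, Reddit user agent.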
def authenticate_twitter():
print('Authenticating twitter...')
auth = tweepy.OAuthHandler(TW_CONSUMER_KEY, TW_CONSUMER_SECRET)
auth.set_access_token(TW_ACCESS_KEY, TW_ACCESS_SECRET)
twitter = tweepy.API(auth)
print('Twitter authenticated.\n')
return twitter
def authenticate_reddit():
print('Authenticating reddit...\n')
reddit = praw.Reddit(REDDIT_APP, user_agent=REDDIT_USER)
print('Reddit authenticated.\n')
return reddit
def get_reddit_posts(reddit):
print("Fetching new posts...")
time.sleep(1)
posts = []
for post in reddit.subreddit('azure').hot(limit=20):
posts.append(post)
print("Returning " + str(len(posts)) + " reddit posts")
return posts
def record_already_tweeted(submission_id):
print("Logging tweet...")
writeable = open("tweeted.txt", 'a+')
writeable.write(submission_id + '\n')
writeable.close()
time.sleep(2)
def is_tweeted(submission_id):
print("Checking to see if this has already been tweeted...")
time.sleep(1)
readable = open("tweeted.txt", "r")
if submission_id in readable.read().splitlines():
print("It has been tweeted.\n")
time.sleep(1)
return True
else:
print("It has not been tweeted.\n")
time.sleep(1)
return False
def tweet(twitter, submission):
print("Tweeting...")
try:
twitter.update_status(submission.title + " http://reddit.com" + submission.permalink)
record_already_tweeted(submission.id)
print("Tweeted!\n")
except:
print("I was not able to TWEET!")
record_already_tweeted(submission.id + "FAILURE")
time.sleep(2)
def get_azure_tweets(twitter, x):
new_tweets = twitter.search(q="azure", count=x, lang="en")
print("Returning " + str(x) + " Azure tweets")
return new_tweets
def get_user_ids(list_of_tweets):
user_ids = []
for tweet in list_of_tweets:
user_ids.append(tweet.user.id)
print("Returning user IDs")
return user_ids
def follow_users(list_of_ids, twitter):
count = 0
print("Following new accounts")
for user_id in list_of_ids:
time.sleep(1)
try:
twitter.create_friendship(user_id)
print("Followed ", user_id)
count = count + 1
except:
print("Couldn't follow this user.")
break
print("Followed " + str(count) + " new accounts")
return count
def unfollow_old(twitter, x):
print("Unfollowing " + str(x) + " oldest follows")
follows_ids = twitter.friends_ids(twitter.me().id)
follows_ids.reverse()
for i in range(0,x-1):
twitter.destroy_friendship(follows_ids[i])
print(i)
time.sleep(180)
def like_tweets(twitter, tweets):
for tweet in tweets:
try:
twitter.create_favorite(tweet.id)
print("Liked ", tweet.id)
except:
print("Couldn't like that tweet")
break
def main():
reddit = authenticate_reddit()
twitter = authenticate_twitter()
while True:
like_tweets(twitter, get_azure_tweets(twitter, 50))
for post in get_reddit_posts(reddit):
if not is_tweeted(post.id):
tweet(twitter, post)
count = follow_users(get_user_ids(get_azure_tweets(twitter, 25)), twitter)
unfollow_old(twitter, count-1)
print("Sleeping 7 hours...\n\n")
time.sleep(21600)
break
if __name__ == '__main__':
main()
``` |
{
"source": "joel-g/cxp_community_stackoverflow_leaderboard",
"score": 3
} |
#### File: joel-g/cxp_community_stackoverflow_leaderboard/index.py
```python
from flask import Flask, render_template
import requests
app = Flask(__name__)
STACK_IDS=('8826629','9541448', '6591675', '7452904', '9393813', '8840926', '9454112', '9598801', '8797362', '8748848', '9672092', '9603809', '9536851', '8366814', '9541342', '8840650', '9873161', '9401038', '5420074')
def form_api_call(stack_ids):
url = "http://api.stackexchange.com/2.2/users/"
for user in stack_ids:
url = url + user + ";"
url = url[:-1]
url = url + "?order=desc&sort=reputation&site=stackoverflow&key=dS6SG)8rCYRWj0DcXzmJ4w(("
return url
def get_users():
res = requests.get(form_api_call(STACK_IDS))
return res.json()['items']
def rank_users(order_by):
jsons = get_users()
jsons = sorted(jsons, key=lambda k: k[order_by], reverse=True)
return jsons
@app.route('/')
def index(users=None):
jsons = rank_users("reputation")
return render_template('index.html', users=jsons, title="Total Reputation")
@app.route('/week')
def total(users=None):
jsons = rank_users("reputation_change_week")
return render_template('index.html', users=jsons, title="Rep Change this Week")
@app.route('/month')
def month(users=None):
jsons = rank_users("reputation_change_month")
return render_template('index.html', users=jsons, title="Rep Change this Month")
if __name__ == '__main__':
app.run()
``` |
{
"source": "joelgerard/malleus",
"score": 2
} |
#### File: api/domain/user.py
```python
class User:
id = None
first_name = None
last_name = None
address1 = None
address2 = None
city = None
postal_code = None
state = None
country = None
description = None
def __init__(self):
pass
```
#### File: api/service/bench_service.py
```python
from malleus.api.domain.user_generator import UserGenerator
from malleus.api.repository.datastore import Datastore
from malleus.api.repository.mongodb import MongoDB
from malleus.api.domain.timer import Timer
from malleus.api.service.protos.bench_service_pb2 import BenchRequest
import malleus.api.service.protos.bench_service_pb2_grpc as bench_service_pb2_grpc
class BenchService(bench_service_pb2_grpc.BenchServiceServicer):
datasources = {BenchRequest.GDATASTORE: Datastore,
BenchRequest.MONGODB: MongoDB}
def read(self, request: BenchRequest, context = None):
num = request.num
datastore = Datastore()
timer = Timer()
for i in range(1, num):
timing = timer.start('DS read 1')
user = datastore.get(i)
timer.end(timing)
return timer.timings
def write(self, request: BenchRequest, context = None):
size = request.num
datastore = self.datasources[request.datasource]()
timer = Timer()
user_gen = UserGenerator()
users = user_gen.get_random_users(int(size))
timing = timer.start('DS write batch')
datastore.update_list(users)
timer.end(timing)
return timer.timings
```
#### File: client/service/call_service.py
```python
import config
from malleus.api.service.protos.bench_service_pb2 import BenchRequest
#from malleus.api.service.protos.bench_service_pb2 import BenchRequest.Datasource
from malleus.api.domain.timer import Timer
import grpc
from malleus.api.service.protos.bench_service_pb2 import BenchRequest
import malleus.api.service.protos.bench_service_pb2_grpc as bench_service_pb2_grpc
class CallService:
def __init__(self, region):
channel = grpc.insecure_channel(config.host[region])
self.stub = bench_service_pb2_grpc.BenchServiceStub(channel)
    def write(self, num, datasource=None):
        bench_request = BenchRequest()
        bench_request.num = num
        # Forward the requested datasource when one is given; otherwise the proto default applies.
        if datasource is not None:
            bench_request.datasource = datasource
        return self.stub.write(bench_request)
def read(self, num, datasource = None):
datasources = [BenchRequest.GDATASTORE, BenchRequest.MONGODB]
for datasource in datasources:
bench_request = BenchRequest()
bench_request.datasource = datasource
bench_request.num = num
timings = self.stub.read(bench_request)
timer = Timer(timings)
self.print_stats(datasource, timer)
#return timings
def print_stats(self, datasource, timer):
print(datasource)
print("Duration: " + str(timer.get_duration()))
print("Average: " + str(timer.get_avg()))
print("95pct:" + str(timer.get_95p()))
print("99pct:" + str(timer.get_99p()))
``` |
{
"source": "joelghill/catcam",
"score": 3
} |
#### File: joelghill/catcam/motion_sensor.py
```python
import RPi.GPIO as GPIO
import time
class MotionDetector() :
_input_pin = 11
_is_running = False
_on_motion_detected = None
def __init__(self, callback, input_pin=11) :
"""
Initializes a new instance of the SonicDistance class
tigger is the GPIO pin connected to the trigger sensor pin
echo is the GPIO pin connected to the echo sensor pin
"""
self._input_pin = input_pin
self._on_motion_detected = callback
def start(self, wait=0.5) :
"""
Begins monitoring for distance changes
offset - The amount of change in distance before callback is activated
wait - wait time in seconds before checking distance changes
"""
self._prepare()
self._is_running = True
print('Detecting motion...')
while self._is_running == True :
GPIO.wait_for_edge(self._input_pin, GPIO.RISING)
self._on_motion_detected()
def stop(self):
"""
Stops monitoring for distance. Cleans up GPIO
"""
        self._is_running = False
GPIO.cleanup()
def _prepare(self):
"""
Prepares this instance for using the GPIO board
to interact with HC-SR04 distance sensor
"""
try:
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self._input_pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
print "Waiting for sensor to settle"
time.sleep(5)
print("ready to detect motion")
except Exception as e:
print("prepare call failed: " + str(e))
self.print_config()
raise
def print_config(self):
print("Input Pin: " + str(self._input_pin))
#monitor = SonicDistanceMonitor(print_distance)
#monitor.start(0.2)
```
#### File: joelghill/catcam/tweet_composer.py
```python
import json
import random
class TweetComposer:
"""
Composes tweets
"""
_tweets_path = ""
_flavor_text_odds = 0
_tweets = None
def __init__(self, tweets_path='./tweets.json', flavour_text_odds=50):
"""
Constructs a new instance of the TweetComposer class
:type flavour_text_odds: object
:type tweets_path: basestring
"""
self._tweets_path = tweets_path
self._flavor_text_odds = flavour_text_odds
self._load_tweets()
def compose_tweet(self, predictions):
"""
Composes a tweet based on the predicted content of an image
:param predictions: The predicted content of an image
:return: A tweet ready to send out into the world
"""
random_value = random.randint(0, 100)
if random_value >= self._flavor_text_odds:
return self.get_flavor_text_tweet(predictions)
else:
return TweetComposer.get_default_tweet(predictions)
def get_flavor_text_tweet(self, predictions):
"""
Gets a tweet containing flavored text
:type predictions: object
"""
tweet_object = random.choice(self._tweets)
        keys = list(predictions.keys())  # materialize the view so it can be indexed (works on Python 3 as well)
text = tweet_object["message"].format(keys[0], keys[1], keys[2])
if tweet_object["upper_case"]:
text = text.upper()
return text
@staticmethod
def get_default_tweet(predictions):
"""
Gets the default tweet text
:type predictions: object
"""
        # Materialize the dict views so they can be indexed (works on Python 3 as well).
        first_guess_name = list(predictions.keys())[0]
        first_guess_value = list(predictions.values())[0]
        second_guess = list(predictions.keys())[1]
text = "I'm {0:.2%} confident this is a {1}. Might be a {2}"\
.format(first_guess_value, first_guess_name, second_guess)
return text
def _load_tweets(self):
"""
Loads the list of tweet objects from disk
"""
with open(self._tweets_path) as f:
self._tweets = json.load(f)
``` |
{
"source": "joelghill/PyRC",
"score": 4
} |
#### File: joelghill/PyRC/draw.py
```python
from Tkinter import *
WIDTH = 640
HEIGHT = 480
COLOR = "Green"
SIZE = 4
def paint( event ):
x1, y1 = ( event.x - SIZE/2 ), ( event.y - SIZE/2 )
x2, y2 = ( event.x + SIZE/2 ), ( event.y + SIZE/2 )
w.create_oval( x1, y1, x2, y2, fill = COLOR, border=0 )
master = Tk()
master.title( "Painting using Ovals" )
w = Canvas(master, width=WIDTH,height=HEIGHT)
w.pack(expand = YES, fill = BOTH)
w.bind( "<B1-Motion>", paint )
message = Label( master, text = "Press and Drag the mouse to draw" )
message.pack( side = BOTTOM )
mainloop()
``` |
{
"source": "JoelGotsch/Kantnprojekt_Backend",
"score": 2
} |
#### File: services/web/debugger.py
```python
from os import getenv, environ
def initialize_flask_server_debugger_if_needed():
if (getenv("DEBUGGER") == "True") and (getenv("ALREADY_DEBUGGING") == "False"):
import multiprocessing
try:
if multiprocessing.current_process().pid > 1:
print("import debugpy")
import debugpy
debugpy.listen(("0.0.0.0", 10001))
print("VS Code debugger can now be attached, press F5 in VS Code..", flush=True)
environ["ALREADY_DEBUGGING"] = "True"
debugpy.wait_for_client()
print("VS Code debugger attached, enjoy debugging", flush=True)
except Exception as e:
print("Couldn't start debugger because of:")
print(e)
print("ALREADY_DEBUGGING: " + str(getenv("ALREADY_DEBUGGING")))
else:
print("No debugging. Alrighty..")
```
#### File: services/web/manage.py
```python
from project.api_struc.models import User, Exercise, Workout, Action, Challenge, UserChallenge
from project import app, db
import project.misc.funcs as funcs
from flask.cli import FlaskGroup
import datetime
from pytz import timezone
import os
from debugger import initialize_flask_server_debugger_if_needed
initialize_flask_server_debugger_if_needed()
cli = FlaskGroup(app)
# app, db = create_app("config")
# enables to run something like "python manage.py create_db", which is used in the Makefile.
@cli.command("create_db")
def create_db():
db.drop_all()
db.create_all()
db.session.commit()
@cli.command("seed_db")
def seed_db():
try:
user2 = User(
id=funcs.rand_user_id(),
email="<EMAIL>",
user_name="kantnprojekt",
password="<PASSWORD>", # <PASSWORD>6
token="5tABELThgUIc5oH5j9jbIU2AKedRYa",
)
user1 = User(
id=funcs.rand_user_id(),
email="<EMAIL>",
user_name="Viech",
password="<PASSWORD>", # <PASSWORD>
token="<KEY>",)
db.session.add(user1)
db.session.add(user2)
db.session.commit()
ex1 = Exercise(
id=funcs.rand_string(30),
title="Schritte",
note="Anzahl der Schritte pro Tag. Jeder Schritt zählt 0.01 Punkte.",
user_id=user1.id,
points=0.01)
ex2 = Exercise(
id=funcs.rand_string(30),
title="Liegestütze",
note="Jeder Liegestütz zählt 1 Punkt.",
user_id=user1.id,
points=1)
ex3 = Exercise(
id=funcs.rand_string(30),
title="Situps",
note="Jeder Situp zählt 0.25 Punkte.",
user_id=user1.id,
points=0.25)
ex4 = Exercise(
id=funcs.rand_string(30),
title="Klimmzüge",
note="Jeder Klimmzug zählt 4 Punkte.",
user_id=user1.id,
points=4)
ex5 = Exercise(
id=funcs.rand_string(30),
title="Hollow Hold",
note="Jede Minute im Hollow Hold zählt 20 Punkte.",
user_id=user1.id,
unit="min",
points=20)
ex6 = Exercise(
id=funcs.rand_string(30),
title="Plank",
note="Alle Versionen sind OK: Front-Plank, Side-Plank,... 15 Punkte pro Minute",
user_id=user1.id,
unit="min",
points=15)
ex7 = Exercise(
id=funcs.rand_string(30),
title="(Wand-) Handstand",
note="Handstand an der Wand oder freistehend. 25 Punkte pro Minute",
user_id=user1.id,
unit="min",
points=25)
ex8 = Exercise(
id=funcs.rand_string(30),
title="Superman",
note="Bauch am Boden, Arme nach vorne und Beine hinten in die Luft gestreckt, Hintern anspannen.",
user_id=user1.id,
unit="min",
points=10)
ex9 = Exercise(
id=funcs.rand_string(30),
title="Schlaf",
note="Jede Stunde Schlaf bringt 10 Punkte. Aber maximal 80 Punkte pro Tag/Nacht.",
user_id=user1.id,
unit="h",
points=10,
max_points_day=80)
ex10 = Exercise(
id=funcs.rand_string(30),
title="Alkohol",
note="Für jedes Bier (oder etwa äquivalente Menge Alkohol) gibt es 50 Punkte Abzug. Ein Bier pro Woche ist aber frei.",
user_id=user1.id,
unit="Bier",
points=-50,
weekly_allowance=1)
ex11 = Exercise(
id=funcs.rand_string(30),
title="Yoga",
note="Alle Yoga Formen sind OK.",
user_id=user1.id,
unit="min",
points=3)
# missing: Plank, Wall-Handstand, Superman, Yoga
challenge1 = Challenge(
id=funcs.rand_string(30),
name="Kantnprojekt",
description="2000 Punkte pro Kalenderwoche müssen erreicht werden. Diese setzen sich aus allen Übungen inklusive Schlaf und abzüglich Alkohol zusammen.",
min_points=2000,
eval_period="week",
start_date=datetime.datetime.utcnow(),
end_date=datetime.datetime.utcnow()+datetime.timedelta(days=90))
print("appending all exercises...")
challenge1.exercises.append(ex1)
challenge1.exercises.append(ex2)
challenge1.exercises.append(ex3)
challenge1.exercises.append(ex4)
challenge1.exercises.append(ex5)
challenge1.exercises.append(ex6)
challenge1.exercises.append(ex7)
challenge1.exercises.append(ex8)
challenge1.exercises.append(ex9)
challenge1.exercises.append(ex10)
challenge1.exercises.append(ex11)
accepted_chall_1 = UserChallenge()
accepted_chall_1.user = user2
accepted_chall_1.challenge = challenge1
for ch in user2.challenges:
print(ch.challenge.name + ":")
print(" Übungen:")
for ex in ch.challenge.exercises:
print(" "+ex.title)
print(" Teilnehmer:")
for us in ch.challenge.users:
print(" "+us.user.user_name)
print("adding all exercises...")
db.session.add(ex1)
db.session.add(ex2)
db.session.add(ex3)
db.session.add(ex4)
db.session.add(ex5)
db.session.add(ex6)
db.session.add(ex7)
db.session.add(ex8)
db.session.add(ex9)
db.session.add(ex10)
db.session.add(ex11)
print("adding challenge...")
db.session.add(challenge1)
print("adding accepted challenge...")
db.session.add(accepted_chall_1)
db.session.commit()
print("adding workouts...")
wo1 = Workout(
id=funcs.rand_string(30),
user_id=user1.id,
date=datetime.datetime(2020, 9, 21, 15, 0, 0,
tzinfo=timezone('CET')),
note="Test")
wo2 = Workout(
id=funcs.rand_string(30),
user_id=user1.id,
date=datetime.datetime(2020, 9, 20, 15, 0, 0,
tzinfo=timezone('CET')),
note="Test2")
db.session.add(wo1)
db.session.add(wo2)
db.session.commit()
print("adding actions...")
ac1 = Action(
id=funcs.rand_string(30),
exercise_id=ex6.id, workout_id=wo1.id,
number=7.5) # sleep
ac2 = Action(
id=funcs.rand_string(30),
exercise_id=ex1.id, workout_id=wo1.id,
number=26000) # steps
wo1.actions.append(ac1)
wo1.actions.append(ac2)
# db.session.add(wo1)
db.session.commit()
print(Action.query.get(ac1.id))
# TODO: test deleting entries
except Exception as e:
print(e)
if __name__ == "__main__":
cli()
```
#### File: project/api_struc/Workout.py
```python
from typing import NoReturn
from flask_restful import Resource
from flask import request, jsonify
from sqlalchemy.sql.elements import Null
from .models import Exercise, Workout, User, Action, db
import datetime
import dateutil.parser
from ..misc import funcs as funcs
# We need the db object! ahhhhhhhhhh => move it to models.py?! then app needs to import it. is it still the same object if manage.py is then initializing it again when loading models.py? But probably its doing that already anyways..
class API_Workout(Resource):
def get(self):
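        # Expected request inputs (derived from the parsing below):
        #   header 'token'                          - user API token (required)
        #   value  'workout_id'                     - fetch a single workout
        #   values 'start_date', 'end_date',
        #          'last_edit_date' (ISO strings)   - filter workouts by date
        #   value  'number'                         - limit result count (0 = no limit)
        #   value  'only_header'                    - return only id -> latest_edit pairs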
try:
token = request.headers.get("token")
user = User.query.filter(User.token == token).first()
wos = []
if user is None:
return {"message": "Token is invalid!"}, 400
try:
workout_id = request.values.get("workout_id")
if workout_id is not None and len(workout_id) > 0:
wos = [Workout.query.get(workout_id)]
except:
wos = []
try:
last_edit_date = dateutil.parser.parse(
request.values.get("last_edit_date"))
except:
last_edit_date = datetime.datetime.min
try:
start_date = dateutil.parser.parse(
request.values.get("start_date"))
except:
start_date = datetime.datetime.min
try:
end_date = dateutil.parser.parse(
request.values.get("end_date"))
except:
end_date = datetime.datetime.now()
try:
number = int(request.values.get("number"))
except:
number = 0
try:
only_header = bool(request.values.get("only_header"))
except:
only_header = False
if len(wos) == 0:
if number == 0:
wos = Workout.query.filter(
Workout.date >= start_date, Workout.date <= end_date, Workout.user_id == user.id, Workout.latest_edit >= last_edit_date).all()
else:
wos = Workout.query.filter(Workout.date >= start_date, Workout.date <= end_date,
Workout.user_id == user.id, Workout.latest_edit >= last_edit_date).limit(number).all()
# wos = Workout.query.filter(
# Workout.date >= start_date and Workout.date <= end_date and Workout.user_id==user.id).limit(number).all()
# wos = Workout.query.filter_by(
# date >= start_date, date <= end_date, user_id=user.id).limit(number).all()
# wos2 = Workout.query.filter_by(latest_edit >= start_date, latest_edit <= end_date, user_id = user_id).all()
if only_header:
result = {wo.id: wo.latest_edit.isoformat() for wo in wos}
else:
result = {wo.id: wo.serialize() for wo in wos}
return(result)
except Exception as e:
return ({"message": str(e)}, 400)
def post(self):
# has to have
response = {}
try:
token = request.headers.get("token")
# firebase_id_token=request.values.get("firebase_id_token")
# verify the id token with firebase admin package! only then return api key.
user = User.query.filter_by(token=token).first()
if user is None:
return(({"status": "failure", "message": "Authentication didn't work"}), 400)
except Exception as e:
return ({"message": str(e)}, 400)
data = request.get_json(force=True) # should be a dict
print("incoming workout data: "+str(data))
try:
for wo_id in data:
# check if wo_id is already created, so we only need to update it:
wo = Workout.query.filter_by(id=wo_id, user_id=user.id).first()
json_wo = data[wo_id]
if wo is not None: # updating existing workout
if not json_wo["not_deleted"]:
db.session.delete(wo)
db.session.commit()
response[json_wo["local_id"]] = ""
continue
wo.date = dateutil.parser.parse(json_wo["date"])
wo.note = json_wo["note"]
wo.latest_edit = dateutil.parser.parse(
json_wo["latest_edit"])
wo.not_deleted = json_wo["not_deleted"]
# removing and adding each of the actions!
# for ac in wo.actions:
# db.session.delete(ac)
# db.session.commit()
for ac in wo.actions:
db.session.delete(ac)
if len(wo.actions)>0:
db.session.commit()
# wo.actions.delete() # TODO: only delete actions which are not resent and don't re-create them below.
for ac_key in json_wo["actions"]:
# check if action is already in database:
if Action.query.get(ac_key) is not None:
continue
json_ac = json_wo["actions"][ac_key]
# TODO: This block is not necessary if the database is permanent and new exercises are uploaded directly. Should instead throw an error that the exercise is not known?
# check if exercise exists already:
if Exercise.query.get(json_ac["exercise_id"]) is None:
json_ex = json_ac["exercise"]
ex = Exercise(
id=json_ac["exercise_id"],
title=json_ex["title"],
note=json_ex["note"],
user_id=json_ex["user_id"],
unit=json_ex["unit"],
points=json_ex["points"],
max_points_day=json_ex["max_points_day"],
weekly_allowance=json_ex["weekly_allowance"],
not_deleted=json_ex["not_deleted"],
)
db.session.add(ex)
db.session.commit()
ac = Action(id=ac_key, exercise_id=json_ac["exercise_id"], workout_id=wo.id,
number=json_ac["number"], note=json_ac["note"])
wo.actions.append(ac)
db.session.add(ac)
db.session.commit()
response[json_wo["local_id"]] = wo.id
db.session.commit()
else: # creating a new workout
new_wo_id = funcs.rand_string(30)
wo = Workout(id=new_wo_id,
user_id=user.id,
date=dateutil.parser.parse(json_wo["date"]),
note=json_wo["note"],
latest_edit=dateutil.parser.parse(
json_wo["latest_edit"]),
not_deleted=json_wo["not_deleted"],
)
response[json_wo["local_id"]] = new_wo_id
db.session.add(wo)
db.session.commit()
for ac_key in json_wo["actions"]:
json_ac = json_wo["actions"][ac_key]
if Exercise.query.get(json_ac["exercise_id"]) is None:
json_ex = json_ac["exercise"]
ex = Exercise(
id=json_ac["exercise_id"],
title=json_ex["title"],
                                note=json_ex["note"],
user_id=json_ex["user_id"],
unit=json_ex["unit"],
points=json_ex["points"],
max_points_day=json_ex["max_points_day"],
weekly_allowance=json_ex["weekly_allowance"],
not_deleted=json_ex["not_deleted"],
)
db.session.add(ex)
db.session.commit()
ac = Action(id=funcs.rand_string(30), exercise_id=json_ac["exercise_id"], workout_id=wo.id,
number=json_ac["number"], note=json_ac["note"])
wo.actions.append(ac)
db.session.add(ac)
db.session.commit()
return({"status": 'success', 'data': response}, 201)
except Exception as e:
print(e)
return(({"status": "failure", "message": "Could not read json or header."}), 400)
def delete(self): # done via "not_deleted" = False
pass
``` |
{
"source": "Joelgranados/cijoe",
"score": 2
} |
#### File: modules/cij/analyser.py
```python
import re
import os
from typing import List, Tuple
import dataclasses
import copy
import yaml
from cij.runner import Status, TestRun, TestCase
import cij.runner
import cij
from cij.util import rehome
from cij.errors import CIJError, InvalidRangeError, UnknownUnitError
UNITS = {
# general
'': 1, # no unit
'B': 1, # bytes
'k': 1000, # kilo
'M': 1000**2, # mega
'G': 1000**3, # giga
# kibi
'KiB': 1024**1, # kibibytes
'MiB': 1024**2, # mibibytes
'GiB': 1024**3, # gibibytes
'TiB': 1024**4, # tibibytes
# kilo
'kB': 1000**1, # kilobytes
'MB': 1000**2, # megabytes
'GB': 1000**3, # gigabytes
'TB': 1000**4, # gigabytes
# time
'nsec': 1/1000**3, # nanoseconds
'usec': 1/1000**2, # microseconds
'msec': 1/1000**1, # milliseconds
'sec': 1, # seconds
'min': 60 # minutes
}
class Range:
"""
Range implements parsing and validation of mathematical range notation,
e.g. `[-5;100[` which translates to "must be >= -5 and < 100".
"""
# pylint: disable=no-self-use
# pylint: disable=too-few-public-methods
_rng_re = re.compile(
r"^(?P<elower>\[|\])\s*(?P<rstart>-inf|-?\d+(\.\d*)?)\s*;" # [1.0;
r"\s*(?P<rend>inf|-?\d+(\.\d*)?)\s*(?P<eupper>\[|\])" # 1.0]
fr"\s*(?P<unit>({'|'.join(UNITS)}))$" # ms
)
def __init__(self, rng: str):
match = self._rng_re.match(rng)
if not match:
raise InvalidRangeError(f"invalid syntax or unit for \"{rng}\"")
rng_start = float(match["rstart"])
rng_end = float(match["rend"])
if rng_start > rng_end:
raise InvalidRangeError(
"expected lower bound <= upper bound, "
f"{rng_start} <= {rng_end}"
)
# NOTE: _rng_re enforces that match["unit"] exists in UNITS.
unit_val = UNITS[match["unit"]]
self._rng_start = rng_start
self._rng_end = rng_end
self._elower = match["elower"]
self._eupper = match["eupper"]
self._unit = match["unit"]
self._check_lower = self._make_check_lower(match["elower"],
rng_start * unit_val)
self._check_upper = self._make_check_upper(match["eupper"],
rng_end * unit_val)
def contains(self, val: float) -> bool:
""" Check whether n is contained in range.
val must be given in the base unit of the measurement, e.g. seconds for
time and bytes for storage.
"""
return self._check_lower(val) and self._check_upper(val)
def _make_check_lower(self, edge_lower: str, rng_start: float):
if edge_lower == '[':
return lambda n: n >= rng_start
if edge_lower == ']':
return lambda n: n > rng_start
raise InvalidRangeError("invalid input _make_check_lower")
def _make_check_upper(self, edge_upper: str, rng_end: float):
if edge_upper == '[':
return lambda n: n < rng_end
if edge_upper == ']':
return lambda n: n <= rng_end
raise InvalidRangeError("invalid input _make_check_upper")
def format_val(self, val: float) -> str:
""" Formats and returns val using the unit of the range.
Example:
range: "[250; 750]usec"
val: 0.0005
output: "500 usec"
"""
val_conv = val / UNITS[self._unit]
return f"{val_conv:.3f} {self._unit}"
def __str__(self):
return (f"{self._elower}{self._rng_start};"
f"{self._rng_end}{self._eupper} {self._unit}")
def to_base_unit(val: float, unit: str = "") -> float:
""" Converts val in the given unit to its base unit.
Example:
val: 100, unit: 'KiB'
output: 102400 (bytes)
val: 500, unit: 'msec'
output: 0.5 (seconds)
"""
unit_scalar = UNITS.get(unit, None)
if not unit_scalar:
raise UnknownUnitError(f"Unit '{unit}' is not supported")
return val * unit_scalar
def preqs_from_file(fpath):
""" Read yaml-formatted performance requirements from fpath """
with open(fpath, 'r') as preqf:
return yaml.safe_load(preqf)
@dataclasses.dataclass
class CheckedPreq:
""" Contains information about checked performance requirements """
key: str
error: bool
msg: str
ctx: dict
def check_preqs(preqs, metrics) -> List[CheckedPreq]:
"""
Check performance requirements against measured metrics.
"""
checked_preqs = []
def add_preq(key: str, msg: str, error: bool, ctx: dict):
checked_preqs.append(
CheckedPreq(key=key, msg=msg, error=error, ctx=ctx)
)
for pkey, rng_str in preqs.items():
ctx = metrics.get('ctx', {})
mval = metrics.get(pkey, None)
if mval is None:
add_preq(key=pkey, error=True, ctx=ctx,
msg="expected to be measured, but wasn't")
continue
try:
rng = Range(rng_str)
except InvalidRangeError as ex:
add_preq(key=pkey, error=True, ctx=ctx,
msg=f"invalid range \"{rng_str}\": {ex}")
continue
if not rng.contains(mval):
add_preq(key=pkey, error=True, ctx=ctx,
msg=f"{rng.format_val(mval)} in {rng} failed")
continue
add_preq(key=pkey, error=False, ctx=ctx,
msg=f"{rng.format_val(mval)} in {rng} satisfied")
return checked_preqs
def analyse_prequirements(trun: TestRun, preqs_declr) -> int:
"""
Analyse trun and enforce test pass/fail based on the given performance
requirements declaration.
NOTE: updates relevant trun fields.
"""
global_conf = preqs_declr.get("global", {})
global_preqs = global_conf.get("metrics", {})
global_tcase_preqs = global_conf.get("testcases", {})
tr_err = 0
for tplan in trun.testplans:
tp_err = 0
for tsuite in tplan.testsuites:
ts_err = 0
tsuite_preqs = preqs_declr.get(tsuite.name, {})
for tcase in tsuite.testcases:
tc_err = 0
tcase_preqs = copy.deepcopy(global_preqs)
tcase_preqs.update(global_tcase_preqs.get(tcase.name, {}))
tcase_preqs.update(tsuite_preqs.get(tcase.name, {}))
if not tcase_preqs:
continue
cij.info(
f"{tsuite.name}/{tcase.name} checking {set(tcase_preqs)}"
)
tc_err, skip = tcase_check_preqs(tcase, tcase_preqs,
tsuite.name)
if skip:
continue
tcase.status_preq = Status.Fail if tc_err else Status.Pass
ts_err += tc_err
tsuite.status_preq = Status.Fail if ts_err else Status.Pass
tp_err += ts_err
tplan.status_preq = Status.Fail if tp_err else Status.Pass
tr_err += tp_err
trun.status_preq = Status.Fail if tr_err else Status.Pass
return 0
def tcase_check_preqs(tcase: TestCase, preqs: dict, tsuite_name: str
) -> Tuple[int, bool]:
"""
Retrieve metrics and check them against the given preqs.
Logs results to tcase.analysis_log_fpath.
"""
tc_err = 0
test_metrics = _get_metrics(tcase.aux_root)
if not test_metrics:
cij.info(
f"{tsuite_name}/{tcase.name} no measurements found"
)
return tc_err, True
# Check performance requirements against measured metrics
with open(tcase.analysis_log_fpath, 'w') as alog:
for metrics in test_metrics:
checked_preqs = check_preqs(preqs, metrics)
for cpreq in checked_preqs:
cij.emph(f"{cpreq.key}: {cpreq.msg}", rval=int(cpreq.error))
cij.emph(f"{cpreq.key}: {cpreq.msg} {cpreq.ctx}",
rval=int(cpreq.error), file=alog)
tc_err += sum(cpreq.error for cpreq in checked_preqs)
return tc_err, False
def _get_metrics(aux_root: str) -> List[dict]:
fpath = os.path.join(aux_root, "metrics.yml")
if not os.path.exists(fpath):
return []
with open(fpath, 'r') as yml_file:
return yaml.safe_load(yml_file)
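# Illustrative metrics.yml layout (an assumption based on how check_preqs()
# consumes it): a YAML list of mappings, each holding measured values keyed by
# metric name plus an optional 'ctx' mapping; metric names below are hypothetical.
#
#   - ctx: {bs: 4k, iodepth: 1}
#     lat_avg: 0.00042
#     iops: 125000.0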
def main(args):
"""
Run cij analyser steps.
If `preqs` is set, log files in the given TRUN path are searched
    for metrics.yml-files and the TRUN will be updated with pass/fail for
performance requirements.
"""
trun = cij.runner.trun_from_file(args.trun_fpath)
rehome(trun.args.output, args.output, trun)
try:
err = 0
if args.preqs:
preqs_declr = preqs_from_file(args.preqs)
preq_err = analyse_prequirements(trun, preqs_declr)
if preq_err:
cij.err('Failed to analyse prequirements')
else:
cij.info('Successfully analyzed prequirements')
err += preq_err
cij.runner.trun_to_file(trun, fpath=cij.runner.yml_fpath(args.output))
except CIJError as ex:
cij.err(f"main:FAILED to run analysis: {ex}")
return err
```
#### File: cij/extractors/util.py
```python
import os
import argparse
from typing import Sequence, List
import yaml
from cij.runner import TestRun
import cij
def parse_args_load_trun(name: str) -> TestRun:
"""
Parse arguments, load TRUN from test output directory and return it.
This function is a helper for extractors to easily and consistently
implement direct CLI functionality without being invoked through the
cij_extractor command.
"""
prsr = argparse.ArgumentParser(
description=name,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
prsr.add_argument(
"--output",
help="Path to test result output directory",
required=True
)
args = prsr.parse_args()
trun_fpath = cij.runner.yml_fpath(args.output)
return cij.runner.trun_from_file(trun_fpath)
def dump_metrics_to_file(metrics: List[dict], aux_root: str):
"""
Dump a list of measured metrics to metrics.yml at aux_root.
"""
fpath = os.path.join(aux_root, "metrics.yml")
with open(fpath, 'w') as yml_file:
data = yaml.dump(
metrics, explicit_start=True, default_flow_style=False
)
yml_file.write(data)
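# Illustrative extractor flow (the metric name 'lat_avg' is hypothetical):
#
#   trun = parse_args_load_trun("my_extractor")
#   for tplan in trun.testplans:
#       for tsuite in tplan.testsuites:
#           for tcase in tsuite.testcases:
#               metrics = [{"ctx": {}, "lat_avg": 0.00042}]  # parsed from tcase logs
#               dump_metrics_to_file(metrics, tcase.aux_root)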
```
#### File: modules/cij/reporter.py
```python
import datetime
import traceback
import glob
import os
import jinja2
import cij.runner
import cij
from cij.util import rehome
def tcase_comment(tcase):
"""
Extract testcase comment section / testcase description
@returns the testcase-comment from the tcase.fpath as a list of strings
"""
src = open(tcase.fpath).read()
if len(src) < 3:
cij.err("rprtr::tcase_comment: invalid src, tcase: %r" % tcase.name)
return None
ext = os.path.splitext(tcase.fpath)[-1]
if ext not in [".sh", ".py"]:
cij.err("rprtr::tcase_comment: invalid ext: %r, tcase: %r" % (
ext, tcase.name
))
return None
comment = []
for line in src.splitlines()[2:]:
if ext == ".sh" and not line.startswith("#"):
break
if ext == ".py" and '"""' not in line:
break
comment.append(line)
return comment
def tcase_parse_descr(tcase):
"""Parse descriptions from the the given tcase"""
descr_short = "SHORT"
descr_long = "LONG"
try:
comment = tcase_comment(tcase)
except (IOError, OSError, ValueError) as exc:
comment = []
cij.err("tcase_parse_descr: failed: %r, tcase: %r" % (exc, tcase))
# Remove empty lines
comment = [line for line in comment if line.strip()]
for line_number, line in enumerate(comment):
if line.startswith("#"):
comment[line_number] = line[1:]
if comment:
descr_short = comment[0]
if len(comment) > 1:
descr_long = "\n".join(comment[1:])
return descr_short, descr_long
def runlogs_to_html(run_root):
"""
    Returns content of the run-logs under the given 'run_root' with HTML
    annotations, currently simply a conversion of ANSI color codes to HTML
    elements
"""
if not os.path.isdir(run_root):
return "CANNOT_LOCATE_LOGFILES"
hook_enter = []
hook_exit = []
tcase = []
for fpath in glob.glob(os.path.join(run_root, "*.log")):
if "exit" in fpath:
hook_exit.append(fpath)
continue
if "hook" in fpath:
hook_enter.append(fpath)
continue
tcase.append(fpath)
content = ""
for fpath in hook_enter + tcase + hook_exit:
with open(fpath, "r") as logf:
content += "# BEGIN: run-log from log_fpath: %s\n" % fpath
content += logf.read()
content += "# END: run-log from log_fpath: %s\n\n" % fpath
return content
def analysislog_to_html(fpath):
"""
Returns contents of the given 'fpath' with HTML annotations, currently
simply a conversion of ANSI color codes to HTML elements
"""
if not os.path.exists(fpath):
return "CANNOT_LOCATE_ANALYSIS_LOGFILES"
content = ""
with open(fpath, "r") as logf:
content += f"# BEGIN: analysis-log from log_fpath: {fpath}\n"
content += logf.read()
content += f"# END: analysis-log from log_fpath: {fpath}\n\n"
return content
def src_to_html(fpath):
"""
Returns content of the given 'fpath' with HTML annotations for syntax
highlighting
"""
if not os.path.exists(fpath):
return "COULD-NOT-FIND-TESTCASE-SRC-AT-FPATH:%r" % fpath
# NOTE: Do SYNTAX highlight?
return open(fpath, "r").read()
def aux_listing(aux_root):
"""Listing"""
listing = []
for root, _, fnames in os.walk(aux_root):
count = len(aux_root.split(os.sep))
prefix = root.split(os.sep)[count:]
for fname in fnames:
listing.append(os.sep.join(prefix + [fname]))
return listing
def process_tsuite(tsuite):
"""Goes through the tsuite and processes "*.log" """
# scoop of output from all run-logs
tsuite.log_content = runlogs_to_html(tsuite.res_root)
tsuite.aux_list = aux_listing(tsuite.aux_root)
return True
def process_tcase(tcase):
"""Goes through the trun and processes "run.log" """
tcase.src_content = src_to_html(tcase.fpath)
tcase.log_content = runlogs_to_html(tcase.res_root)
tcase.analysis_content = analysislog_to_html(tcase.analysis_log_fpath)
tcase.aux_list = aux_listing(tcase.aux_root)
tcase.descr, tcase.descr_long = tcase_parse_descr(tcase)
return True
def process_tplan(tplan):
"""Goes through the tplan and processes "run.log" """
tplan.log_content = runlogs_to_html(tplan.res_root)
tplan.aux_list = aux_listing(tplan.aux_root)
return True
def process_trun(trun):
"""Goes through the trun and processes "run.log" """
trun.log_content = runlogs_to_html(trun.res_root)
trun.aux_list = aux_listing(trun.aux_root)
return True
def postprocess(trun):
"""Perform postprocessing of the given test run"""
plog = []
plog.append(("trun", process_trun(trun)))
for tplan in trun.testplans:
plog.append(("tplan", process_tplan(tplan)))
for tsuite in tplan.testsuites:
plog.append(("tsuite", process_tsuite(tsuite)))
for tcase in tsuite.testcases:
plog.append(("tcase", process_tcase(tcase)))
for task, success in plog:
if not success:
cij.err("rprtr::postprocess: FAILED for %r" % task)
return sum((success for task, success in plog))
def dset_to_html(dset, tmpl_fpath):
"""
@returns A HTML representation of the given 'dset' using the template at
'tmpl_fpath'
"""
def stamp_to_datetime(stamp):
"""Create a date object from timestamp"""
return datetime.datetime.fromtimestamp(int(stamp))
def strftime(dtime, fmt):
"""Create a date object from timestamp"""
return dtime.strftime(fmt)
tmpl_dpath = os.path.dirname(tmpl_fpath)
tmpl_fname = os.path.basename(tmpl_fpath)
env = jinja2.Environment(
autoescape=True,
loader=jinja2.FileSystemLoader(tmpl_dpath)
)
env.filters['stamp_to_datetime'] = stamp_to_datetime
env.filters['strftime'] = strftime
tmpl = env.get_template(tmpl_fname)
return tmpl.render(dset=dset)
def main(args):
"""Main entry point"""
trun = cij.runner.trun_from_file(args.trun_fpath)
rehome(trun.args.output, args.output, trun)
postprocess(trun)
cij.emph("main: reports are uses tmpl_fpath: %r" % args.tmpl_fpath)
cij.emph("main: reports are here args.output: %r" % args.output)
html_fpath = os.path.join(args.output, "%s.html" % args.tmpl_name)
cij.emph("html_fpath: %r" % html_fpath)
try: # Create and store HTML report
with open(html_fpath, 'w') as html_file:
html_file.write(dset_to_html(trun, args.tmpl_fpath))
except (IOError, OSError, ValueError) as exc:
traceback.print_exc()
cij.err("rprtr:main: exc: %s" % exc)
return 1
return 0
``` |
{
"source": "JoelGritter/LSTM-predict-note",
"score": 3
} |
#### File: JoelGritter/LSTM-predict-note/4P80_seminar.py
```python
!sudo apt-get install fluidsynth
!pip install midi2audio
!pip install mingus
from mingus.containers import Note, NoteContainer, Track
from mingus.midi.midi_file_out import write_NoteContainer, write_Track
from midi2audio import FluidSynth
fsy = FluidSynth()
# imports for data manipulation
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
# imports for machine learning
import keras
from keras.models import Sequential
from keras.layers import Dense, LSTM
# read in the notes, make an array with 0's, except for the current note
def read_and_format(input_filepath):
input_data = []
with open(input_filepath) as input_file:
for line in input_file:
values = line.split(",")
for value in values:
tmp = [0.0] * 88
v = int(value)
tmp[v-1] = 1.0
input_data.append(tmp)
return input_data
input_data = read_and_format("k330-allegro-moderato.csv")
# get the previous 20 notes, predict the next note
def generate_datasets(input_array, n_prev = 20):
temp_x = [input_array[i:i+n_prev] for i in range(len(input_array) - n_prev)]
temp_y = [input_array[i+n_prev] for i in range(len(input_array) - n_prev)]
return np.array(temp_x), np.array(temp_y)
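# e.g. with N notes in input_data and n_prev = 20, x has shape (N - 20, 20, 88)
# and y has shape (N - 20, 88): 20 one-hot encoded notes predict the next one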
x, y = generate_datasets(input_data)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20, shuffle=True)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
print(y_train[0])
# build the model itself
model = Sequential()
model.add(LSTM(30))
model.add(Dense(88, activation="softmax"))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# train the model
model.fit(x_train, y_train, batch_size=10, epochs=100, validation_split=0.05)
# test the model
model.evaluate(x_test, y_test)
# See incorrectly predicted
predictions = model.predict(x_test)
incorrect_indices = []
for (index, (prediction, target)) in enumerate(zip(predictions, y_test)):
pred = np.argmax(prediction)
tar = np.argmax(target)
if pred != tar:
incorrect_indices.append(index)
print(", ".join(map(str, incorrect_indices)))
# Predict song
test_in = x_test[0]
test_out = y_test[0]
# initial - provide inital 20 notes
# n - how many predicted notes to add (i.e. expand by this number)
def make_big_song(initial, n):
res =[ x for x in initial]
for _ in range(n):
next = model.predict(np.array([res[-20:],]))[0]
res.append(next)
return np.array(res)
test = make_big_song(test_in, 60)
print(test.shape)
# Expects n x 88
def vector_to_midi(arr, filename="nice.midi"):
track = Track()
for note_arr in arr:
note_num = int(np.argmax(note_arr))
note = Note()
note.from_int(note_num - 3)
track.add_notes(note)
write_Track(filename, track)
print("Done!")
vector_to_midi(test)
def predict_to_file(first_20_notes, expected, filename="nice"):
next = model.predict(np.array([first_20_notes]))
actual_next = np.array([expected])
next_file = filename + "_predicted_note"
actual_next_file = filename + "_actual_note"
orig_file = filename + "_first_20_notes"
vector_to_midi(next, next_file + ".midi")
vector_to_midi(actual_next, actual_next_file + ".midi")
vector_to_midi(first_20_notes, orig_file + ".midi")
    # This conversion does not seem to work
# fsy.midi_to_audio(next_file + ".midi", next_file + ".mp3")
# fsy.midi_to_audio(actual_next_file + ".midi", actual_next_file + ".mp3")
# fsy.midi_to_audio(orig_file + ".midi", orig_file + ".mp3")
predict_to_file(test_in, test_out)
inci = incorrect_indices[0]
predict_to_file(x_test[inci], y_test[inci], 'first_incorrect')
``` |
{
"source": "JoelGRod/Algorithms-py",
"score": 4
} |
#### File: main/graphs/dijkstra.py
```python
graph = {
"init": {
"a": 6,
"b": 2
},
"a": {
"end": 1
},
"b": {
"a": 3,
"end": 5
},
"end": {}
}
# All nodes except 'init' are required; nodes not adjacent to 'init' start at infinity
costs = {
"a": 6,
"b": 2,
"end": float("inf")
}
# Only nodes whose parent is 'init' are required initially
parents = {
"a": "init",
"b": "init",
# "end": None
}
def find_least_cost_node(costs, processed):
lower_cost = float("inf")
least_cost_node = None
for node in costs:
cost = costs[node]
if lower_cost > cost and node not in processed:
lower_cost = cost
least_cost_node = node
return least_cost_node
def dijkstra(graph, costs, parents):
processed = []
node = find_least_cost_node(costs, processed)
while node is not None:
cost = costs[node]
neighbors = graph[node]
for n in neighbors:
new_cost = cost + neighbors[n]
if costs[n] > new_cost:
costs[n] = new_cost
parents[n] = node
processed.append(node)
node = find_least_cost_node(costs, processed)
return parents, costs
# print(dijkstra(graph, costs, parents))
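# Expected result for the graph above:
#   costs   -> {'a': 5, 'b': 2, 'end': 6}
#   parents -> {'a': 'b', 'b': 'init', 'end': 'a'}
# i.e. the cheapest path is init -> b -> a -> end with total cost 6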
```
#### File: main/maths/prime_factors.py
```python
def prime_factors(n):
primes = []
c = 2
while n > 1:
if n % c == 0:
primes.append(int(c))
n /= c
else:
c += 1
return primes
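# e.g. prime_factors(12) == [2, 2, 3] and prime_factors(13) == [13]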
```
#### File: main/misce/bouncing_balls.py
```python
def bouncing_ball(h, bounce, window):
if h <= 0 or bounce <= 0 or bounce >= 1 or window >= h: return -1
total = 1
h *= bounce
while h > window:
h *= bounce
total += 2
return total
def bouncingBall_recursive(h, bounce, window):
if h <= 0 or bounce <= 0 or bounce >= 1 or window >= h:
return -1
return 2 + bouncingBall_recursive(h * bounce, bounce, window)
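# e.g. bouncing_ball(3, 0.66, 1.5) == 3: the ball is seen falling once and then
# passes the window twice more (bounce up + fall down) before staying below 1.5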
```
#### File: main/misce/cache.py
```python
urls = {}
def check_url(url):
if urls.get(url):
return f"{urls[url]} - cached"
else:
data = f"some data obtained from url {url}"
urls[url] = data
return data
def main():
pages = ["www.google.com", "www.elsitio.com", "www.twitter.com", "www.google.com", "www.elsitio.com"]
for page in pages:
data = check_url(page)
print(data)
if __name__ == "__main__":
main()
```
#### File: main/misce/count_list_elements.py
```python
import time
""" Count list elements
"""
def count_items(elements):
if elements == []: return 0
return 1 + count_items(elements[1:])
# Faster
def count_items_two(elements):
try: elements[1]
except: return 1
return 1 + count_items_two(elements[1:])
elements = [1,2,3,4,5,6,7,8,9,99]
start = time.process_time()
print(count_items(elements))
print(f"{(time.process_time() - start)*1000:8f} ms")
start = time.process_time()
print(count_items_two(elements))
print(f"{(time.process_time() - start)*1000:8f} ms")
```
#### File: main/misce/range_extraction.py
```python
from operator import itemgetter
from itertools import groupby
def solution(args):
ranges = []
for k, g in groupby(enumerate(args), lambda x: x[0]-x[1]):
nums = [str(num) for num in list(map(itemgetter(1), g))]
if len(nums) > 2:
ranges.append(f"{nums[0]}-{nums[-1]}")
if len(nums) == 2:
ranges.append(f"{nums[0]},{nums[1]}")
if len(nums) == 1:
ranges.append(f"{nums[0]}")
return ",".join(ranges)
def solution_extra(args):
out = []
beg = end = args[0]
for n in args[1:] + [""]:
if n != end + 1:
if end == beg:
out.append( str(beg) )
elif end == beg + 1:
out.extend( [str(beg), str(end)] )
else:
out.append( str(beg) + "-" + str(end) )
beg = n
end = n
return ",".join(out)
```
#### File: main/misce/ranking_system.py
```python
class User():
def __init__(self):
self.ranks = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8]
self.rank = -8
self.progress = 0
def inc_progress(self, activity_rank):
current_rank_idx = self.ranks.index(self.rank)
activity_rank_idx = self.ranks.index(activity_rank)
result = (
len(self.ranks[current_rank_idx: activity_rank_idx])
if activity_rank_idx >= current_rank_idx
else -len(self.ranks[activity_rank_idx:current_rank_idx]))
if self.rank < 8:
self.progress += self.calculate_score(result)
self.update_rank()
def calculate_score(self, result):
if result == -1:
return 1
if result == 0:
return 3
if result > 0:
return (10 * result * result)
def update_rank(self):
while self.progress >= 100:
current_rank_idx = self.ranks.index(self.rank)
self.progress -= 100
self.rank = self.ranks[current_rank_idx + 1]
if self.rank == 8:
self.progress = 0
```
#### File: main/misce/roman_numerals.py
```python
class RomanNumerals:
roman_rosetta = {
"I": 1,
"IV": 4,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000
}
@classmethod
def to_roman(cls, val):
result = ""
str_val = str(val)
values = [digit + "0" * (len(str_val) - (idx + 1))
for idx, digit in enumerate(str_val)
if digit != "0"]
for digit in values:
if len(digit) == 4:
val = int(digit) / 1000
result += ("M" * int(val))
if len(digit) == 3:
val = int(digit) / 100
if val < 4:
result += ("C" * int(val))
if 4 < val < 9:
result += "D" + "C" * (int(val) - 5)
if val == 4:
result += "CD"
if val == 9:
result += "CM"
if len(digit) == 2:
val = int(digit) / 10
if val < 4:
result += ("X" * int(val))
if 4 < val < 9:
result += "L" + "X" * (int(val) - 5)
if val == 4:
result += "XL"
if val == 9:
result += "XC"
if len(digit) == 1:
val = int(digit)
if val < 4:
result += ("I" * int(val))
if 4 < val < 9:
result += "V" + "I" * (int(val) - 5)
if val == 4:
result += "IV"
if val == 9:
result += "IX"
return result
@classmethod
def from_roman(cls, roman_num):
result = 0
for idx, r_digit in enumerate(roman_num):
if ((idx) == len(roman_num) - 1 or
cls.roman_rosetta[r_digit] >= cls.roman_rosetta[roman_num[idx + 1]]):
result += cls.roman_rosetta[r_digit]
else:
result -= cls.roman_rosetta[r_digit]
return result
# Solution II
class RomanNumeralsTwo:
ROMANS = {
'M': 1000,
'CM': 900,
'D': 500,
'C': 100,
'XC': 90,
'L': 50,
'X': 10,
'V': 5,
'IV': 4,
'I': 1,
}
@classmethod
def to_roman(cls, n):
s = ''
for key, value in cls.ROMANS.items():
while n % value != n:
n = n - value
s += key
return s
@classmethod
def from_roman(cls, r):
s = 0
for key, value in cls.ROMANS.items():
while r.startswith(key):
r = r[len(key):]
s += value
return s
# Solution III
from collections import OrderedDict
import re
class RomanNumeralsThree(object):
ROMAN_NUMERALS = OrderedDict([
('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1),
])
DECIMAL_TO_ROMAN = [(v, k) for k, v in ROMAN_NUMERALS.items()]
ROMAN_RE = '|'.join(ROMAN_NUMERALS)
@classmethod
def from_roman(cls, roman):
return sum(cls.ROMAN_NUMERALS[d] for d in re.findall(cls.ROMAN_RE, roman))
@classmethod
def to_roman(cls, decimal):
result = []
for number, roman in cls.DECIMAL_TO_ROMAN:
while decimal >= number:
decimal -= number
result.append(roman)
return ''.join(result)
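# All three implementations agree on round trips such as:
#   to_roman(1990) == 'MCMXC' and from_roman('MCMXC') == 1990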
```
#### File: main/sets/combinations_with_reps.py
```python
def combine_with_reps(options, comb_len):
if comb_len == 1: return options
combos = []
for idx, option in enumerate(options):
recursive_combos = combine_with_reps(options[idx:], comb_len - 1)
for recursive_combo in recursive_combos:
combos.append(str(option) + str(recursive_combo))
return combos
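# e.g. combine_with_reps(['a', 'b'], 2) == ['aa', 'ab', 'bb']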
```
#### File: main/strings/get_middle_char.py
```python
def get_middle(s):
return (
s[len(s) // 2 - 1:len(s) // 2 + 1]
if len(s) % 2 == 0
else s[len(s) // 2:len(s) // 2 + 1])
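# e.g. get_middle("test") == "es" and get_middle("testing") == "t"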
```
#### File: main/strings/list_position.py
```python
from algorithms.main.maths.factorial import factorial
def list_position(word):
"""Return the anagram list position of the word"""
if len(word) == 1: return 1 # Base Case
sum_pos = list_position(word[1:]) # Recursive Case
y = len(word)
# Count chars in word and create a list of tuples
counter = {}
for char in word:
if char in counter: counter[char] += 1
else: counter[char] = 1
# Multiply factorials of accumulated values
fact_add = 1
for accum in counter.values():
fact_add *= factorial(accum)
py = factorial(y) // fact_add
# We need all the elements before first char
sorted_elem = sorted(counter.items())
# Sum accumulated values of chars before first char
sum_elem_before = 0
for el in sorted_elem:
if el[0] == word[0]: break
sum_elem_before += el[1]
return (
(py // y * sum_elem_before) + sum_pos
if sum_elem_before != 0
else 0 + sum_pos)
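# e.g. list_position("BAA") == 3, since the sorted anagrams are AAB, ABA, BAA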
################################################################
# Extra Solution I
# from math import factorial
# def listPosition(word):
# """Return the anagram list position of the word"""
# count = 0
# while len(word) > 1:
# first = word[0]
# uniques = set(word)
# possibilities = factorial(len(word))
# for letter in uniques:
# possibilities /= factorial(word.count(letter))
# for letter in uniques:
# if letter < first:
# count += possibilities / len(word) * word.count(letter)
# word = word[1:]
# return count +1
################################################################
# Extra Solution II
# from collections import Counter
# def listPosition(word):
# l, r, s = len(word), 1, 1
# c = Counter()
# for i in range(l):
# x = word[(l - 1) - i]
# c[x] += 1
# for y in c:
# if (y < x):
# r += s * c[y] // c[x]
# s = s * (i + 1) // c[x]
# return r
################################################################
# Extra Solution III
# import math
# from collections import Counter
# def listPosition(word):
# if len(word) == 1:
# return 1
# else:
# return sorted(word).index(word[0]) * calc_word_perm(word) // len(word) + listPosition(word[1:])
# def calc_word_perm(word):
# denom = 1
# for count in Counter(word).values():
# denom *= math.factorial(count)
# return math.factorial(len(word))//denom
```
#### File: main/strings/sort_strings_by_number.py
```python
import re
def order(sentence):
# code here
my_list = sentence.split(" ")
my_list.sort(key=natural_keys)
return " ".join(my_list)
def natural_keys(text):
return [int(c) for c in re.split(r'(\d+)', text) if c.isdigit()]
def order_extra(sentence):
return " ".join(sorted(sentence.split(), key=lambda x: int(filter(str.isdigit, x))))
```
#### File: main/trees/priority_stack_perform.py
```python
class PriorityStack:
def __init__(self):
self.items = {}
self.register = {}
self.ordered_priorities = []
def empty(self):
return len(self.ordered_priorities) == 0
def peek(self):
return self.items[self.ordered_priorities[0]][0]
def pop(self):
next = self.peek()
self.remove(next[0], next[1])
return [next[0], next[1]]
def remove(self, key, priority):
self.register.pop(key)
self.items[priority] = [
item for item in self.items[priority] if item[0] != key]
if len(self.items[priority]) == 0:
self.items.pop(priority)
self.sort_priorities()
def push(self, key, priority):
# check if exists and has a different priority
self.item_exists(key, priority)
# create new
item = [key, priority]
# Stack (item at the beginning - LIFO - insert), for queue use append <--- THIS
self.items[priority].insert(0, item)
# Update ordered priorities if necessary
self.sort_priorities()
def item_exists(self, key, priority):
if key in self.register and self.register[key] != priority:
self.remove(key, self.register[key])
if priority not in self.items:
self.items[priority] = []
if key not in self.register:
self.register[key] = priority
def sort_priorities(self):
if len(self.items.keys()) != len(self.ordered_priorities):
priorities = list(self.items.keys())
self.ordered_priorities = sorted(priorities)
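# Illustrative usage:
#   ps = PriorityStack()
#   ps.push('a', 2)
#   ps.push('b', 1)
#   ps.pop()   # ['b', 1] -- lowest priority value first, LIFO within a priority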
```
#### File: tests/misce/ranking_system_test.py
```python
import unittest
from algorithms.main.misce.ranking_system import User
class TestRankingSystem(unittest.TestCase):
user = User()
def test_ranking_system(self):
self.assertEqual(self.user.rank, -8)
self.assertEqual(self.user.progress, 0)
self.user.inc_progress(-7)
self.assertEqual(self.user.progress, 10)
self.user.inc_progress(-5)
self.assertEqual(self.user.progress, 0)
self.assertEqual(self.user.rank, -7)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/misce/sum_of_intervals_test.py
```python
import unittest
from algorithms.main.misce.sum_of_intervals import sum_of_intervals
class TestSumOfIntervals(unittest.TestCase):
def test_sum_of_intervals(self):
self.assertEqual(sum_of_intervals([(1, 5)]), 4)
self.assertEqual(sum_of_intervals([(1, 5), (6, 10)]), 8)
self.assertEqual(sum_of_intervals([(1, 5), (1, 5)]), 4)
self.assertEqual(sum_of_intervals([(1, 4), (7, 10), (3, 5)]), 7)
if __name__ == '__main__':
unittest.main()
```
#### File: tests/sets/permutations_without_reps_test.py
```python
import time
import unittest
from algorithms.main.sets.permutations_without_reps import permute_without_reps as pnr, permute_without_reps_two as pnrtwo, permute_without_reps_three as pnrthree
class TestPermutationsWithoutReps(unittest.TestCase):
def setUp(self):
self._started_at = time.time()
def tearDown(self):
elapsed = time.time() - self._started_at
print(f'Time: ({elapsed:.8f}s)')
def test_permutations_without_reps(self):
self.assertEqual(sorted(pnr('a')), ['a'])
self.assertEqual(sorted(pnr('ab')), ['ab', 'ba'])
self.assertTrue(sorted(pnr('MISSISSIPPI')))
self.assertEqual(sorted(pnr('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnr(['a'])), ['a'])
self.assertEqual(sorted(pnr(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnr(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
def test_permutations_without_reps_two(self):
self.assertEqual(sorted(pnrtwo('a')), ['a'])
self.assertEqual(sorted(pnrtwo('ab')), ['ab', 'ba'])
self.assertTrue(sorted(pnr('MISSISSIPPI')))
self.assertEqual(sorted(pnrtwo('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnrtwo(['a'])), ['a'])
self.assertEqual(sorted(pnrtwo(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnrtwo(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
def test_permutations_without_reps_three(self):
self.assertEqual(sorted(pnrthree('a')), ['a'])
self.assertEqual(sorted(pnrthree('ab')), ['ab', 'ba'])
self.assertTrue(sorted(pnr('MISSISSIPPI')))
self.assertEqual(sorted(pnrthree('aabb')), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
self.assertEqual(sorted(pnrthree(['a'])), ['a'])
self.assertEqual(sorted(pnrthree(['a','b'])), ['ab', 'ba'])
self.assertEqual(sorted(pnrthree(['a','a','b','b'])), [
'aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa'])
if __name__ == '__main__':
unittest.main()
```
#### File: tests/strings/highest_scoring_word_test.py
```python
import unittest
from algorithms.main.strings.highest_scoring_word import high
class TestHighestScoringWord(unittest.TestCase):
def test_high_scoring_word(self):
self.assertEqual(high('man i need a taxi up to ubud'), 'taxi')
self.assertEqual(high('what time are we climbing up the volcano'), 'volcano')
self.assertEqual(high('take me to semynak'), 'semynak')
self.assertEqual(high('aa b'), 'aa')
self.assertEqual(high('b aa'), 'b')
self.assertEqual(high('bb d'), 'bb')
self.assertEqual(high('d bb'), 'd')
self.assertEqual(high("aaa b"), "aaa")
if __name__ == '__main__':
unittest.main()
```
#### File: tests/strings/sort_strings_by_number_test.py
```python
import unittest
from algorithms.main.strings.sort_strings_by_number import order
class TestSortStringByNumber(unittest.TestCase):
def test_sort_string_by_number(self):
self.assertEqual(order("is2 Thi1s T4est 3a"), "Thi1s is2 3a T4est")
self.assertEqual(order("4of Fo1r pe6ople g3ood th5e the2"), "Fo1r the2 g3ood 4of th5e pe6ople")
self.assertEqual(order(""), "")
self.assertEqual(order("is2 This1 Test4 a3"), "This1 is2 a3 Test4")
self.assertEqual(order("something1 something12 something17 something2"), "something1 something2 something12 something17")
if __name__ == '__main__':
unittest.main()
```
#### File: tests/trees/binary_tree_sort_levels_test.py
```python
import time
import unittest
from algorithms.main.trees.binary_tree_sort_levels import tree_by_levels
class Node:
def __init__(self, L, R, n):
self.left = L
self.right = R
self.value = n
class TestTreeByLevels(unittest.TestCase):
def setUp(self):
self._started_at = time.time()
def tearDown(self):
elapsed = time.time() - self._started_at
print(f'Time: ({elapsed:.8f}s)')
def test_tree_by_levels(self):
# Arrange
self.assertEqual(tree_by_levels(None), [])
self.assertEqual(tree_by_levels(
Node(
Node(
None,
Node(
None,
None,
4),
2),
Node(
Node(
None,
None,
5),
Node(
None,
None,
6),
3),
1)), [1, 2, 3, 4, 5, 6])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joelgrondman/nltk205sslfix",
"score": 3
} |
#### File: corpus/reader/util.py
```python
import os
import sys
import bisect
import re
import tempfile
try: import cPickle as pickle
except ImportError: import pickle
from itertools import islice
# Use the c version of ElementTree, which is faster, if possible:
try: from xml.etree import cElementTree as ElementTree
except ImportError: from xml.etree import ElementTree
from nltk.tokenize import wordpunct_tokenize
from nltk.internals import slice_bounds
from nltk.data import PathPointer, FileSystemPathPointer, ZipFilePathPointer
from nltk.data import SeekableUnicodeStreamReader
from nltk.sourcedstring import SourcedStringStream
from nltk.util import AbstractLazySequence, LazySubsequence, LazyConcatenation, py25
######################################################################
#{ Corpus View
######################################################################
class StreamBackedCorpusView(AbstractLazySequence):
"""
A 'view' of a corpus file, which acts like a sequence of tokens:
it can be accessed by index, iterated over, etc. However, the
tokens are only constructed as-needed -- the entire corpus is
never stored in memory at once.
The constructor to ``StreamBackedCorpusView`` takes two arguments:
a corpus fileid (specified as a string or as a ``PathPointer``);
and a block reader. A "block reader" is a function that reads
zero or more tokens from a stream, and returns them as a list. A
very simple example of a block reader is:
>>> def simple_block_reader(stream):
... return stream.readline().split()
This simple block reader reads a single line at a time, and
returns a single token (consisting of a string) for each
whitespace-separated substring on the line.
When deciding how to define the block reader for a given
corpus, careful consideration should be given to the size of
blocks handled by the block reader. Smaller block sizes will
increase the memory requirements of the corpus view's internal
data structures (by 2 integers per block). On the other hand,
larger block sizes may decrease performance for random access to
the corpus. (But note that larger block sizes will *not*
decrease performance for iteration.)
Internally, ``CorpusView`` maintains a partial mapping from token
index to file position, with one entry per block. When a token
with a given index *i* is requested, the ``CorpusView`` constructs
it as follows:
1. First, it searches the toknum/filepos mapping for the token
index closest to (but less than or equal to) *i*.
2. Then, starting at the file position corresponding to that
index, it reads one block at a time using the block reader
until it reaches the requested token.
The toknum/filepos mapping is created lazily: it is initially
empty, but every time a new block is read, the block's
initial token is added to the mapping. (Thus, the toknum/filepos
map has one entry per block.)
In order to increase efficiency for random access patterns that
have high degrees of locality, the corpus view may cache one or
more blocks.
:note: Each ``CorpusView`` object internally maintains an open file
object for its underlying corpus file. This file should be
automatically closed when the ``CorpusView`` is garbage collected,
but if you wish to close it manually, use the ``close()``
method. If you access a ``CorpusView``'s items after it has been
closed, the file object will be automatically re-opened.
:warning: If the contents of the file are modified during the
lifetime of the ``CorpusView``, then the ``CorpusView``'s behavior
is undefined.
:warning: If a unicode encoding is specified when constructing a
``CorpusView``, then the block reader may only call
``stream.seek()`` with offsets that have been returned by
``stream.tell()``; in particular, calling ``stream.seek()`` with
relative offsets, or with offsets based on string lengths, may
lead to incorrect behavior.
:ivar _block_reader: The function used to read
a single block from the underlying file stream.
:ivar _toknum: A list containing the token index of each block
that has been processed. In particular, ``_toknum[i]`` is the
token index of the first token in block ``i``. Together
with ``_filepos``, this forms a partial mapping between token
indices and file positions.
:ivar _filepos: A list containing the file position of each block
that has been processed. In particular, ``_toknum[i]`` is the
file position of the first character in block ``i``. Together
with ``_toknum``, this forms a partial mapping between token
indices and file positions.
:ivar _stream: The stream used to access the underlying corpus file.
:ivar _len: The total number of tokens in the corpus, if known;
or None, if the number of tokens is not yet known.
:ivar _eofpos: The character position of the last character in the
file. This is calculated when the corpus view is initialized,
and is used to decide when the end of file has been reached.
:ivar _cache: A cache of the most recently read block. It
is encoded as a tuple (start_toknum, end_toknum, tokens), where
start_toknum is the token index of the first token in the block;
end_toknum is the token index of the first token not in the
block; and tokens is a list of the tokens in the block.
"""
def __init__(self, fileid, block_reader=None, startpos=0,
encoding=None, source=None):
"""
Create a new corpus view, based on the file ``fileid``, and
read with ``block_reader``. See the class documentation
for more information.
:param fileid: The path to the file that is read by this
corpus view. ``fileid`` can either be a string or a
``PathPointer``.
:param startpos: The file position at which the view will
start reading. This can be used to skip over preface
sections.
:param encoding: The unicode encoding that should be used to
read the file's contents. If no encoding is specified,
then the file's contents will be read as a non-unicode
string (i.e., a str).
:param source: If specified, then use an ``SourcedStringStream``
to annotate all strings read from the file with
            information about their start offset, end offset,
and docid. The value of ``source`` will be used as the docid.
"""
if block_reader:
self.read_block = block_reader
# Initialize our toknum/filepos mapping.
self._toknum = [0]
self._filepos = [startpos]
self._encoding = encoding
self._source = source
# We don't know our length (number of tokens) yet.
self._len = None
self._fileid = fileid
self._stream = None
self._current_toknum = None
"""This variable is set to the index of the next token that
will be read, immediately before ``self.read_block()`` is
called. This is provided for the benefit of the block
reader, which under rare circumstances may need to know
the current token number."""
self._current_blocknum = None
"""This variable is set to the index of the next block that
will be read, immediately before ``self.read_block()`` is
called. This is provided for the benefit of the block
reader, which under rare circumstances may need to know
the current block number."""
# Find the length of the file.
try:
if isinstance(self._fileid, PathPointer):
self._eofpos = self._fileid.file_size()
else:
self._eofpos = os.stat(self._fileid).st_size
except Exception, exc:
raise ValueError('Unable to open or access %r -- %s' %
(fileid, exc))
# Maintain a cache of the most recently read block, to
# increase efficiency of random access.
self._cache = (-1, -1, None)
fileid = property(lambda self: self._fileid, doc="""
The fileid of the file that is accessed by this view.
:type: str or PathPointer""")
def read_block(self, stream):
"""
Read a block from the input stream.
:return: a block of tokens from the input stream
:rtype: list(any)
:param stream: an input stream
:type stream: stream
"""
raise NotImplementedError('Abstract Method')
def _open(self):
"""
Open the file stream associated with this corpus view. This
        will automatically be performed if any value is read from the view
while its file stream is closed.
"""
if isinstance(self._fileid, PathPointer):
self._stream = self._fileid.open(self._encoding)
elif self._encoding:
self._stream = SeekableUnicodeStreamReader(
open(self._fileid, 'rb'), self._encoding)
else:
self._stream = open(self._fileid, 'rb')
if self._source is not None:
self._stream = SourcedStringStream(self._stream, self._source)
def close(self):
"""
Close the file stream associated with this corpus view. This
can be useful if you are worried about running out of file
handles (although the stream should automatically be closed
upon garbage collection of the corpus view). If the corpus
view is accessed after it is closed, it will be automatically
re-opened.
"""
if self._stream is not None:
self._stream.close()
self._stream = None
def __len__(self):
if self._len is None:
# iterate_from() sets self._len when it reaches the end
# of the file:
for tok in self.iterate_from(self._toknum[-1]): pass
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
start, stop = slice_bounds(self, i)
# Check if it's in the cache.
offset = self._cache[0]
if offset <= start and stop <= self._cache[1]:
return self._cache[2][start-offset:stop-offset]
# Construct & return the result.
return LazySubsequence(self, start, stop)
else:
# Handle negative indices
if i < 0: i += len(self)
if i < 0: raise IndexError('index out of range')
# Check if it's in the cache.
offset = self._cache[0]
if offset <= i < self._cache[1]:
return self._cache[2][i-offset]
# Use iterate_from to extract it.
try:
return self.iterate_from(i).next()
except StopIteration:
raise IndexError('index out of range')
# If we wanted to be thread-safe, then this method would need to
# do some locking.
def iterate_from(self, start_tok):
# Start by feeding from the cache, if possible.
if self._cache[0] <= start_tok < self._cache[1]:
for tok in self._cache[2][start_tok-self._cache[0]:]:
yield tok
start_tok += 1
# Decide where in the file we should start. If `start` is in
# our mapping, then we can jump straight to the correct block;
# otherwise, start at the last block we've processed.
if start_tok < self._toknum[-1]:
block_index = bisect.bisect_right(self._toknum, start_tok)-1
toknum = self._toknum[block_index]
filepos = self._filepos[block_index]
else:
block_index = len(self._toknum)-1
toknum = self._toknum[-1]
filepos = self._filepos[-1]
# Open the stream, if it's not open already.
if self._stream is None:
self._open()
# Each iteration through this loop, we read a single block
# from the stream.
while filepos < self._eofpos:
# Read the next block.
self._stream.seek(filepos)
self._current_toknum = toknum
self._current_blocknum = block_index
tokens = self.read_block(self._stream)
assert isinstance(tokens, (tuple, list, AbstractLazySequence)), (
'block reader %s() should return list or tuple.' %
self.read_block.__name__)
num_toks = len(tokens)
new_filepos = self._stream.tell()
assert new_filepos > filepos, (
'block reader %s() should consume at least 1 byte (filepos=%d)' %
(self.read_block.__name__, filepos))
# Update our cache.
self._cache = (toknum, toknum+num_toks, list(tokens))
# Update our mapping.
assert toknum <= self._toknum[-1]
if num_toks > 0:
block_index += 1
if toknum == self._toknum[-1]:
assert new_filepos > self._filepos[-1] # monotonic!
self._filepos.append(new_filepos)
self._toknum.append(toknum+num_toks)
else:
# Check for consistency:
assert new_filepos == self._filepos[block_index], (
'inconsistent block reader (num chars read)')
assert toknum+num_toks == self._toknum[block_index], (
'inconsistent block reader (num tokens returned)')
# If we reached the end of the file, then update self._len
if new_filepos == self._eofpos:
self._len = toknum + num_toks
# Generate the tokens in this block (but skip any tokens
# before start_tok). Note that between yields, our state
# may be modified.
for tok in tokens[max(0, start_tok-toknum):]:
yield tok
# If we're at the end of the file, then we're done.
assert new_filepos <= self._eofpos
if new_filepos == self._eofpos:
break
# Update our indices
toknum += num_toks
filepos = new_filepos
# If we reach this point, then we should know our length.
assert self._len is not None
# Use concat for these, so we can use a ConcatenatedCorpusView
# when possible.
def __add__(self, other):
return concat([self, other])
def __radd__(self, other):
return concat([other, self])
def __mul__(self, count):
return concat([self] * count)
def __rmul__(self, count):
return concat([self] * count)
class ConcatenatedCorpusView(AbstractLazySequence):
"""
A 'view' of a corpus file that joins together one or more
``StreamBackedCorpusViews<StreamBackedCorpusView>``. At most
one file handle is left open at any time.
"""
def __init__(self, corpus_views):
self._pieces = corpus_views
"""A list of the corpus subviews that make up this
concatenation."""
self._offsets = [0]
"""A list of offsets, indicating the index at which each
subview begins. In particular::
offsets[i] = sum([len(p) for p in pieces[:i]])"""
self._open_piece = None
"""The most recently accessed corpus subview (or None).
Before a new subview is accessed, this subview will be closed."""
def __len__(self):
if len(self._offsets) <= len(self._pieces):
# Iterate to the end of the corpus.
for tok in self.iterate_from(self._offsets[-1]): pass
return self._offsets[-1]
def close(self):
for piece in self._pieces:
piece.close()
def iterate_from(self, start_tok):
piecenum = bisect.bisect_right(self._offsets, start_tok)-1
while piecenum < len(self._pieces):
offset = self._offsets[piecenum]
piece = self._pieces[piecenum]
# If we've got another piece open, close it first.
if self._open_piece is not piece:
if self._open_piece is not None:
self._open_piece.close()
self._open_piece = piece
# Get everything we can from this piece.
for tok in piece.iterate_from(max(0, start_tok-offset)):
yield tok
# Update the offset table.
if piecenum+1 == len(self._offsets):
self._offsets.append(self._offsets[-1] + len(piece))
# Move on to the next piece.
piecenum += 1
def concat(docs):
"""
Concatenate together the contents of multiple documents from a
single corpus, using an appropriate concatenation function. This
utility function is used by corpus readers when the user requests
more than one document at a time.
"""
if len(docs) == 1:
return docs[0]
if len(docs) == 0:
raise ValueError('concat() expects at least one object!')
types = set([d.__class__ for d in docs])
# If they're all strings, use string concatenation.
if types.issubset([str, unicode, basestring]):
return reduce((lambda a,b:a+b), docs, '')
# If they're all corpus views, then use ConcatenatedCorpusView.
for typ in types:
if not issubclass(typ, (StreamBackedCorpusView,
ConcatenatedCorpusView)):
break
else:
return ConcatenatedCorpusView(docs)
# If they're all lazy sequences, use a lazy concatenation
for typ in types:
if not issubclass(typ, AbstractLazySequence):
break
else:
return LazyConcatenation(docs)
# Otherwise, see what we can do:
if len(types) == 1:
typ = list(types)[0]
if issubclass(typ, list):
return reduce((lambda a,b:a+b), docs, [])
if issubclass(typ, tuple):
return reduce((lambda a,b:a+b), docs, ())
if ElementTree.iselement(typ):
xmltree = ElementTree.Element('documents')
for doc in docs: xmltree.append(doc)
return xmltree
# No method found!
raise ValueError("Don't know how to concatenate types: %r" % types)
######################################################################
#{ Corpus View for Pickled Sequences
######################################################################
class PickleCorpusView(StreamBackedCorpusView):
"""
A stream backed corpus view for corpus files that consist of
sequences of serialized Python objects (serialized using
``pickle.dump``). One use case for this class is to store the
result of running feature detection on a corpus to disk. This can
be useful when performing feature detection is expensive (so we
don't want to repeat it); but the corpus is too large to store in
memory. The following example illustrates this technique:
.. doctest::
:options: +SKIP
>>> from nltk.corpus.reader.util import PickleCorpusView
>>> from nltk.util import LazyMap
>>> feature_corpus = LazyMap(detect_features, corpus)
>>> PickleCorpusView.write(feature_corpus, some_fileid)
>>> pcv = PickleCorpusView(some_fileid)
"""
BLOCK_SIZE = 100
PROTOCOL = -1
def __init__(self, fileid, delete_on_gc=False):
"""
Create a new corpus view that reads the pickle corpus
``fileid``.
:param delete_on_gc: If true, then ``fileid`` will be deleted
whenever this object gets garbage-collected.
"""
self._delete_on_gc = delete_on_gc
StreamBackedCorpusView.__init__(self, fileid)
def read_block(self, stream):
result = []
for i in range(self.BLOCK_SIZE):
try: result.append(pickle.load(stream))
except EOFError: break
return result
def __del__(self):
"""
If ``delete_on_gc`` was set to true when this
``PickleCorpusView`` was created, then delete the corpus view's
fileid. (This method is called whenever a
        ``PickleCorpusView`` is garbage-collected.)
"""
if getattr(self, '_delete_on_gc'):
if os.path.exists(self._fileid):
try: os.remove(self._fileid)
except (OSError, IOError): pass
self.__dict__.clear() # make the garbage collector's job easier
@classmethod
def write(cls, sequence, output_file):
if isinstance(output_file, basestring):
output_file = open(output_file, 'wb')
for item in sequence:
pickle.dump(item, output_file, cls.PROTOCOL)
@classmethod
def cache_to_tempfile(cls, sequence, delete_on_gc=True):
"""
Write the given sequence to a temporary file as a pickle
corpus; and then return a ``PickleCorpusView`` view for that
temporary corpus file.
:param delete_on_gc: If true, then the temporary file will be
deleted whenever this object gets garbage-collected.
"""
try:
fd, output_file_name = tempfile.mkstemp('.pcv', 'nltk-')
output_file = os.fdopen(fd, 'wb')
cls.write(sequence, output_file)
output_file.close()
return PickleCorpusView(output_file_name, delete_on_gc)
except (OSError, IOError), e:
raise ValueError('Error while creating temp file: %s' % e)
######################################################################
#{ Block Readers
######################################################################
def read_whitespace_block(stream):
toks = []
for i in range(20): # Read 20 lines at a time.
toks.extend(stream.readline().split())
return toks
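# Illustrative pairing of a block reader with a corpus view (hypothetical file):
#   view = StreamBackedCorpusView('corpus.txt', read_whitespace_block)
#   view[0]   # first whitespace-delimited token, read lazily from disk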
def read_wordpunct_block(stream):
toks = []
for i in range(20): # Read 20 lines at a time.
toks.extend(wordpunct_tokenize(stream.readline()))
return toks
def read_line_block(stream):
toks = []
for i in range(20):
line = stream.readline()
if not line: return toks
toks.append(line.rstrip('\n'))
return toks
def read_blankline_block(stream):
s = ''
while True:
line = stream.readline()
# End of file:
if not line:
if s: return [s]
else: return []
# Blank line:
elif line and not line.strip():
if s: return [s]
# Other line:
else:
s += line
def read_alignedsent_block(stream):
    s = ''
    while True:
        line = stream.readline()
        # End of file (checked first, so an empty line does not raise IndexError):
        if not line:
            if s: return [s]
            else: return []
        # Skip divider and blank lines:
        if line[0] == '=' or line[0] == '\n' or line[:2] == '\r\n':
            continue
        # Other line:
        s += line
        if re.match('^\d+-\d+', line) is not None:
            return [s]
def read_regexp_block(stream, start_re, end_re=None):
"""
Read a sequence of tokens from a stream, where tokens begin with
lines that match ``start_re``. If ``end_re`` is specified, then
tokens end with lines that match ``end_re``; otherwise, tokens end
whenever the next line matching ``start_re`` or EOF is found.
"""
# Scan until we find a line matching the start regexp.
while True:
line = stream.readline()
if not line: return [] # end of file.
if re.match(start_re, line): break
# Scan until we find another line matching the regexp, or EOF.
lines = [line]
while True:
oldpos = stream.tell()
line = stream.readline()
# End of file:
if not line:
return [''.join(lines)]
# End of token:
if end_re is not None and re.match(end_re, line):
return [''.join(lines)]
# Start of new token: backup to just before it starts, and
# return the token we've already collected.
if end_re is None and re.match(start_re, line):
stream.seek(oldpos)
return [''.join(lines)]
# Anything else is part of the token.
lines.append(line)
def read_sexpr_block(stream, block_size=16384, comment_char=None):
"""
Read a sequence of s-expressions from the stream, and leave the
stream's file position at the end the last complete s-expression
read. This function will always return at least one s-expression,
unless there are no more s-expressions in the file.
    If the file ends in the middle of an s-expression, then that
incomplete s-expression is returned when the end of the file is
reached.
:param block_size: The default block size for reading. If an
s-expression is longer than one block, then more than one
block will be read.
:param comment_char: A character that marks comments. Any lines
that begin with this character will be stripped out.
(If spaces or tabs precede the comment character, then the
line will not be stripped.)
"""
start = stream.tell()
block = stream.read(block_size)
encoding = getattr(stream, 'encoding', None)
assert encoding is not None or isinstance(block, str)
if encoding not in (None, 'utf-8'):
import warnings
warnings.warn('Parsing may fail, depending on the properties '
'of the %s encoding!' % encoding)
# (e.g., the utf-16 encoding does not work because it insists
# on adding BOMs to the beginning of encoded strings.)
if comment_char:
COMMENT = re.compile('(?m)^%s.*$' % re.escape(comment_char))
while True:
try:
# If we're stripping comments, then make sure our block ends
# on a line boundary; and then replace any comments with
# space characters. (We can't just strip them out -- that
# would make our offset wrong.)
if comment_char:
block += stream.readline()
block = re.sub(COMMENT, _sub_space, block)
# Read the block.
tokens, offset = _parse_sexpr_block(block)
# Skip whitespace
offset = re.compile(r'\s*').search(block, offset).end()
# Move to the end position.
if encoding is None:
stream.seek(start+offset)
else:
stream.seek(start+len(block[:offset].encode(encoding)))
# Return the list of tokens we processed
return tokens
except ValueError, e:
if e.args[0] == 'Block too small':
next_block = stream.read(block_size)
if next_block:
block += next_block
continue
else:
# The file ended mid-sexpr -- return what we got.
return [block.strip()]
else: raise
def _sub_space(m):
"""Helper function: given a regexp match, return a string of
spaces that's the same length as the matched string."""
return ' '*(m.end()-m.start())
def _parse_sexpr_block(block):
tokens = []
start = end = 0
while end < len(block):
m = re.compile(r'\S').search(block, end)
if not m:
return tokens, end
start = m.start()
# Case 1: sexpr is not parenthesized.
if m.group() != '(':
m2 = re.compile(r'[\s(]').search(block, start)
if m2:
end = m2.start()
else:
if tokens: return tokens, end
raise ValueError('Block too small')
# Case 2: parenthesized sexpr.
else:
nesting = 0
for m in re.compile(r'[()]').finditer(block, start):
if m.group()=='(': nesting += 1
else: nesting -= 1
if nesting == 0:
end = m.end()
break
else:
if tokens: return tokens, end
raise ValueError('Block too small')
tokens.append(block[start:end])
return tokens, end
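# e.g. _parse_sexpr_block('(a b) (c d)') == (['(a b)', '(c d)'], 11)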
######################################################################
#{ Finding Corpus Items
######################################################################
def find_corpus_fileids(root, regexp):
if not isinstance(root, PathPointer):
raise TypeError('find_corpus_fileids: expected a PathPointer')
regexp += '$'
# Find fileids in a zipfile: scan the zipfile's namelist. Filter
# out entries that end in '/' -- they're directories.
if isinstance(root, ZipFilePathPointer):
fileids = [name[len(root.entry):] for name in root.zipfile.namelist()
if not name.endswith('/')]
items = [name for name in fileids if re.match(regexp, name)]
return sorted(items)
# Find fileids in a directory: use os.walk to search all (proper
# or symlinked) subdirectories, and match paths against the regexp.
elif isinstance(root, FileSystemPathPointer):
items = []
# workaround for py25 which doesn't support followlinks
kwargs = {}
if not py25():
kwargs = {'followlinks': True}
for dirname, subdirs, fileids in os.walk(root.path, **kwargs):
prefix = ''.join('%s/' % p for p in _path_from(root.path, dirname))
items += [prefix+fileid for fileid in fileids
if re.match(regexp, prefix+fileid)]
# Don't visit svn directories:
if '.svn' in subdirs: subdirs.remove('.svn')
return sorted(items)
else:
raise AssertionError("Don't know how to handle %r" % root)
def _path_from(parent, child):
if os.path.split(parent)[1] == '':
parent = os.path.split(parent)[0]
path = []
while parent != child:
child, dirname = os.path.split(child)
path.insert(0, dirname)
assert os.path.split(child)[0] != child
return path
######################################################################
#{ Paragraph structure in Treebank files
######################################################################
def tagged_treebank_para_block_reader(stream):
# Read the next paragraph.
para = ''
while True:
line = stream.readline()
# End of paragraph:
if re.match('======+\s*$', line):
if para.strip(): return [para]
# End of file:
elif line == '':
if para.strip(): return [para]
else: return []
# Content line:
else:
para += line
```
#### File: nltk/sem/relextract.py
```python
from collections import defaultdict
from string import join
import re
import htmlentitydefs
from itertools import ifilter
# Dictionary that associates corpora with NE classes
NE_CLASSES = {
'ieer': ['LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE'],
'conll2002': ['LOC', 'PER', 'ORG'],
'ace': ['LOCATION', 'ORGANIZATION', 'PERSON', 'DURATION',
'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE', 'FACILITY', 'GPE'],
}
# Allow abbreviated class labels
short2long = dict(LOC = 'LOCATION', ORG = 'ORGANIZATION', PER = 'PERSON')
long2short = dict(LOCATION ='LOC', ORGANIZATION = 'ORG', PERSON = 'PER')
def _expand(type):
"""
Expand an NE class name.
:type type: str
:rtype: str
"""
try:
return short2long[type]
except KeyError:
return type
def class_abbrev(type):
"""
Abbreviate an NE class name.
:type type: str
:rtype: str
"""
try:
return long2short[type]
except KeyError:
return type
def _join(lst, sep=' ', untag=False):
"""
    Join a list into a string, turning tagged tuples into tag strings or just words.
:param untag: if ``True``, omit the tag from tagged input strings.
:type lst: list
:rtype: str
"""
try:
return join(lst, sep=sep)
except TypeError:
if untag:
return join([tup[0] for tup in lst], sep=sep)
from nltk.tag import tuple2str
return join([tuple2str(tup) for tup in lst], sep=sep)
def descape_entity(m, defs=htmlentitydefs.entitydefs):
"""
Translate one entity to its ISO Latin value.
Inspired by example from effbot.org
"""
#s = 'mcglashan_&_sarrail'
#l = ['mcglashan', '&', 'sarrail']
#pattern = re.compile("&(\w+?);")
#new = list2sym(l)
#s = pattern.sub(descape_entity, s)
#print s, new
try:
return defs[m.group(1)]
except KeyError:
return m.group(0) # use as is
def list2sym(lst):
"""
Convert a list of strings into a canonical symbol.
:type lst: list
:return: a Unicode string without whitespace
:rtype: unicode
"""
sym = _join(lst, '_', untag=True)
sym = sym.lower()
ENT = re.compile("&(\w+?);")
sym = ENT.sub(descape_entity, sym)
sym = sym.replace('.', '')
return sym
def mk_pairs(tree):
"""
Group a chunk structure into a list of pairs of the form (list(str), ``Tree``)
In order to facilitate the construction of (``Tree``, string, ``Tree``) triples, this
identifies pairs whose first member is a list (possibly empty) of terminal
strings, and whose second member is a ``Tree`` of the form (NE_label, terminals).
:param tree: a chunk tree
:return: a list of pairs (list(str), ``Tree``)
:rtype: list of tuple
"""
from nltk.tree import Tree
pairs = []
pair = [[], None]
for dtr in tree:
if not isinstance(dtr, Tree):
pair[0].append(dtr)
else:
# dtr is a Tree
pair[1] = dtr
pairs.append(pair)
pair = [[], None]
return pairs
def mk_reldicts(pairs, window=5, trace=0):
"""
Converts the pairs generated by ``mk_pairs`` into a 'reldict': a dictionary which
stores information about the subject and object NEs plus the filler between them.
Additionally, a left and right context of length <= window are captured (within
a given input sentence).
:param pairs: a pair of list(str) and ``Tree``, as generated by ``mk_pairs``
:param window: a threshold for the number of items to include in the left and right context
:type window: int
:return: 'relation' dictionaries whose keys are 'lcon', 'subjclass', 'subjtext', 'subjsym', 'filler', 'objclass', 'objtext', 'objsym' and 'rcon'
:rtype: list(defaultdict)
"""
result = []
while len(pairs) > 2:
reldict = defaultdict(str)
reldict['lcon'] = _join(pairs[0][0][-window:])
reldict['subjclass'] = pairs[0][1].node
reldict['subjtext'] = _join(pairs[0][1].leaves())
reldict['subjsym'] = list2sym(pairs[0][1].leaves())
reldict['filler'] = _join(pairs[1][0])
reldict['objclass'] = pairs[1][1].node
reldict['objtext'] = _join(pairs[1][1].leaves())
reldict['objsym'] = list2sym(pairs[1][1].leaves())
reldict['rcon'] = _join(pairs[2][0][:window])
if trace:
print "rel(%s, %s)" % (reldict['subjclass'], reldict['objclass'])
result.append(reldict)
pairs = pairs[1:]
return result
def extract_rels(subjclass, objclass, doc, corpus='ace', pattern=None, window=10):
"""
Filter the output of ``mk_reldicts`` according to specified NE classes and a filler pattern.
The parameters ``subjclass`` and ``objclass`` can be used to restrict the
Named Entities to particular types (any of 'LOCATION', 'ORGANIZATION',
'PERSON', 'DURATION', 'DATE', 'CARDINAL', 'PERCENT', 'MONEY', 'MEASURE').
:param subjclass: the class of the subject Named Entity.
:type subjclass: str
:param objclass: the class of the object Named Entity.
:type objclass: str
:param doc: input document
:type doc: ieer document or a list of chunk trees
:param corpus: name of the corpus to take as input; possible values are
'ieer' and 'conll2002'
:type corpus: str
:param pattern: a regular expression for filtering the fillers of
retrieved triples.
:type pattern: SRE_Pattern
:param window: filters out fillers which exceed this threshold
:type window: int
:return: see ``mk_reldicts``
:rtype: list(defaultdict)
"""
if subjclass and subjclass not in NE_CLASSES[corpus]:
if _expand(subjclass) in NE_CLASSES[corpus]:
subjclass = _expand(subjclass)
else:
raise ValueError, "your value for the subject type has not been recognized: %s" % subjclass
if objclass and objclass not in NE_CLASSES[corpus]:
if _expand(objclass) in NE_CLASSES[corpus]:
objclass = _expand(objclass)
else:
raise ValueError, "your value for the object type has not been recognized: %s" % objclass
if corpus == 'ace' or corpus == 'conll2002':
pairs = mk_pairs(doc)
elif corpus == 'ieer':
pairs = mk_pairs(doc.text) + mk_pairs(doc.headline)
else:
raise ValueError, "corpus type not recognized"
reldicts = mk_reldicts(pairs)
relfilter = lambda x: (x['subjclass'] == subjclass and
len(x['filler'].split()) <= window and
pattern.match(x['filler']) and
x['objclass'] == objclass)
return filter(relfilter, reldicts)
def show_raw_rtuple(reldict, lcon=False, rcon=False):
"""
Pretty print the reldict as an rtuple.
:param reldict: a relation dictionary
:type reldict: defaultdict
"""
items = [class_abbrev(reldict['subjclass']), reldict['subjtext'], reldict['filler'], class_abbrev(reldict['objclass']), reldict['objtext']]
format = '[%s: %r] %r [%s: %r]'
if lcon:
items = [reldict['lcon']] + items
format = '...%r)' + format
if rcon:
items.append(reldict['rcon'])
format = format + '(%r...'
printargs = tuple(items)
return format % printargs
def show_clause(reldict, relsym):
"""
Print the relation in clausal form.
:param reldict: a relation dictionary
:type reldict: defaultdict
:param relsym: a label for the relation
:type relsym: str
"""
items = (relsym, reldict['subjsym'], reldict['objsym'])
return "%s(%r, %r)" % items
#######################################################
# Demos of relation extraction with regular expressions
#######################################################
############################################
# Example of in(ORG, LOC)
############################################
def in_demo(trace=0, sql=True):
"""
Select pairs of organizations and locations whose mentions occur with an
intervening occurrence of the preposition "in".
If the sql parameter is set to True, then the entity pairs are loaded into
an in-memory database, and subsequently pulled out using an SQL "SELECT"
query.
"""
from nltk.corpus import ieer
if sql:
try:
import sqlite3
connection = sqlite3.connect(":memory:")
connection.text_factory = sqlite3.OptimizedUnicode
cur = connection.cursor()
cur.execute("""create table Locations
(OrgName text, LocationName text, DocID text)""")
except ImportError:
import warnings
warnings.warn("Cannot import sqlite; sql flag will be ignored.")
IN = re.compile(r'.*\bin\b(?!\b.+ing)')
print
print "IEER: in(ORG, LOC) -- just the clauses:"
print "=" * 45
for file in ieer.fileids():
for doc in ieer.parsed_docs(file):
if trace:
print doc.docno
print "=" * 15
for rel in extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern=IN):
print show_clause(rel, relsym='IN')
if sql:
try:
rtuple = (rel['subjtext'], rel['objtext'], doc.docno)
cur.execute("""insert into Locations
values (?, ?, ?)""", rtuple)
connection.commit()
except NameError:
pass
if sql:
try:
cur.execute("""select OrgName from Locations
where LocationName = 'Atlanta'""")
print
print "Extract data from SQL table: ORGs in Atlanta"
print "-" * 15
for row in cur:
print row
except NameError:
pass
############################################
# Example of has_role(PER, ORG)
############################################
def roles_demo(trace=0):
from nltk.corpus import ieer
roles = """
(.*( # assorted roles
analyst|
chair(wo)?man|
commissioner|
counsel|
director|
economist|
editor|
executive|
foreman|
governor|
head|
lawyer|
leader|
librarian).*)|
manager|
partner|
president|
producer|
professor|
researcher|
spokes(wo)?man|
writer|
,\sof\sthe?\s* # "X, of (the) Y"
"""
ROLES = re.compile(roles, re.VERBOSE)
print
print "IEER: has_role(PER, ORG) -- raw rtuples:"
print "=" * 45
for file in ieer.fileids():
for doc in ieer.parsed_docs(file):
lcon = rcon = False
if trace:
print doc.docno
print "=" * 15
lcon = rcon = True
for rel in extract_rels('PER', 'ORG', doc, corpus='ieer', pattern=ROLES):
print show_raw_rtuple(rel, lcon=lcon, rcon=rcon)
##############################################
### Show what's in the IEER Headlines
##############################################
def ieer_headlines():
from nltk.corpus import ieer
from nltk.tree import Tree
print "IEER: First 20 Headlines"
print "=" * 45
trees = [(doc.docno, doc.headline) for file in ieer.fileids() for doc in ieer.parsed_docs(file)]
for docno, tree in trees[:20]:
print
print "%s:\n%s" % (docno, tree)
#############################################
## Dutch CONLL2002: take_on_role(PER, ORG)
#############################################
def conllned(trace=1):
"""
Find the copula+'van' relation ('of') in the Dutch tagged training corpus
from CoNLL 2002.
"""
from nltk.corpus import conll2002
vnv = """
(
is/V| # 3rd sing present and
was/V| # past forms of the verb zijn ('be')
werd/V| # past and
wordt/V # present forms of the verb worden ('become')
)
.* # followed by anything
van/Prep # followed by van ('of')
"""
VAN = re.compile(vnv, re.VERBOSE)
print
print "Dutch CoNLL2002: van(PER, ORG) -- raw rtuples with context:"
print "=" * 45
for doc in conll2002.chunked_sents('ned.train'):
lcon = rcon = False
if trace:
lcon = rcon = True
for rel in extract_rels('PER', 'ORG', doc, corpus='conll2002', pattern=VAN, window=10):
print show_raw_rtuple(rel, lcon=lcon, rcon=rcon)
#############################################
## Spanish CONLL2002: (PER, ORG)
#############################################
def conllesp():
from nltk.corpus import conll2002
de = """
.*
(
de/SP|
del/SP
)
"""
DE = re.compile(de, re.VERBOSE)
print
print "Spanish CoNLL2002: de(ORG, LOC) -- just the first 10 clauses:"
print "=" * 45
rels = [rel for doc in conll2002.chunked_sents('esp.train')
for rel in extract_rels('ORG', 'LOC', doc, corpus='conll2002', pattern = DE)]
for r in rels[:10]: print show_clause(r, relsym='DE')
print
def ne_chunked():
import nltk
IN = re.compile(r'.*\bin\b(?!\b.+ing)')
rels = []
for sent in nltk.corpus.treebank.tagged_sents()[:100]:
sent = nltk.ne_chunk(sent)
print extract_rels('ORG', 'LOC', sent, corpus='ace', pattern=IN)
if __name__ == '__main__':
import nltk
from nltk.sem import relextract
in_demo(trace=0)
roles_demo(trace=0)
conllned()
conllesp()
ieer_headlines()
```
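A stripped-down version of the pipeline the demos above run, without the SQL and tracing machinery (a Python 2 sketch; it assumes the NLTK `ieer` corpus data has been downloaded):
```python
# Hypothetical sketch mirroring in_demo(): ORG-in-LOC relations from the IEER corpus.
import re
from nltk.corpus import ieer

IN = re.compile(r'.*\bin\b(?!\b.+ing)')   # same filler pattern as in_demo()
for fileid in ieer.fileids():
    for doc in ieer.parsed_docs(fileid):
        for rel in extract_rels('ORG', 'LOC', doc, corpus='ieer', pattern=IN):
            print show_raw_rtuple(rel)
```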
#### File: nltk205sslfix/nltk/sourcedstring.py
```python
import re, sys
from nltk.internals import slice_bounds, abstract
__all__ = [
'StringSource',
'ConsecutiveCharStringSource', 'ContiguousCharStringSource',
'SourcedString', 'SourcedStringStream', 'SourcedStringRegexp',
'SimpleSourcedString', 'CompoundSourcedString',
'SimpleSourcedByteString', 'SimpleSourcedUnicodeString',
'CompoundSourcedByteString', 'CompoundSourcedUnicodeString',
]
#//////////////////////////////////////////////////////////////////////
# String Sources
#//////////////////////////////////////////////////////////////////////
class StringSource(object):
"""
A description of the location of a string in a document. Each
``StringSource`` consists of a document identifier, along with
information about the begin and end offsets of each character in
the string. These offsets are typically either byte offsets or
character offsets. (Note that for unicode strings, byte offsets
and character offsets are not the same thing.)
``StringSource`` is an abstract base class. Two concrete
subclasses are used depending on the properties of the string
whose source is being described:
- ``ConsecutiveCharStringSource`` describes the source of strings
whose characters have consecutive offsets (in particular, byte
strings w/ byte offsets; and unicode strings with character
offsets).
- ``ContiguousCharStringSource`` describes the source of strings
whose characters are contiguous, but do not necessarily have
consecutive offsets (in particular, unicode strings with byte
offsets).
:ivar docid: An identifier (such as a filename) that specifies
which document contains the string.
:ivar offsets: A list of offsets specifying the location of each
character in the document. The *i* th character of the string
begins at offset ``offsets[i]`` and ends at offset
``offsets[i+1]``. The length of the ``offsets`` list is one
greater than the length of the string described by this
``StringSource``.
:ivar begin: The document offset where the string begins. (I.e.,
the offset of the first character in the string.)
``source.begin`` is always equal to ``source.offsets[0]``.
:ivar end: The document offset where the string ends. (For
character offsets, one plus the offset of the last character;
for byte offsets, one plus the offset of the last byte that
encodes the last character). ``source.end`` is always equal
to ``source.offsets[-1]``.
"""
def __new__(cls, docid, *args, **kwargs):
# If the StringSource constructor is called directly, then
# choose one of its subclasses to delegate to.
if cls is StringSource:
if args:
raise TypeError("Specify either begin and end, or "
"offsets, using keyword arguments")
if 'begin' in kwargs and 'end' in kwargs and 'offsets' not in kwargs:
cls = ConsecutiveCharStringSource
elif ('begin' not in kwargs and 'end' not in kwargs and
'offsets' in kwargs):
cls = ContiguousCharStringSource
else:
raise TypeError("Specify either begin and end, or offsets "
"(but not both)")
# Construct the object.
return object.__new__(cls)
def __init__(self, docid, **kwargs):
"""
Create a new ``StringSource``. When the ``StringSource``
constructor is called directly, it automatically delegates to
one of its two subclasses:
- If ``begin`` and ``end`` are specified, then a
``ConsecutiveCharStringSource`` is returned.
- If ``offsets`` is specified, then a
``ContiguousCharStringSource`` is returned.
In both cases, the arguments must be specified as keyword
arguments (not positional arguments).
"""
def __getitem__(self, index):
"""
Return a ``StringSource`` describing the location where the
specified character was found. In particular, if ``s`` is the
string that this source describes, then return a
``StringSource`` describing the location of ``s[index]``.
:raise IndexError: If index is out of range.
"""
if isinstance(index, slice):
start, stop = slice_bounds(self, index)
return self.__getslice__(start, stop)
else:
if index < 0: index += len(self)
if index < 0 or index >= len(self):
raise IndexError('StringSource index out of range')
return self.__getslice__(index, index+1)
@abstract
def __getslice__(self, start, stop):
"""
Return a ``StringSource`` describing the location where the
specified substring was found. In particular, if ``s`` is the
string that this source describes, then return a
``StringSource`` describing the location of ``s[start:stop]``.
"""
@abstract
def __len__(self):
"""
Return the length of the string described by this
``StringSource``. Note that this may not be equal to
``self.end-self.begin`` for unicode strings described using
byte offsets.
"""
def __str__(self):
if self.end == self.begin+1:
return '@%s[%s]' % (self.docid, self.begin,)
else:
return '@%s[%s:%s]' % (self.docid, self.begin, self.end)
def __cmp__(self, other):
return (cmp(self.docid, other.docid) or
cmp([(charloc.begin, charloc.end) for charloc in self],
[(charloc.begin, charloc.end) for charloc in other]))
def __hash__(self):
# Cache hash values.
if not hasattr(self, '_hash'):
self._hash = hash( (self.docid,
tuple((charloc.begin, charloc.end)
for charloc in self)) )
return self._hash
class ConsecutiveCharStringSource(StringSource):
"""
A ``StringSource`` that specifies the source of strings whose
characters have consecutive offsets. In particular, the following
two properties must hold for all valid indices:
- source[i].end == source[i].begin + 1
- source[i].end == source[i+1].begin
These properties allow the source to be stored using just a start
offset and an end offset (along with a docid).
This ``StringSource`` can be used to describe byte strings that are
indexed using byte offsets or character offsets; or unicode
strings that are indexed using character offsets.
"""
def __init__(self, docid, begin, end):
if not isinstance(begin, (int, long)):
raise TypeError("begin attribute expected an integer")
if not isinstance(end, (int, long)):
raise TypeError("end attribute expected an integer")
if not end >= begin:
raise ValueError("begin must be less than or equal to end")
self.docid = docid
self.begin = begin
self.end = end
@property
def offsets(self):
return tuple(range(self.begin, self.end+1))
def __len__(self):
return self.end-self.begin
def __getslice__(self, start, stop):
start = max(0, min(len(self), start))
stop = max(start, min(len(self), stop))
return ConsecutiveCharStringSource(
self.docid, self.begin+start, self.begin+stop)
def __cmp__(self, other):
if isinstance(other, ConsecutiveCharStringSource):
return (cmp(self.docid, other.docid) or
cmp(self.begin, other.begin) or
cmp(self.end, other.end))
else:
return StringSource.__cmp__(self, other)
def __repr__(self):
return 'StringSource(%r, begin=%r, end=%r)' % (
self.docid, self.begin, self.end)
class ContiguousCharStringSource(StringSource):
"""
A ``StringSource`` that specifies the source of strings whose
character are contiguous, but do not necessarily have consecutive
offsets. In particular, each character's end offset must be equal
to the next character's start offset:
- source[i].end == source[i+1].begin
This property allow the source to be stored using a list of
``len(source)+1`` offsets (along with a docid).
This ``StringSource`` can be used to describe unicode strings that
are indexed using byte offsets.
"""
CONSTRUCTOR_CHECKS_OFFSETS = False
def __init__(self, docid, offsets):
offsets = tuple(offsets)
if len(offsets) == 0:
raise ValueError("at least one offset must be specified")
if self.CONSTRUCTOR_CHECKS_OFFSETS:
for i in range(len(offsets)):
if not isinstance(offsets[i], (int,long)):
raise TypeError("offsets must be integers")
if i>0 and offsets[i-1]>offsets[i]:
raise TypeError("offsets must be monotonic increasing")
self.docid = docid
self.offsets = offsets
@property
def begin(self): return self.offsets[0]
@property
def end(self): return self.offsets[-1]
def __len__(self):
return len(self.offsets)-1
def __getslice__(self, start, stop):
start = max(0, min(len(self), start))
stop = max(start, min(len(self), stop))
return ContiguousCharStringSource(
self.docid, self.offsets[start:stop+1])
def __cmp__(self, other):
if isinstance(other, ConsecutiveCharStringSource):
return (cmp(self.docid, other.docid) or
cmp(self.offsets, other.offsets))
else:
return StringSource.__cmp__(self, other)
def __repr__(self):
return 'StringSource(%r, offsets=%r)' % (self.docid, self.offsets)
#//////////////////////////////////////////////////////////////////////
# Base Class for Sourced Strings.
#//////////////////////////////////////////////////////////////////////
class SourcedString(object):
"""
A string that is annotated with information about the location in
a document where it was originally found. Sourced strings are
subclassed from Python strings. As a result, they can usually be
used anywhere a normal Python string can be used.
There are two types of sourced strings: ``SimpleSourcedString``s,
which correspond to a single substring of a document; and
``CompoundSourcedString``s, which are constructed by concatenating
strings from multiple sources. Each of these types has two
concrete subclasses: one for unicode strings (subclassed from
``unicode``), and one for byte strings (subclassed from ``str``).
Two sourced strings are considered equal if their contents are
equal, even if their sources differ. This fact is important in
ensuring that sourced strings act like normal strings. In
particular, it allows sourced strings to be used with code that
was originally intended to process plain Python strings.
If you wish to determine whether two sourced strings came from the
same location in the same document, simply compare their
``sources`` attributes. If you know that both sourced strings are
``SimpleSourcedStrings``, then you can compare their ``source``
attribute instead.
String operations that act on sourced strings will preserve
location information whenever possible. However, there are a few
types of string manipulation that can cause source information to
be discarded. The most common examples of operations that will
lose source information are:
- ``str.join()``, where the joining string is not sourced.
- ``str.replace()``, where the original string is not sourced.
- String formatting (the ``%`` operator).
- Regular expression substitution.
:ivar sources: A sorted tuple of ``(index, source)`` pairs. Each
such pair specifies that the source of
``self[index:index+len(source)]`` is ``source``. Any characters
for which no source is specified are sourceless (e.g., plain
Python characters that were concatenated to a sourced string).
When working with simple sourced strings, it's usually easier
to use the ``source`` attribute instead; however, the
``sources`` attribute is defined for both simple and compound
sourced strings.
"""
def __new__(cls, contents, source):
# If the SourcedString constructor is called directly, then
# choose one of its subclasses to delegate to.
if cls is SourcedString:
if isinstance(contents, str):
cls = SimpleSourcedByteString
elif isinstance(contents, unicode):
cls = SimpleSourcedUnicodeString
else:
raise TypeError("Expected 'contents' to be a unicode "
"string or a byte string")
# Create the new object using the appropriate string class's
# __new__, which takes just the contents argument.
return cls._stringtype.__new__(cls, contents)
_stringtype = None
"""A class variable, defined by subclasses of ``SourcedString``,
determining what type of string this class contains. Its
value must be either str or ``unicode``."""
#//////////////////////////////////////////////////////////////////////
#{ Splitting & Stripping Methods
#//////////////////////////////////////////////////////////////////////
def lstrip(self, chars=None):
s = self._stringtype.lstrip(self, chars)
return self[len(self)-len(s):]
def rstrip(self, chars=None):
s = self._stringtype.rstrip(self, chars)
return self[:len(s)]
def strip(self, chars=None):
return self.lstrip(chars).rstrip(chars)
_WHITESPACE_RE = re.compile(r'\s+')
def split(self, sep=None, maxsplit=None):
# Check for unicode/bytestring mismatches:
if self._mixed_string_types(sep, maxsplit):
return self._decode_and_call('split', sep, maxsplit)
# Use a regexp to split self.
if sep is None: sep_re = self._WHITESPACE_RE
else: sep_re = re.compile(re.escape(sep))
if maxsplit is None: return sep_re.split(self)
else: return sep_re.split(self, maxsplit)
def rsplit(self, sep=None, maxsplit=None):
# Check for unicode/bytestring mismatches:
if self._mixed_string_types(sep, maxsplit):
return self._decode_and_call('rsplit', sep, maxsplit)
# Split on whitespace use a regexp.
if sep is None:
seps = list(self._WHITESPACE_RE.finditer(self))
if maxsplit: seps = seps[-maxsplit:]
if not seps: return [self]
result = [self[:seps[0].start()]]
for i in range(1, len(seps)):
result.append(self[seps[i-1].end():seps[i].start()])
result.append(self[seps[-1].end():])
return result
# Split on a given string: use rfind.
else:
result = []
piece_end = len(self)
while maxsplit != 0:
sep_pos = self.rfind(sep, 0, piece_end)
if sep_pos < 0: break
result.append(self[sep_pos+len(sep):piece_end])
piece_end = sep_pos
if maxsplit is not None: maxsplit -= 1
if piece_end > 0:
result.append(self[:piece_end])
return result[::-1]
def partition(self, sep):
head, sep, tail = self._stringtype.partition(self, sep)
i, j = len(head), len(head)+len(sep)
return (self[:i], self[i:j], self[j:])
def rpartition(self, sep):
head, sep, tail = self._stringtype.rpartition(self, sep)
i, j = len(head), len(head)+len(sep)
return (self[:i], self[i:j], self[j:])
_NEWLINE_RE = re.compile(r'\n')
_LINE_RE = re.compile(r'.*\n?')
def splitlines(self, keepends=False):
if keepends:
return self._LINE_RE.findall(self)
else:
return self._NEWLINE_RE.split(self)
#//////////////////////////////////////////////////////////////////////
#{ String Concatenation Methods
#//////////////////////////////////////////////////////////////////////
@staticmethod
def concat(substrings):
"""
Return a sourced string formed by concatenating the given list
of substrings. Adjacent substrings will be merged when
possible.
Depending on the types and values of the supplied substrings,
the concatenated string's value may be a Python string (str
or ``unicode``), a ``SimpleSourcedString``, or a
``CompoundSourcedString``.
"""
# Flatten nested compound sourced strings, and merge adjacent
# strings where possible:
merged = []
for substring in substrings:
SourcedString.__add_substring_to_list(substring, merged)
# Return the concatenated string.
if len(merged) == 0:
return ''
elif len(merged) == 1:
return merged[0]
else:
return CompoundSourcedString(merged)
def __add__(self, other):
return SourcedString.concat([self, other])
def __radd__(self, other):
return SourcedString.concat([other, self])
def __mul__(self, other):
if other <= 0:
return self._stringtype('')
else:
result = self
for i in range(1, other):
result += self
return result
def __rmul__(self, other):
return self.__mul__(other)
def join(self, sequence):
seq_iter = iter(sequence)
# Add the first element; but if sequence is empty, return an
# empty string.
try:
s = seq_iter.next()
except StopIteration:
return self._stringtype('')
# Add the remaining elements, separated by self.
for elt in seq_iter:
s += self
s += elt
return s
@staticmethod
def __add_substring_to_list(substring, result):
"""
Helper for ``concat()``: add ``substring`` to the end of the
list of substrings in ``result``. If ``substring`` is compound,
then add its own substrings instead. Merge adjacent
substrings whenever possible. Discard empty un-sourced
substrings.
"""
# Flatten nested compound sourced strings.
if isinstance(substring, CompoundSourcedString):
for s in substring.substrings:
SourcedString.__add_substring_to_list(s, result)
# Discard empty Python substrings.
elif len(substring) == 0 and not isinstance(substring, SourcedString):
pass # discard.
# Merge adjacent simple sourced strings (when possible).
elif (result and isinstance(result[-1], SimpleSourcedString) and
isinstance(substring, SimpleSourcedString) and
result[-1].end == substring.begin and
result[-1].docid == substring.docid):
result[-1] = SourcedString.__merge_simple_substrings(
result[-1], substring)
# Merge adjacent Python strings.
elif (result and not isinstance(result[-1], SourcedString) and
not isinstance(substring, SourcedString)):
result[-1] += substring
# All other strings just get appended to the result list.
else:
result.append(substring)
@staticmethod
def __merge_simple_substrings(lhs, rhs):
"""
Helper for ``__add_substring_to_list()``: Merge ``lhs`` and
``rhs`` into a single simple sourced string, and return it.
"""
contents = lhs._stringtype.__add__(lhs, rhs)
if (isinstance(lhs.source, ConsecutiveCharStringSource) and
isinstance(rhs.source, ConsecutiveCharStringSource)):
source = ConsecutiveCharStringSource(
lhs.source.docid, lhs.source.begin, rhs.source.end)
else:
source = ContiguousCharStringSource(
lhs.source.docid, lhs.source.offsets+rhs.source.offsets[1:])
return SourcedString(contents, source)
#//////////////////////////////////////////////////////////////////////
#{ Justification Methods
#//////////////////////////////////////////////////////////////////////
def center(self, width, fillchar=' '):
return (fillchar * ((width-len(self))/2) + self +
fillchar * ((width-len(self)+1)/2))
def ljust(self, width, fillchar=' '):
return self + fillchar * (width-len(self))
def rjust(self, width, fillchar=' '):
return fillchar * (width-len(self)) + self
def zfill(self, width):
return self.rjust(width, '0')
#//////////////////////////////////////////////////////////////////////
#{ Replacement Methods
#//////////////////////////////////////////////////////////////////////
# [xx] There's no reason in principle why this can't preserve
# location information. But for now, it doesn't.
def __mod__(self, other):
return self._stringtype.__mod__(self, other)
def replace(self, old, new, count=0):
# Check for unicode/bytestring mismatches:
if self._mixed_string_types(old, new, count):
return self._decode_and_call('replace', old, new, count)
# Use a regexp to find all occurrences of old, and replace them w/ new.
result = ''
pos = 0
for match in re.finditer(re.escape(old), self):
result += self[pos:match.start()]
result += new
pos = match.end()
result += self[pos:]
return result
def expandtabs(self, tabsize=8):
if len(self) == 0: return self
pieces = re.split(r'([\t\n])', self)
result = ''
offset = 0
for piece in pieces:
if piece == '\t':
spaces = tabsize - (offset % tabsize)
# Each inserted space's source is the same as the
# source of the tab character that generated it.
result += spaces * SourcedString(' ', piece.source)
offset = 0
else:
result += piece
if piece == '\n': offset = 0
else: offset += len(piece)
return result
def translate(self, table, deletechars=''):
# Note: str.translate() and unicode.translate() have
# different interfaces.
if isinstance(self, unicode):
if deletechars:
raise TypeError('The unicode version of translate() does not '
'accept the deletechars parameter')
return SourcedString.concat(
[SourcedString(table.get(c,c), c.source)
for c in self if table.get(c,c) is not None])
else:
if len(table) != 256:
raise ValueError('translation table must be 256 characters long')
return SourcedString.concat(
[SourcedString(table[ord(c)], c.source)
for c in self if c not in deletechars])
#//////////////////////////////////////////////////////////////////////
#{ Unicode
#//////////////////////////////////////////////////////////////////////
# Unicode string -> byte string
def encode(self, encoding=None, errors='strict'):
if encoding is None: encoding = sys.getdefaultencoding()
if isinstance(self, str):
return self.decode().encode(encoding, errors)
# Encode characters one at a time.
result = []
for i, char in enumerate(self):
char_bytes = self._stringtype.encode(char, encoding, errors)
for char_byte in char_bytes:
if isinstance(char, SimpleSourcedString):
result.append(SourcedString(char_byte, char.source))
else:
assert not isinstance(char, CompoundSourcedString)
result.append(char_byte)
return SourcedString.concat(result)
# Byte string -> unicode string.
def decode(self, encoding=None, errors='strict'):
if encoding is None: encoding = sys.getdefaultencoding()
if isinstance(self, unicode):
return self.encode().decode(encoding, errors)
# Decode self into a plain unicode string.
unicode_chars = self._stringtype.decode(self, encoding, errors)
# Special case: if the resulting string has the same length
# that the source string does, then we can safely assume that
# each character is encoded with one byte; so we can just
# reuse our source.
if len(unicode_chars) == len(self):
return self._decode_one_to_one(unicode_chars)
# Otherwise: re-encode the characters, one at a time, to
# determine how long their encodings are.
result = []
first_byte = 0
for unicode_char in unicode_chars:
char_width = len(unicode_char.encode(encoding, errors))
last_byte = first_byte + char_width - 1
if (isinstance(self[first_byte], SourcedString) and
isinstance(self[last_byte], SourcedString)):
begin = self[first_byte].begin
end = self[last_byte].end
if end-begin == 1:
source = StringSource(docid=self[first_byte].docid,
begin=begin, end=end)
else:
source = StringSource(docid=self[first_byte].docid,
offsets=[begin, end])
result.append(SourcedString(unicode_char, source))
else:
result.append(unicode_char)
# First byte of the next char is 1+last byte of this char.
first_byte = last_byte+1
if last_byte+1 != len(self):
raise AssertionError("SourcedString.decode() does not support "
"encodings that are not symmetric.")
return SourcedString.concat(result)
@abstract
def _decode_one_to_one(unicode_chars):
"""
Helper for ``self.decode()``. Returns a unicode-decoded
version of this ``SourcedString``. ``unicode_chars`` is the
unicode-decoded contents of this ``SourcedString``.
This is used in the special case where the decoded string has
the same length that the source string does. As a result, we
can safely assume that each character is encoded with one
byte; so we can just reuse our source. E.g., this will happen
when decoding an ASCII string with utf-8.
"""
def _mixed_string_types(self, *args):
"""
Return true if the list (self,)+args contains at least one
unicode string and at least one byte string. (If this is the
case, then all byte strings should be converted to unicode by
calling decode() before the operation is performed. You can
do this automatically using ``_decode_and_call()``.
"""
any_unicode = isinstance(self, unicode)
any_bytestring = isinstance(self, str)
for arg in args:
any_unicode = any_unicode or isinstance(arg, unicode)
any_bytestring = any_bytestring or isinstance(arg, str)
return any_unicode and any_bytestring
def _decode_and_call(self, op, *args):
"""
If self or any of the values in args is a byte string, then
convert it to unicode by calling its decode() method. Then
return the result of calling self.op(*args). ``op`` is
specified using a string, because if ``self`` is a byte string,
then it will change type when it is decoded.
"""
# Make sure all args are decoded to unicode.
args = list(args)
for i in range(len(args)):
if isinstance(args[i], str):
args[i] = args[i].decode()
# Make sure self is decoded to unicode.
if isinstance(self, str):
self = self.decode()
# Retry the operation.
method = getattr(self, op)
return method(*args)
#//////////////////////////////////////////////////////////////////////
#{ Display
#//////////////////////////////////////////////////////////////////////
def pprint(self, vertical=False, wrap=70):
"""
Return a string containing a pretty-printed display of this
sourced string.
:param vertical: If true, then the returned display string will
have vertical orientation, rather than the default horizontal
orientation.
:param wrap: Controls when the pretty-printed output is wrapped
to the next line. If ``wrap`` is an integer, then lines are
wrapped when they become longer than ``wrap``. If ``wrap`` is
a string, then lines are wrapped immediately following that
string. If ``wrap`` is None, then lines are never wrapped.
"""
if len(self) == 0: return '[Empty String]'
if vertical == 1: return self._pprint_vertical() # special-cased
max_digits = len(str(max(max(getattr(c, 'begin', 0),
getattr(c, 'end', 0)) for c in self)))
if not isinstance(wrap, (basestring, int, long, type(None))):
raise TypeError("Expected wrap to be a string, int, or None.")
result = []
prev_offset = None # most recently displayed offset.
prev_docid = None
docid_line = ''
output_lines = [''] * (max_digits+2)
for pos, char in enumerate(self):
char_begin = getattr(char, 'begin', None)
char_end = getattr(char, 'end', None)
char_docid = getattr(char, 'docid', None)
# If the docid changed, then display the docid for the
# previous segment.
if char_docid != prev_docid:
width = len(output_lines[0]) - len(docid_line)
docid_line += self._pprint_docid(width, prev_docid)
prev_docid = char_docid
# Put a cap on the beginning of sourceless strings
elif not output_lines[0] and char_begin is None:
self._pprint_offset(' ', output_lines)
# Display the character.
if char_begin != prev_offset:
self._pprint_offset(char_begin, output_lines)
self._pprint_char(char, output_lines)
self._pprint_offset(char_end, output_lines)
prev_offset = char_end
# Decide whether we're at the end of the line or not.
line_len = len(output_lines[0])
if ( (isinstance(wrap, basestring) and
self[max(0,pos-len(wrap)+1):pos+1] == wrap) or
(isinstance(wrap, (int,long)) and line_len>=wrap) or
pos == len(self)-1):
# Put a cap on the end of sourceless strings
if char_end is None:
self._pprint_offset(' ', output_lines)
# Filter out any empty output lines.
output_lines = [l for l in output_lines if l.strip()]
# Draw the docid line
width = len(output_lines[0]) - len(docid_line)
docid_line += self._pprint_docid(width, prev_docid)
result.append(docid_line)
# Draw the output lines
for output_line in reversed(output_lines):
result.append(output_line)
result.append(output_lines[1])
# Reset variables for the next line.
prev_offset = None
prev_docid = None
docid_line = ''
output_lines = [''] * (max_digits+2)
return '\n'.join(result)
def _pprint_vertical(self):
result = []
prev_offset = None
max_digits = len(str(max(max(getattr(c, 'begin', 0),
getattr(c, 'end', 0)) for c in self)))
for pos, char in enumerate(self):
char_begin = getattr(char, 'begin', None)
char_end = getattr(char, 'end', None)
char_docid = getattr(char, 'docid', None)
if char_begin is None:
assert char_end is None
if pos == 0: result.append('+-----+')
result.append(':%s:' %
self._pprint_char_repr(char).center(5))
if pos == len(self)-1: result.append('+-----+')
prev_offset = None
else:
if char_begin != prev_offset:
result.append('+-----+ %s [%s]' % (
str(char_begin).rjust(max_digits), char_docid))
result.append('|%s| %s [%s]' % (
self._pprint_char_repr(char).center(5),
' '*max_digits, char_docid))
result.append('+-----+ %s [%s]' % (
str(char_end).rjust(max_digits), char_docid))
prev_offset = char_end
return '\n'.join(result)
_PPRINT_CHAR_REPRS = {'\n': r'\n', '\r': r'\r',
'\a': r'\a', '\t': r'\t'}
def _pprint_docid(self, width, docid):
if docid is None: return ' '*width
else: return '[%s]' % (docid[:width-2].center(width-2, '='))
def _pprint_char_repr(self, char):
# Decide how to represent this character.
if 32 <= ord(char) <= 127:
return str(char)
elif char in self._PPRINT_CHAR_REPRS:
return self._PPRINT_CHAR_REPRS[char]
elif isinstance(char, str):
return r'\x%02x' % ord(char)
else:
return r'\u%04x' % ord(char)
def _pprint_char(self, char, output_lines):
"""Helper for ``pprint()``: add a character to the
pretty-printed output."""
char_repr = self._pprint_char_repr(char)
output_lines[0] += char_repr
# Add fillers to the offset lines.
output_lines[1] += '-'*len(char_repr)
for i in range(2, len(output_lines)):
output_lines[i] += ' '*len(char_repr)
def _pprint_offset(self, offset, output_lines):
"""Helper for ``pprint()``: add an offset marker to the
pretty-printed output."""
if offset is None: return
output_lines[0] += '|'
output_lines[1] += '+'
offset_rep = str(offset).rjust(len(output_lines)-2)
for digit in range(len(offset_rep)):
output_lines[-digit-1] += offset_rep[digit]
#//////////////////////////////////////////////////////////////////////
# Simple Sourced String
#//////////////////////////////////////////////////////////////////////
class SimpleSourcedString(SourcedString):
"""
A single substring of a document, annotated with information about
the location in the document where it was originally found. See
``SourcedString`` for more information.
"""
def __new__(cls, contents, source):
# If the SimpleSourcedString constructor is called directly,
# then choose one of its subclasses to delegate to.
if cls is SimpleSourcedString:
if isinstance(contents, str):
cls = SimpleSourcedByteString
elif isinstance(contents, unicode):
cls = SimpleSourcedUnicodeString
else:
raise TypeError("Expected 'contents' to be a unicode "
"string or a byte string")
# Create the new object using the appropriate string class's
# __new__, which takes just the contents argument.
return cls._stringtype.__new__(cls, contents)
def __init__(self, contents, source):
"""
Construct a new sourced string.
:param contents: The string contents of the new sourced string.
:type contents: str or unicode
:param source: The source for the new string. If ``source`` is
a string, then it is used to automatically construct a new
``ConsecutiveCharStringSource`` with a begin offset of
``0`` and an end offset of ``len(contents)``. Otherwise,
``source`` should be a ``StringSource`` whose length matches
the length of ``contents``.
"""
if not isinstance(source, StringSource):
source = ConsecutiveCharStringSource(source, 0, len(contents))
elif len(source) != len(contents):
raise ValueError("Length of source (%d) must match length of "
"contents (%d)" % (len(source), len(contents)))
self.source = source
"""A ``StringSource`` specifying the location where this string
occurred in the source document."""
@property
def begin(self):
"""
The document offset where the string begins. (I.e.,
the offset of the first character in the string.)"""
return self.source.begin
@property
def end(self):
"""The document offset where the string ends. (For character
offsets, one plus the offset of the last character; for byte
offsets, one plus the offset of the last byte that encodes the
last character)."""
return self.source.end
@property
def docid(self):
"""
An identifier (such as a filename) that specifies the document
where the string was found.
"""
return self.source.docid
@property
def sources(self):
return ((0, self.source),)
def __repr__(self):
if self.end == self.begin+1:
source_repr = '@[%s]' % (self.begin,)
else:
source_repr = '@[%s:%s]' % (self.begin, self.end)
return self._stringtype.__repr__(self) + source_repr
def __getitem__(self, index):
result = self._stringtype.__getitem__(self, index)
if isinstance(index, slice):
if index.step not in (None, 1):
return result
else:
start, stop = slice_bounds(self, index)
return self.__getslice__(start, stop)
else:
return SourcedString(result, self.source[index])
def __getslice__(self, start, stop):
# Negative indices get handled *before* __getslice__ is
# called. Restrict start/stop to be within the range of the
# string, to prevent negative indices from being adjusted
# twice.
start = max(0, min(len(self), start))
stop = max(start, min(len(self), stop))
return SourcedString(
self._stringtype.__getslice__(self, start, stop),
self.source[start:stop])
def capitalize(self):
result = self._stringtype.capitalize(self)
return SourcedString(result, self.source)
def lower(self):
result = self._stringtype.lower(self)
return SourcedString(result, self.source)
def upper(self):
result = self._stringtype.upper(self)
return SourcedString(result, self.source)
def swapcase(self):
result = self._stringtype.swapcase(self)
return SourcedString(result, self.source)
def title(self):
result = self._stringtype.title(self)
return SourcedString(result, self.source)
def _decode_one_to_one(self, unicode_chars):
return SourcedString(unicode_chars, self.source)
#//////////////////////////////////////////////////////////////////////
# Compound Sourced String
#//////////////////////////////////////////////////////////////////////
class CompoundSourcedString(SourcedString):
"""
A string constructed by concatenating substrings from multiple
sources, and annotated with information about the locations where
those substrings were originally found. See ``SourcedString`` for
more information.
:ivar substrings: The tuple of substrings that compose this
compound sourced string. Every compound sourced string is
required to have at least two substrings; and the substrings
themselves may never be CompoundSourcedStrings.
"""
def __new__(cls, substrings):
# If the CompoundSourcedString constructor is called directly,
# then choose one of its subclasses to delegate to.
if cls is CompoundSourcedString:
# Decide whether to use a unicode string or a byte string.
use_unicode = sum(1 for substring in substrings
if isinstance(substring, unicode))
if use_unicode:
cls = CompoundSourcedUnicodeString
else:
cls = CompoundSourcedByteString
# Build the concatenated string using str.join(), which will
# return a str or unicode object; never a sourced string.
contents = ''.join(substrings)
# Create the new object using the appropriate string class's
# __new__, which takes just the contents argument.
return cls._stringtype.__new__(cls, contents)
def __init__(self, substrings):
"""
Construct a new compound sourced string that combines the
given list of substrings.
Typically, compound sourced strings should not be constructed
directly; instead, use ``SourcedString.concat()``, which
flattens nested compound sourced strings, and merges adjacent
substrings when possible.
:raise ValueError: If ``len(substrings) < 2``
:raise ValueError: If ``substrings`` contains any
``CompoundSourcedString``s.
"""
if len(substrings) < 2:
raise ValueError("CompoundSourcedString requires at least "
"two substrings")
# Don't nest compound sourced strings.
for substring in substrings:
if isinstance(substring, CompoundSourcedString):
raise ValueError("substrings may not contain "
"CompoundSourcedStrings.")
self.substrings = tuple(substrings)
@property
def sources(self):
index = 0
source_list = []
for substring in self.substrings:
if isinstance(substring, SourcedString):
source_list.append( (index, substring.source) )
index += len(substring)
return tuple(source_list)
def __repr__(self):
sources = [self._source_repr(s) for s in self.substrings]
source_str = '@[%s]' % ','.join(sources)
return self._stringtype.__repr__(self) + source_str
def _source_repr(self, substring):
if isinstance(substring, SimpleSourcedString):
return '%s:%s' % (substring.begin, substring.end)
else:
return '...'
def __getitem__(self, index):
if isinstance(index, slice):
if index.step not in (None, 1):
return self._stringtype.__getitem__(self, index)
else:
start, stop = slice_bounds(self, index)
return self.__getslice__(start, stop)
else:
if index < 0: index += len(self)
if index < 0 or index >= len(self):
raise IndexError('StringSource index out of range')
return self.__getslice__(index, index+1)
def __getslice__(self, start, stop):
# Bounds checking.
start = max(0, min(len(self), start))
stop = max(start, min(len(self), stop))
# Construct a source list for the resulting string.
result_substrings = []
offset = 0
for substring in self.substrings:
if offset+len(substring) > start:
s, e = max(0, start-offset), stop-offset
result_substrings.append(substring[s:e])
offset += len(substring)
if offset >= stop: break
# Concatentate the resulting substrings.
if len(result_substrings) == 0:
return ''
elif len(result_substrings) == 1:
return result_substrings[0]
else:
return SourcedString.concat(result_substrings)
def capitalize(self):
return SourcedString.concat([s.capitalize() for s in self.substrings])
def lower(self):
return SourcedString.concat([s.lower() for s in self.substrings])
def upper(self):
return SourcedString.concat([s.upper() for s in self.substrings])
def swapcase(self):
return SourcedString.concat([s.swapcase() for s in self.substrings])
def title(self):
return SourcedString.concat([s.title() for s in self.substrings])
def encode(self, encoding=None, errors='strict'):
return SourcedString.concat([s.encode(encoding, errors)
for s in self.substrings])
def _decode_one_to_one(self, unicode_chars):
index = 0
result = []
for substring in self.substrings:
decoded_substring = unicode_chars[index:index+len(substring)]
if isinstance(substring, SourcedString):
result.append(SourcedString(decoded_substring, substring.source))
else:
result.append(decoded_substring)
index += len(substring)
return SourcedString.concat(result)
#//////////////////////////////////////////////////////////////////////
# Concrete Sourced String Classes
#//////////////////////////////////////////////////////////////////////
class SimpleSourcedByteString(SimpleSourcedString, str):
_stringtype = str
class SimpleSourcedUnicodeString(SimpleSourcedString, unicode):
_stringtype = unicode
class CompoundSourcedByteString(CompoundSourcedString, str):
_stringtype = str
class CompoundSourcedUnicodeString(CompoundSourcedString, unicode):
_stringtype = unicode
def __init__(self, substrings):
# If any substrings have type 'str', then decode them to unicode.
for i in range(len(substrings)):
if not isinstance(substrings[i], unicode):
substrings[i] = substrings[i].decode()
CompoundSourcedString.__init__(self, substrings)
#//////////////////////////////////////////////////////////////////////
# Sourced String Regexp
#//////////////////////////////////////////////////////////////////////
_original_re_compile = re.compile
_original_re_sub = re.sub
_original_re_subn = re.subn
class SourcedStringRegexp(object):
"""
Wrapper for regexp pattern objects that cause the ``sub`` and
``subn`` methods to return sourced strings.
"""
def __init__(self, pattern, flags=0):
if isinstance(pattern, basestring):
pattern = _original_re_compile(pattern, flags)
self.pattern = pattern
def __getattr__(self, attr):
return getattr(self.pattern, attr)
def subn(self, repl, string, count=0):
if (isinstance(repl, SourcedString) or
isinstance(string, SourcedString)):
result = ''
pos = 0
n = 0
for match in self.pattern.finditer(string):
result += string[pos:match.start()]
result += repl
pos = match.end()
n += 1
if count and n==count: break
result += string[pos:]
return result, n
else:
return self.pattern.subn(repl, string, count)
def sub(self, repl, string, count=0):
return self.subn(repl, string, count)[0]
@staticmethod
def patch_re_module():
"""
Modify the standard ``re`` module by installing new versions of
the functions ``re.compile``, ``re.sub``, and ``re.subn``,
causing regular expression substitutions to return
``SourcedStrings`` when called with ``SourcedStrings``
arguments.
Use this function only if necessary: it potentially affects
all Python modules that use regular expressions!
"""
def new_re_sub(pattern, repl, string, count=0):
return re.compile(pattern).sub(repl, string, count)
def new_re_subn(pattern, repl, string, count=0):
return re.compile(pattern).subn(repl, string, count)
re.compile = SourcedStringRegexp
re.sub = new_re_sub
re.subn = new_re_subn
@staticmethod
def unpatch_re_module():
"""
Restore the standard ``re`` module to its original state
(undoing the work that was done by ``patch_re_module()``).
"""
re.compile = _original_re_compile
re.sub = _original_re_sub
re.subn = _original_re_subn
#//////////////////////////////////////////////////////////////////////
# Sourced String Stream
#//////////////////////////////////////////////////////////////////////
class SourcedStringStream(object):
"""
Wrapper for a read-only stream that causes ``read()`` (and related
methods) to return ``SourcedString`` objects.
``seek()`` and ``tell()`` are supported, but (currently) there are
some restrictions on the values that may be passed to ``seek()``.
"""
def __init__(self, stream, docid=None, byte_offsets=False):
self.stream = stream
"""The underlying stream."""
self.docid = docid
"""The docid attribute for sourced strings"""
self.charpos = 0
"""The current character (not byte) position"""
assert not byte_offsets, 'not supported yet!'
#/////////////////////////////////////////////////////////////////
# Read methods
#/////////////////////////////////////////////////////////////////
def read(self, size=None):
if size is None: return self._sourced_string(self.stream.read())
else: return self._sourced_string(self.stream.read(size))
def readline(self, size=None):
if size is None: return self._sourced_string(self.stream.readline())
else: return self._sourced_string(self.stream.readline(size))
def readlines(self, sizehint=None, keepends=True):
"""
Read this file's contents, decode them using this reader's
encoding, and return it as a list of unicode lines.
:rtype: list(unicode)
:param sizehint: Ignored.
:param keepends: If false, then strip newlines.
"""
return self.read().splitlines(keepends)
def next(self):
"""Return the next decoded line from the underlying stream."""
line = self.readline()
if line: return line
else: raise StopIteration
def __iter__(self):
"""Return self"""
return self
def xreadlines(self):
"""Return self"""
return self
def _sourced_string(self, contents):
"""Turn the given string into an sourced string, and update
charpos."""
# [xx] currently we only support character offsets, not byte
# offsets!
source = ConsecutiveCharStringSource(self.docid, self.charpos,
self.charpos+len(contents))
self.charpos += len(contents)
return SourcedString(contents, source)
#/////////////////////////////////////////////////////////////////
# Pass-through methods & properties
#/////////////////////////////////////////////////////////////////
@property
def closed(self):
"""True if the underlying stream is closed."""
return self.stream.closed
@property
def name(self):
"""The name of the underlying stream."""
return self.stream.name
@property
def mode(self):
"""The mode of the underlying stream."""
return self.stream.mode
def close(self):
"""Close the underlying stream."""
self.stream.close()
#/////////////////////////////////////////////////////////////////
# Seek and tell
#/////////////////////////////////////////////////////////////////
class SourcedStringStreamPos(int):
def __new__(cls, bytepos, charpos):
self = int.__new__(cls, bytepos)
self.charpos = charpos
return self
def seek(self, offset, whence=0):
if whence == 0:
if isinstance(offset, self.SourcedStringStreamPos):
self.stream.seek(offset)
self.charpos = offset.charpos
elif offset == 0:
self.stream.seek(0)
self.charpos = 0
else:
raise TypeError('seek() must be called with a value that '
'was returned by tell().')
elif whence == 1:
raise TypeError('Relative seek not supported for '
'SourcedStringStream.')
elif whence == 2:
raise TypeError('Seek-from-end not supported for '
'SourcedStringStream.')
else:
raise ValueError('Bad whence value %r' % whence)
def tell(self):
bytepos = self.stream.tell()
return self.SourcedStringStreamPos(bytepos, self.charpos)
```
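A small sketch of what the classes above provide (Python 2; the docid `'example.txt'` is invented): a ``SourcedString`` built from plain contents plus a docid keeps track, through slicing and concatenation, of where each character came from.
```python
# Hypothetical sketch: SourcedString preserves offsets through slicing/concatenation.
s = SourcedString("Hello, world", "example.txt")   # wraps a ConsecutiveCharStringSource(0, 12)
word = s[7:12]
print repr(word)                           # 'world'@[7:12]
print word.docid, word.begin, word.end     # example.txt 7 12
print (s + "!").sources                    # ((0, StringSource('example.txt', begin=0, end=12)),)
```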
#### File: nltk/tag/simplify.py
```python
brown_mapping1 = {
'j': 'ADJ', 'p': 'PRO', 'm': 'MOD', 'q': 'DET',
'w': 'WH', 'r': 'ADV', 'i': 'P',
'u': 'UH', 'e': 'EX', 'o': 'NUM', 'b': 'V',
'h': 'V', 'f': 'FW', 'a': 'DET', 't': 'TO',
'cc': 'CNJ', 'cs': 'CNJ', 'cd': 'NUM',
'do': 'V', 'dt': 'DET',
'nn': 'N', 'nr': 'N', 'np': 'NP', 'nc': 'N'
}
brown_mapping2 = {
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG', 'vbn': 'VN'
}
def simplify_brown_tag(tag):
tag = tag.lower()
if tag[0] in brown_mapping1:
return brown_mapping1[tag[0]]
elif tag[:2] in brown_mapping1: # still doesn't handle DOD tag correctly
return brown_mapping1[tag[:2]]
try:
if '-' in tag:
tag = tag.split('-')[0]
return brown_mapping2[tag]
except KeyError:
return tag.upper()
# Wall Street Journal tags (Penn Treebank)
wsj_mapping = {
'-lrb-': '(', '-rrb-': ')', '-lsb-': '(',
'-rsb-': ')', '-lcb-': '(', '-rcb-': ')',
'-none-': '', 'cc': 'CNJ', 'cd': 'NUM',
'dt': 'DET', 'ex': 'EX', 'fw': 'FW', # existential "there", foreign word
'in': 'P', 'jj': 'ADJ', 'jjr': 'ADJ',
'jjs': 'ADJ', 'ls': 'L', 'md': 'MOD', # list item marker
'nn': 'N', 'nnp': 'NP', 'nnps': 'NP',
'nns': 'N', 'pdt': 'DET', 'pos': '',
'prp': 'PRO', 'prp$': 'PRO', 'rb': 'ADV',
'rbr': 'ADV', 'rbs': 'ADV', 'rp': 'PRO',
'sym': 'S', 'to': 'TO', 'uh': 'UH',
'vb': 'V', 'vbd': 'VD', 'vbg': 'VG',
'vbn': 'VN', 'vbp': 'V', 'vbz': 'V',
'wdt': 'WH', 'wp': 'WH', 'wp$': 'WH',
'wrb': 'WH',
'bes': 'V', 'hvs': 'V', 'prp^vbp': 'PRO' # additions for NPS Chat corpus
}
def simplify_wsj_tag(tag):
if tag and tag[0] == '^':
tag = tag[1:]
try:
tag = wsj_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
indian_mapping = {
'nn': 'N', 'vm': 'MOD', 'jj': 'ADJ', 'nnp': 'NP',
'prp': 'PRO', 'prep': 'PRE', 'vaux': 'V', 'vfm': 'V',
'cc': 'CNJ', 'nnpc': 'NP', 'nnc': 'N', 'qc': 'QC',
'dem': 'DET', 'vrb': 'V', 'qfnum': 'NUM', 'rb': 'ADV',
'qf': 'DET', 'punc': '.', 'rp': 'PRT', 'psp': 'PSP',
'nst': 'N', 'nvb': 'N', 'vjj': 'V', 'neg': 'NEG',
'vnn': 'V', 'xc': 'XC', 'intf': 'INTF', 'nloc': 'N',
'jvb': 'ADJ', 'wq': 'WH', 'qw': 'WH', 'jj:?': 'ADJ',
'"cc': 'CNJ', 'nnp,': 'NP', 'sym\xc0\xa7\xb7': 'SYM',
'symc': 'SYM'}
def simplify_indian_tag(tag):
if ':' in tag:
tag = tag.split(':')[0]
try:
tag = indian_mapping[tag.lower()]
except KeyError:
pass
return tag.upper()
# Alpino tags
alpino_mapping = {
'noun':'N', 'name': 'NP', 'vg': 'VG', 'punct':'.',
'verb':'V', 'pron': 'PRO', 'prep':'P'
}
def simplify_alpino_tag(tag):
try:
tag = alpino_mapping[tag]
except KeyError:
pass
return tag.upper()
# Default tag simplification
def simplify_tag(tag):
return tag[0].upper()
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
```
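A few illustrative calls, with the expected outputs read directly off the mapping tables above:
```python
# Illustrative checks against the mappings defined above.
assert simplify_wsj_tag('NNS') == 'N'        # plural common noun
assert simplify_wsj_tag('VBD') == 'VD'       # past-tense verb
assert simplify_brown_tag('jj-tl') == 'ADJ'  # first letter 'j' -> ADJ
assert simplify_indian_tag('NNP') == 'NP'
assert simplify_alpino_tag('noun') == 'N'
```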
#### File: nltk/test/runtests.py
```python
from __future__ import absolute_import
import sys
import os
import nose
NLTK_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, NLTK_ROOT)
NLTK_TEST_DIR = os.path.join(NLTK_ROOT, 'nltk')
# These tests are expected to fail.
# NOTE: Remember to remove tests from this list after they have been fixed.
FAILING_TESTS = [
"ccg.doctest", # This test randomly fails - nondeterministic output
"collocations.doctest",
"corpus.doctest",
"portuguese_en.doctest",
"probability.doctest",
"relextract.doctest",
]
# These tests require extra dependencies and should not run by default
# TODO: Run the tests if the relevant dependencies are present on the system
DEPENDENT_TESTS = [
# "classify.doctest",
"discourse.doctest",
"drt.doctest",
"gluesemantics.doctest",
"inference.doctest",
"nonmonotonic.doctest",
]
EXCLUDED_TESTS = FAILING_TESTS + DEPENDENT_TESTS
_EXCLUDE_ARGV = ['--exclude='+test for test in EXCLUDED_TESTS]
if __name__ == '__main__':
from nltk.test.doctest_nose_plugin import DoctestFix
from nose.plugins.manager import PluginManager
from nose.plugins.doctests import Doctest
from nose.plugins import builtin
class NltkPluginManager(PluginManager):
"""
Nose plugin manager that replaces standard doctest plugin
with a patched version.
"""
def loadPlugins(self):
for plug in builtin.plugins:
if plug != Doctest:
self.addPlugin(plug())
self.addPlugin(DoctestFix())
super(NltkPluginManager, self).loadPlugins()
manager = NltkPluginManager()
manager.loadPlugins()
# allow passing extra options and running individual tests
# Examples:
#
# python runtests.py semantics.doctest
# python runtests.py --with-id -v
# python runtests.py --with-id -v nltk.featstruct
args = sys.argv[1:]
if not args:
args = [NLTK_TEST_DIR]
if all(arg.startswith('-') for arg in args):
# only extra options were passed
args += [NLTK_TEST_DIR]
nose.main(argv=_EXCLUDE_ARGV + [
#'--with-xunit',
#'--xunit-file=$WORKSPACE/nosetests.xml',
'--with-doctest',
'--doctest-extension=.doctest',
'--doctest-options=+ELLIPSIS,+NORMALIZE_WHITESPACE,+IGNORE_EXCEPTION_DETAIL',
#'--verbosity=3',
] + args, plugins=manager.plugins)
``` |
{
"source": "joelgrus/advent2021",
"score": 4
} |
#### File: joelgrus/advent2021/day07.py
```python
from typing import List
RAW = "16,1,2,0,4,2,7,1,2,14"
POSITIONS = [int(x) for x in RAW.split(",")]
def total_distance_to(positions: List[int], target: int) -> int:
return sum(abs(x - target) for x in positions)
def best_position(positions: List[int]) -> int:
lo = min(positions)
hi = max(positions)
return min(range(lo, hi+1), key=lambda x: total_distance_to(positions, x))
BP = best_position(POSITIONS)
assert BP == 2
TD = total_distance_to(POSITIONS, BP)
assert TD == 37
def cost(num_steps: int) -> int:
"""
the first step costs 1
the second step costs 2 additional
the third step costs 3 additional
and so on
uses the well-known fact that
1 + 2 + ... + n = n(n+1)/2
"""
return num_steps * (num_steps + 1) // 2
def sum_of_squares(n: int) -> int:
"""
returns the sum of the first n squares
using the mathematical fact that
1^2 + 2^2 + ... + n^2 = n(n+1)(2n+1)/6
"""
return n * (n + 1) * (2 * n + 1) // 6
assert cost(11) == 66
def cost_to_target(positions: List[int], target: int) -> int:
return sum(cost(abs(x - target)) for x in positions)
def lowest_cost_position(positions: List[int]) -> int:
lo = min(positions)
hi = max(positions)
return min(range(lo, hi+1), key=lambda x: cost_to_target(positions, x))
LCP = lowest_cost_position(POSITIONS)
assert LCP == 5
TC = cost_to_target(POSITIONS, LCP)
assert TC == 168
if __name__ == "__main__":
raw = open("data/day07.txt").read()
positions = [int(x) for x in raw.split(",")]
bp = best_position(positions)
td = total_distance_to(positions, bp)
print(f"Best position: {bp}")
print(f"Total distance: {td}")
lcp = lowest_cost_position(positions)
tc = cost_to_target(positions, lcp)
print(f"Lowest cost position: {lcp}")
print(f"Total cost: {tc}")
``` |
{
"source": "joelgrus/joelnet",
"score": 3
} |
#### File: joelnet/joelnet/train.py
```python
from joelnet.tensor import Tensor
from joelnet.nn import NeuralNet
from joelnet.loss import Loss, MSE
from joelnet.optim import Optimizer, SGD
from joelnet.data import DataIterator, BatchIterator
def train(net: NeuralNet,
inputs: Tensor,
targets: Tensor,
num_epochs: int = 5000,
iterator: DataIterator = BatchIterator(),
loss: Loss = MSE(),
optimizer: Optimizer = SGD()) -> None:
for epoch in range(num_epochs):
epoch_loss = 0.0
for batch in iterator(inputs, targets):
predicted = net.forward(batch.inputs)
epoch_loss += loss.loss(predicted, batch.targets)
grad = loss.grad(predicted, batch.targets)
net.backward(grad)
optimizer.step(net)
print(epoch, epoch_loss)
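# A minimal usage sketch (assumption: joelnet also ships Linear and Tanh layers in
# joelnet.layers, as in the xor example that accompanies the library; those names
# are not defined in this module):
#
#   import numpy as np
#   from joelnet.layers import Linear, Tanh
#   inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
#   targets = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])
#   net = NeuralNet([Linear(input_size=2, output_size=2), Tanh(), Linear(input_size=2, output_size=2)])
#   train(net, inputs, targets)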
``` |
{
"source": "joelgrus/science-questions",
"score": 4
} |
#### File: science-questions/python-data/questions.py
```python
import re
import csv
import random
from collections import defaultdict, Counter
from itertools import accumulate
import json
# get all the questions from the csv
with open('questions.csv') as f:
reader = csv.DictReader(f)
raw_questions = [row['question'] for row in reader]
# want to split in questions and answers
# example: "What's fun? (A) python (B) ruby (C) haskell (D) C++"
# re.split divides a string into pieces based on a regex
split = "\([A-D]\)"
# but that doesn't catch all the cases. let's see what we miss:
for q in raw_questions:
if len(re.split(split, q)) not in [4, 5]:
print(q)
break
# after playing around, this list seems to be exhaustive
splits = [
"\([A-D]\)", # (A) python (B) haskell (C) javascript (D) ruby
"\s[A-D]\.\s", # A. python B. haskell C. javascript D. ruby
"\s[1-4]\.\s", # 1. python 2. haskell 3. javascript 4. ruby
"\s[A-D]\s", # A python B haskell C javascript D ruby
"\s[FGHJ]\s", # F python G haskell H javascript J ruby
"\n [A-D]\s" # A python
# B haskell
# C javascript
# D ruby
]
# see if there's any we missed
for q in raw_questions:
if not any(len(re.split(split, q)) in [4, 5]
for split in splits):
print(q)
# OK, now we're ready to parse the questions
questions = []
answers = []
# we'll use sentinel tokens for the start and stop of a sentence
START = "__START__"
STOP = "__STOP__"
# for each question, find the first split that works. add the question to our
# list of questions, and the answers to the list of answers
for q in raw_questions:
for split in splits:
pieces = [x.strip() for x in re.split(split, q)]
if len(pieces) in [4,5]:
questions.append(pieces[0])
answers.extend(pieces[1:])
break
else:
print(q + "\n")
# we'll store transitions as a dict with string keys and string list values
# transitions["What"] is all the words that we observed following "What"
def make_transitions(sentences):
transitions = defaultdict(list)
for sentence in sentences:
# regex looks for "?", ".", "," or groups of characters that aren't
# any of those, and aren't spaces
words = [START] + re.findall("[^ ?\.,]+|\?|\.|\,", sentence) + [STOP]
for prev_word, next_word in zip(words, words[1:]):
transitions[prev_word].append(next_word)
return transitions
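# For example, make_transitions(["What is fun?"]) yields (up to the defaultdict wrapper):
#   {"__START__": ["What"], "What": ["is"], "is": ["fun"], "fun": ["?"], "?": ["__STOP__"]}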
# given transitions and a previous word, pick a random next word
def next_word(transitions, word):
return random.choice(transitions.get(word, [STOP]))
# and then generate a whole sequence of words
# e.g. print(' '.join(markov_gen(q_transitions)))
def markov_gen(transitions):
word = next_word(transitions, START)
while word != STOP:
yield word
word = next_word(transitions, word)
# one set of transitions for questions, one for answers, and we'll write them
# out to json, so that our service can use them
q_transitions = make_transitions(questions)
a_transitions = make_transitions(answers)
with open('questions.json', 'w') as f:
f.write(json.dumps(q_transitions))
with open('answers.json', 'w') as f:
f.write(json.dumps(a_transitions))
# it's inefficient to store those lists with multiplicity. that is, if all 2000
# questions start with "What is" then the entry for "What" will be a list of
# 2000 "is". Here I tried compressing the lists to (word, cumulative count)
# which you could then pick from uniformly using linear or binary search.
# But it ended up not saving that much space, so I didn't bother.
def compress_transitions(transitions):
compressed = {}
for token, next_tokens in transitions.items():
counts = Counter(next_tokens)
compressed[token] = list(zip(counts.keys(), accumulate(counts.values())))
return compressed
with open('questions_compressed.json', 'w') as f:
f.write(json.dumps(compress_transitions(q_transitions)))
with open('answers_compressed.json', 'w') as f:
f.write(json.dumps(compress_transitions(a_transitions)))
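# A sketch of how the compressed (word, cumulative count) representation described
# above could be sampled with binary search; this helper is illustrative only and
# is not used elsewhere in this script.
from bisect import bisect_right
def next_word_compressed(compressed, word):
    pairs = compressed.get(word)
    if not pairs:
        return STOP
    tokens = [token for token, _ in pairs]
    totals = [total for _, total in pairs]
    r = random.randrange(totals[-1])
    return tokens[bisect_right(totals, r)]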
``` |
{
"source": "joelgtaylor/Auto-Merger",
"score": 2
} |
#### File: merger/utils/shellutils.py
```python
import logging
import subprocess
class ShellUtils:
"""
Facade for command line execution.
"""
def __init__(self, LOGGER):
self.logger = LOGGER
def runshellcmd(self, cmd, fprint=True):
"""Given a shell command execute it and
possibly print to console its interaction with console.
Args:
cmd: The shell command to execute.
fprint: Whether to print its interaction with console.
"""
if fprint:
self.log(cmd)
lines = ''
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
for line in process.stdout.readlines():
lines = lines + line
process.wait()
if fprint:
self.log("cmdresult: " + lines)
return lines
def log(self, message):
"""If no LOGGER then just print to console.
Args:
message: Message to log or to print.
"""
if self.logger is not None:
self.logger.debug(message)
else: logging.info(message)
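# Example usage (hypothetical logger and command, for illustration only):
#   utils = ShellUtils(logging.getLogger(__name__))
#   output = utils.runshellcmd('svn status', fprint=False)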
```
#### File: Auto-Merger/test/test.py
```python
__author__ = 'tomerb'
import unittest
class MultiReposTest(unittest.TestCase):
def setUp(self):
self.branches_map_as_str = """aaa"""
def test_extract_branch_with_repo_from_look__base_build_xml(self):
self.assertEquals("a", "a")
``` |
{
"source": "joelguerrero/PhiFlow",
"score": 2
} |
#### File: phi/data/fluidformat.py
```python
import inspect
import json
import logging
import os
import os.path
import re
import shutil
import warnings
import six
import numpy as np
from os.path import join, isfile, isdir
from phi import struct
from phi.physics import field
def read_zipped_array(filename):
file = np.load(filename)
array = file[file.files[-1]] # last entry in npz file has to be data array
if array.shape[0] != 1 or len(array.shape) == 1:
array = np.expand_dims(array, axis=0)
if array.shape[-1] != 1:
array = array[..., ::-1]
return array
def write_zipped_array(filename, array):
if array.shape[0] == 1 and len(array.shape) > 1:
array = array[0, ...]
if array.shape[-1] != 1:
array = array[..., ::-1]
np.savez_compressed(filename, array)
def _check_same_dimensions(arrays):
for array in arrays:
if array.shape[1:-1] != arrays[0].shape[1:-1]:
raise ValueError("All arrays should have the same spatial dimensions, but got %s and %s" % (array.shape, arrays[0].shape))
def read_sim_frame(simpath, fieldnames, frame, set_missing_to_none=True):
if isinstance(fieldnames, six.string_types):
fieldnames = [fieldnames]
for fieldname in fieldnames:
filename = join(simpath, "%s_%06i.npz" % (fieldname, frame))
if os.path.isfile(filename):
yield read_zipped_array(filename)
else:
if set_missing_to_none:
yield None
else:
raise IOError("Missing data at frame %d: %s" % (frame, filename))
def write_sim_frame(simpath, arrays, fieldnames, frame, check_same_dimensions=False):
if check_same_dimensions:
_check_same_dimensions(arrays)
os.path.isdir(simpath) or os.mkdir(simpath)
if not isinstance(fieldnames, (tuple, list)) and not isinstance(arrays, (tuple, list)):
fieldnames = [fieldnames]
arrays = [arrays]
filenames = [join(simpath, "%s_%06i.npz" % (name, frame)) for name in fieldnames]
for i in range(len(arrays)):
write_zipped_array(filenames[i], arrays[i])
return filenames
def read_sim_frames(simpath, fieldnames=None, frames=None):
if fieldnames is None:
fieldnames = get_fieldnames(simpath)
if not fieldnames:
return []
if frames is None:
frames = get_frames(simpath, fieldnames[0])
if isinstance(frames, int):
frames = [frames]
single_fieldname = isinstance(fieldnames, six.string_types)
if single_fieldname:
fieldnames = [fieldnames]
field_lists = [[] for f in fieldnames]
for i in frames:
fields = list(read_sim_frame(simpath, fieldnames, i, set_missing_to_none=False))
for j in range(len(fieldnames)):
field_lists[j].append(fields[j])
    result = [np.concatenate(field_list, 0) for field_list in field_lists]
return result if not single_fieldname else result[0]
def get_fieldnames(simpath):
fieldnames_set = {f[:-11] for f in os.listdir(simpath) if f.endswith(".npz")}
return sorted(fieldnames_set)
def first_frame(simpath, fieldname=None):
return min(get_frames(simpath, fieldname))
def get_frames(simpath, fieldname=None, mode="intersect"):
if fieldname is not None:
all_frames = {int(f[-10:-4]) for f in os.listdir(simpath) if f.startswith(fieldname) and f.endswith(".npz")}
return sorted(all_frames)
else:
frames_lists = [get_frames(simpath, fieldname) for fieldname in get_fieldnames(simpath)]
if mode.lower() == "intersect":
intersection = set(frames_lists[0]).intersection(*frames_lists[1:])
return sorted(intersection)
elif mode.lower() == "union":
if not frames_lists:
return []
union = set(frames_lists[0]).union(*frames_lists[1:])
return sorted(union)
class Scene(object):
def __init__(self, dir, category, index):
self.dir = dir
self.category = category
self.index = index
self._properties = None
@property
def path(self):
return join(self.dir, self.category, "sim_%06d" % self.index)
def subpath(self, name, create=False):
path = join(self.path, name)
if create and not os.path.isdir(path):
os.mkdir(path)
return path
def _init_properties(self):
if self._properties is not None:
return
dfile = join(self.path, "description.json")
        if isfile(dfile):
            with open(dfile) as stream:
                self._properties = json.load(stream)
else:
self._properties = {}
def exists_config(self):
return isfile(join(self.path, "description.json"))
@property
def properties(self):
self._init_properties()
return self._properties
@properties.setter
def properties(self, dict):
self._properties = dict
with open(join(self.path, "description.json"), "w") as out:
json.dump(self._properties, out, indent=2)
def put_property(self, key, value):
self._init_properties()
self._properties[key] = value
with open(join(self.path, "description.json"), "w") as out:
json.dump(self._properties, out, indent=2)
def read_sim_frames(self, fieldnames=None, frames=None):
return read_sim_frames(self.path, fieldnames=fieldnames, frames=frames)
def read_array(self, fieldname, frame):
return next(read_sim_frame(self.path, [fieldname], frame, set_missing_to_none=False))
def write_sim_frame(self, arrays, fieldnames, frame, check_same_dimensions=False):
write_sim_frame(self.path, arrays, fieldnames, frame, check_same_dimensions=check_same_dimensions)
def write(self, obj, names=None, frame=0):
if struct.isstruct(obj):
obj = _transform_for_writing(obj)
if names is None:
names = struct.names(obj)
values = struct.flatten(obj)
names = struct.flatten(names)
names = [self._filename(name) for name in names]
self.write_sim_frame(values, names, frame)
else:
name = str(names) if names is not None else 'unnamed'
self.write_sim_frame([obj], [name], frame)
def read(self, obj, frame=0):
if struct.isstruct(obj):
obj = _transform_for_writing(obj)
names = struct.flatten(obj)
if not np.all([isinstance(n, six.string_types) for n in names]):
names = struct.names(obj)
data = struct.map(lambda name: self.read_array(self._filename(name), frame), names)
return data
else:
return self.read_array('unnamed', frame)
def _filename(self, structname):
structname = structname.replace('._', '.').replace('.', '_')
if structname.startswith('_'):
structname = structname[1:]
return structname
@property
def fieldnames(self):
return get_fieldnames(self.path)
@property
def frames(self):
return get_frames(self.path)
def get_frames(self, mode="intersect"):
return get_frames(self.path, None, mode)
def __str__(self):
return self.path
def __repr__(self):
return self.path
def copy_calling_script(self, stack_level=1):
script_path = inspect.stack()[stack_level][1]
script_name = os.path.basename(script_path)
src_path = os.path.join(self.path, "src")
os.path.isdir(src_path) or os.mkdir(src_path)
target = os.path.join(self.path, "src", script_name)
shutil.copy(script_path, target)
try:
shutil.copystat(script_path, target)
except:
warnings.warn('Could not copy file metadata to %s' % target)
def copy_src(self, path):
file_name = os.path.basename(path)
src_dir = os.path.dirname(path)
target_dir = join(self.path, "src")
# Create directory and copy
isdir(target_dir) or os.mkdir(target_dir)
shutil.copy(path, join(target_dir, file_name))
try:
shutil.copystat(path, join(target_dir, file_name))
except:
warnings.warn('Could not copy file metadata to %s' % join(target_dir, file_name))
def mkdir(self, subdir=None):
path = self.path
isdir(path) or os.mkdir(path)
if subdir is not None:
subpath = join(path, subdir)
isdir(subpath) or os.mkdir(subpath)
def remove(self):
if isdir(self.path):
shutil.rmtree(self.path)
@staticmethod
def create(directory, category=None, count=1, mkdir=True, copy_calling_script=True):
if count > 1:
return SceneBatch([Scene.create(directory, category, 1, mkdir, copy_calling_script) for i in range(count)])
# Single scene
directory = os.path.expanduser(directory)
if category is None:
category = os.path.basename(directory)
directory = os.path.dirname(directory)
else:
category = slugify(category)
scenedir = join(directory, category)
if not isdir(scenedir):
os.makedirs(scenedir)
next_index = 0
else:
indices = [int(name[4:]) for name in os.listdir(scenedir) if name.startswith("sim_")]
if not indices:
next_index = 0
else:
next_index = max(indices) + 1
scene = Scene(directory, category, next_index)
if mkdir:
scene.mkdir()
if copy_calling_script:
assert mkdir
scene.copy_calling_script(2)
return scene
@staticmethod
def list(directory, category=None, indexfilter=None, max_count=None):
directory = os.path.expanduser(directory)
if not category:
root_path = directory
category = os.path.basename(directory)
directory = os.path.dirname(directory)
else:
root_path = join(directory, category)
if not os.path.isdir(root_path):
return []
indices = [int(sim[4:]) for sim in os.listdir(root_path) if sim.startswith("sim_")]
if indexfilter:
indices = [i for i in indices if indexfilter(i)]
if max_count and len(indices) >= max_count:
indices = indices[0:max_count]
indices = sorted(indices)
if len(indices)==0:
logging.warning("No simulations sim_XXXXXX found in '%s'" % root_path)
return [Scene(directory, category, scene_index) for scene_index in indices]
@staticmethod
def at(sim_dir):
sim_dir = os.path.expanduser(sim_dir)
        if sim_dir[-1]=='/': # remove trailing slash
sim_dir = sim_dir[0:-1]
dirname = os.path.basename(sim_dir)
if not dirname.startswith("sim_"):
raise ValueError("%s with dir %s is not a valid scene directory." % (sim_dir,dirname))
category_directory = os.path.dirname(sim_dir)
category = os.path.basename(category_directory)
directory = os.path.dirname(category_directory)
index = int(dirname[4:])
return Scene(directory, category, index)
class SceneBatch(Scene):
def __init__(self, scenes):
Scene.__init__(self, scenes[0].dir, scenes[0].category, scenes[0].index)
self.scenes = scenes
@property
def batch_size(self):
return len(self.scenes)
def write_sim_frame(self, arrays, fieldnames, frame, check_same_dimensions=False):
for array in arrays:
assert array.shape[0] == self.batch_size or array.shape[0] == 1,\
'Wrong batch size: %d but %d scenes' % (array.shape[0], self.batch_size)
for i, scene in enumerate(self.scenes):
array_slices = [(array[i, ...] if array.shape[0] > 1 else array[0, ...]) for array in arrays]
scene.write_sim_frame(array_slices, fieldnames, frame=frame, check_same_dimensions=check_same_dimensions)
def read_sim_frames(self, fieldnames=None, frames=None):
raise NotImplementedError()
def read_array(self, fieldname, frame):
return np.concatenate([scene.read_array(fieldname, frame) for scene in self.scenes])
def _transform_for_writing(obj):
def f(value):
if isinstance(value, field.StaggeredGrid):
return value.staggered_tensor()
if isinstance(value, field.CenteredGrid):
return value.data
else:
return value
with struct.unsafe():
data = struct.map(f, obj, lambda x: isinstance(x, (field.StaggeredGrid, field.CenteredGrid)))
return data
def slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
for greek_letter, name in greek.items():
value = value.replace(greek_letter, name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
value = re.sub('[-\s]+', '-', value)
return value
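# For example: slugify(u'Δ wing 2!') -> 'delta-wing-2'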
greek = {
u'Α': 'Alpha', u'α': 'alpha',
u'Β': 'Beta', u'β': 'beta',
u'Γ': 'Gamma', u'γ': 'gamma',
u'Δ': 'Delta', u'δ': 'delta',
u'Ε': 'Epsilon', u'ε': 'epsilon',
u'Ζ': 'Zeta', u'ζ': 'zeta',
u'Η': 'Eta', u'η': 'eta',
u'Θ': 'Theta', u'θ': 'theta',
u'Ι': 'Iota', u'ι': 'iota',
u'Κ': 'Kappa', u'κ': 'kappa',
u'Λ': 'Lambda', u'λ': 'lambda',
u'Μ': 'Mu', u'μ': 'mu',
u'Ν': 'Nu', u'ν': 'nu',
u'Ξ': 'Xi', u'ξ': 'xi',
u'Ο': 'Omicron', u'ο': 'omicron',
u'Π': 'Pi', u'π': 'pi',
u'Ρ': 'Rho', u'ρ': 'rho',
u'Σ': 'Sigma', u'σ': 'sigma',
u'Τ': 'Tau', u'τ': 'tau',
u'Υ': 'Upsilon', u'υ': 'upsilon',
u'Φ': 'Phi', u'φ': 'phi',
u'Χ': 'Chi', u'χ': 'chi',
u'Ψ': 'Psi', u'ψ': 'psi',
u'Ω': 'Omega', u'ω': 'omega',
}
``` |
{
"source": "joelguerrero/PyAero",
"score": 2
} |
#### File: PyAero/src/PFileSystem.py
```python
from PyQt4 import QtGui, QtCore
from PSettings import AIRFOILDATA, FILEFILTER, ICONS_L
import PLogger as logger
class FileSystem(QtGui.QFileSystemModel):
def __init__(self, parent=None):
super(FileSystem, self).__init__(parent)
self.parent = parent
self.setFilter(QtCore.QDir.AllDirs |
QtCore.QDir.Files |
QtCore.QDir.NoDotAndDotDot)
filefilter = QtCore.QStringList(FILEFILTER)
self.setNameFilters(filefilter)
# if true, filtered files are shown, but grey
# if false they are not shown
self.setNameFilterDisables(False)
# this path is watched for changes
self.setRootPath(AIRFOILDATA)
self.tree = QtGui.QTreeView()
self.tree.setModel(self)
# this function sets actual start dir in treeview
self.tree.setRootIndex(self.index(AIRFOILDATA))
self.tree.setAnimated(True)
self.tree.setColumnHidden(1, True) # hide size column
self.tree.setColumnHidden(2, True) # hide type column
self.tree.setColumnHidden(3, True) # hide date modified column
header = self.tree.header()
header.setResizeMode(QtGui.QHeaderView.ResizeToContents)
def data(self, index, role):
"""
This function partly overrides the standard QFileSystemModel data
function to return custom file and folder icons
"""
fileInfo = self.getFileInfo(index)[4]
if role == QtCore.Qt.DecorationRole:
if fileInfo.isDir():
return QtGui.QPixmap(ICONS_L + 'Folder.png')
elif fileInfo.isFile():
return QtGui.QPixmap(ICONS_L + 'airfoil.png')
return super(FileSystem, self).data(index, role)
@QtCore.pyqtSlot(QtCore.QModelIndex)
def onFileSelected(self, index):
return
fileInfo = self.getFileInfo(index)[4]
if fileInfo.isDir():
return
name = self.getFileInfo(index)[0]
logger.log.info('<b><font color="#2784CB">%s</b> selected' % (name))
@QtCore.pyqtSlot(QtCore.QModelIndex)
def onFileLoad(self, index):
fullname = self.getFileInfo(index)[2]
fileInfo = self.getFileInfo(index)[4]
if fileInfo.isDir():
return
self.parent.slots.loadAirfoil(fullname, '#')
def getFileInfo(self, index):
indexItem = self.index(index.row(), 0, index.parent())
fileInfo = self.fileInfo(indexItem)
path = fileInfo.absolutePath()
name = fileInfo.fileName()
ext = fileInfo.suffix()
fullname = fileInfo.absoluteFilePath()
return [name, path, fullname, ext, fileInfo]
```
#### File: PyAero/src/PGraphicsScene.py
```python
from PyQt4 import QtGui
class GraphicsScene(QtGui.QGraphicsScene):
"""The graphics scene manages all items which are drawn in the graphics view
The coordinates in the scene are the "logical" coordinates. These are the
real object coordinates. E.g., an airfoil typically is described in an
x-range from 0 to 1 (no units are given for that). So when PyAero loads an
airfoil, the GraphicsView provides a view on that graphics item. The
"fitallinview" after loading, scales the view so that the airfoil is fully
fitting the graphics view which is in pixels or "physical" coordinates.
Attributes:
parent (TYPE): Description
"""
def __init__(self, parent=None):
# call constructor of QGraphicsScene
super(GraphicsScene, self).__init__(parent)
self.parent = parent
# set scene to large size so that scrollbars are small (if shown)
self.setSceneRect(-50, -50, 100, 100)
    def mousePressEvent(self, event):
        """Re-implement QGraphicsScene's mousePressEvent handler"""
        self.clearSelection()
        # call original implementation of QGraphicsScene mousePressEvent handler
        super(GraphicsScene, self).mousePressEvent(event)
    def mouseReleaseEvent(self, event):
        """Re-implement QGraphicsScene's mouseReleaseEvent handler"""
        # call original implementation of QGraphicsScene
        # mouseReleaseEvent handler
        super(GraphicsScene, self).mouseReleaseEvent(event)
    def mouseMoveEvent(self, event):
        """Re-implement QGraphicsScene's mouseMoveEvent handler"""
        # call original implementation of QGraphicsScene mouseMoveEvent handler
        super(GraphicsScene, self).mouseMoveEvent(event)
```
#### File: PyAero/src/PShortCuts.py
```python
from PyQt4 import QtGui, QtCore
class PShortCuts(object):
"""docstring for ClassName """
def __init__(self, parent):
if not isinstance(parent, QtGui.QMainWindow):
raise TypeError('parent must be a MainWindow')
super(PShortCuts, self).__init__()
self.parent = parent
def addShortcut(self, shortcut, slot):
"""Add a shortcut to a slot (event handler)
Args:
shortcut (STRING): Something like 'ALT+m'
slot (STRING): Method of PGuiSlots
Returns:
object: QShortcut object
"""
guislot = getattr(self.parent.slots, slot)
sc = QtGui.QShortcut(QtGui.QKeySequence(shortcut), self.parent,
guislot)
return sc
    def enableShortcut(self, shortcut, enable=True):
        """Enable or disable a QShortcut previously returned by addShortcut."""
        shortcut.setEnabled(enable)
    def changeKey(self, shortcut, key):
        """Assign a new key sequence to a QShortcut previously returned by addShortcut."""
        shortcut.setKey(QtGui.QKeySequence(key))
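# Example usage (hypothetical slot name on PGuiSlots; a sketch, not part of PyAero's API):
#   shortcuts = PShortCuts(mainwindow)
#   sc = shortcuts.addShortcut('ALT+m', 'onMessageBox')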
``` |
{
"source": "Joel-hanson/fastapi-reactjs-boilerplate",
"score": 2
} |
#### File: backend/app/api.py
```python
import os
import json
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from starlette.config import Config
from starlette.requests import Request
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import HTMLResponse, RedirectResponse
from authlib.integrations.starlette_client import OAuth, OAuthError
app = FastAPI()
origins = [
"http://localhost:3000",
"localhost:3000"
]
app.add_middleware(SessionMiddleware, secret_key=os.environ.get("GOOGLE_CLIENT_SECRET"))
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
config = Config('.env')
oauth = OAuth(config)
CONF_URL = 'https://accounts.google.com/.well-known/openid-configuration'
oauth.register(
name='google',
server_metadata_url=CONF_URL,
client_kwargs={
'scope': 'openid email profile'
}
)
@app.route('/')
async def homepage(request: Request):
user = request.session.get('user')
if user:
data = json.dumps(user)
html = (
f'<pre>{data}</pre>'
'<a href="/logout">logout</a>'
)
return HTMLResponse(html)
return HTMLResponse('<a href="/login">login</a>')
@app.route('/login')
async def login(request: Request):
redirect_uri = request.url_for('auth')
return await oauth.google.authorize_redirect(request, redirect_uri)
@app.route('/auth')
async def auth(request: Request):
try:
token = await oauth.google.authorize_access_token(request)
except OAuthError as error:
return HTMLResponse(f'<h1>{error.error}</h1>')
user = await oauth.google.parse_id_token(request, token)
request.session['user'] = dict(user)
return RedirectResponse(url='/')
@app.route('/logout')
async def logout(request: Request):
request.session.pop('user', None)
return RedirectResponse(url='/')
``` |
{
"source": "Joel-hanson/flask-aws-project",
"score": 2
} |
#### File: Joel-hanson/flask-aws-project/utils.py
```python
import boto3
import os
import requests
from settings import DEFAULT_REGION, KEYNAME
session = boto3.session.Session(region_name=DEFAULT_REGION, profile_name=KEYNAME)
def get_public_ip(instance_ids):
ec2_client = session.client("ec2")
reservations = ec2_client.describe_instances(InstanceIds=instance_ids).get(
"Reservations"
)
for reservation in reservations:
for instance in reservation["Instances"]:
return instance.get("PublicIpAddress")
def get_running_instances():
ec2_client = session.client("ec2")
reservations = ec2_client.describe_instances(
Filters=[{"Name": "instance-state-name", "Values": ["running"],}]
).get("Reservations")
instances = []
for reservation in reservations:
for instance in reservation["Instances"]:
instance_id = instance["InstanceId"]
instance_type = instance["InstanceType"]
public_ip = instance["PublicIpAddress"]
private_ip = instance["PrivateIpAddress"]
instances.append(
f"{instance_id}, {instance_type}, {public_ip}, {private_ip}"
)
return instances
def get_instance_status(instance_id):
ec2_client = session.client("ec2")
if instance_id:
reservations = ec2_client.describe_instances(InstanceIds=[instance_id]).get(
"Reservations"
)
else:
reservations = ec2_client.describe_instances().get("Reservations")
instances_status = []
for reservation in reservations:
for instance in reservation["Instances"]:
instance_id = instance["InstanceId"]
instance_type = instance["InstanceType"]
instance_status = instance["State"]["Name"]
public_dns_name = instance["PublicDnsName"]
link_details = "Server is spinning up"
if instance_status == "running":
link_details = "Server is up and docker is spinning up right now"
try:
response = requests.get(f"http://{public_dns_name}")
if response.status_code == 200:
link_details = f"The site is up and running. please visit http://{public_dns_name}"
except:
link_details = "Server is up and docker is spinning up right now"
elif instance_status == "terminated":
link_details = "Server is terminated"
elif instance_status == "shutting-down":
link_details = "Server is shutting down"
else:
link_details = ""
instances_status.append(
f"{instance_id}, {instance_type}, {instance_status}, {link_details}"
)
return instances_status
def stop_instance(instance_id):
ec2_client = session.client("ec2")
response = ec2_client.stop_instances(InstanceIds=[instance_id])
return response
def terminate_instance(instance_id):
ec2_client = session.client("ec2")
response = ec2_client.terminate_instances(InstanceIds=[instance_id])
return response
def create_key_pair():
ec2_client = session.client("ec2")
key_pair = ec2_client.create_key_pair(KeyName=KEYNAME)
private_key = key_pair["KeyMaterial"]
# write private key to file with 400 permissions
with os.fdopen(
os.open("/tmp/aws_ec2_key.pem", os.O_WRONLY | os.O_CREAT, 0o400), "w+"
) as handle:
handle.write(private_key)
``` |
{
"source": "Joel-hanson/salt-lint",
"score": 2
} |
#### File: tests/unit/TestYamlHasOctalValueRule.py
```python
import unittest
from saltlint import RulesCollection
from saltlint.rules.YamlHasOctalValueRule import YamlHasOctalValueRule
from tests import RunFromText
GOOD_NUMBER_LINE = '''
testdirectory:
file.recurse:
- name: /tmp/directory
- file_mode: 700
- dir_mode: '0775'
testdirectory02:
file.recurse:
- name: /tmp/directory02
- file_mode: 0
- dir_mode: "0775"
'''
BAD_NUMBER_LINE = '''
testdirectory:
file.recurse:
- name: /tmp/directory001 # shouldn't fail
- mode: 0 # shouldn't fail
- file_mode: 00 # should fail
- dir_mode: 0700 # should fail
'''
class TestFileModeLeadingZeroRule(unittest.TestCase):
collection = RulesCollection()
def setUp(self):
self.collection.register(YamlHasOctalValueRule())
def test_statement_positive(self):
runner = RunFromText(self.collection)
results = runner.run_state(GOOD_NUMBER_LINE)
self.assertEqual(0, len(results))
def test_statement_negative(self):
runner = RunFromText(self.collection)
results = runner.run_state(BAD_NUMBER_LINE)
self.assertEqual(2, len(results))
``` |
{
"source": "JoelHaubold/NzmLabeling",
"score": 3
} |
#### File: JoelHaubold/NzmLabeling/pickle_plotting_v2.py
```python
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import numpy as np
def get_file_paths(file_directory):
file_paths = os.listdir(file_directory)
file_paths = list(filter(lambda f_path: os.path.isdir(file_directory / f_path), file_paths))
return file_paths
def plot_day(plot_directory, df_phases_day, sdp_name, start_time, df_comparison_values, plot_method, comparison_label):
sdp_directory = plot_directory / sdp_name
if not os.path.exists(sdp_directory):
os.makedirs(sdp_directory)
plt.figure(1)
plt.ylabel('Phases')
p_counter = 1
relevant_plot = False
transgressions_sum = 0
for df_p_day in df_phases_day:
if not df_p_day.empty:
transgressions = plot_method(df_p_day, p_counter)
transgressions_sum += transgressions
relevant_plot = relevant_plot or transgressions > 0
p_counter = p_counter + 1
if relevant_plot and not df_comparison_values.empty:
df_comparison_values.plot(figsize=(24, 6), linewidth=0.5, color='grey', label=comparison_label)
if relevant_plot:
legend = plt.legend(fontsize='x-large', loc='lower left')
for line in legend.get_lines():
line.set_linewidth(4.0)
plot_path = plot_directory / sdp_name / start_time
if relevant_plot:
plt.savefig(plot_path)
plt.close(1)
if transgressions_sum > 0:
print(start_time)
print(transgressions_sum)
return transgressions_sum
def plot_pickle_daywise(pickle_directory, plot_directory, plot_method, comparison_series_func):
transgression_sum = 0
nmbr_elements_sum = 0
file_paths = get_file_paths(pickle_directory)
print(file_paths)
for path in file_paths:
print(path)
comparison_label, df_comparison_values = comparison_series_func(path)
# df_mean_values = pd.read_pickle(pickle_directory/(path+'season_aggregation')).sort_index()
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
nmbr_elements_sum += sum(map(lambda df: df.shape[0], df_phases))
day = pd.Timedelta('1d')
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
print(min_date)
print(max_date)
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
# df_day = df.loc[df.index>start_time and df.index<end_time, :]
df_phases_day = list(map(lambda df: df.loc[start_time:end_time], df_phases))
df_comparison_values_day = df_comparison_values.loc[start_time:end_time]
# print(start_time.date())
transgression_sum += plot_day(plot_directory, df_phases_day, path.name, str(start_time.date()),
df_comparison_values_day, plot_method, comparison_label)
return transgression_sum, nmbr_elements_sum
def plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("StationDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.StationDif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanStationAverage", pd.read_pickle(pickle_directory / 'meanStationValues')
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_phase_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("PhaseDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.phase_dif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_season_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
# anomaly_threshold = 3.2270145810536146
plot_directory = base_plot_directory / ("SeasDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_season_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.SeasDif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanSeasonalAverage", pd.read_pickle(
pickle_directory / (station_name + 'season_aggregation')).sort_index()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_season_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies(pickle_directory, base_plot_directory):
anomaly_threshold = 1.5
plot_directory = base_plot_directory / ("TrafoDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.Value.diff()) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TrafoDif_v2_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.trafo) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_time_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TimeDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_time_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.time_passed) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_time_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def get_quintiles(pickle_directory, quantile):
file_paths = get_file_paths(pickle_directory)
print(file_paths)
aggregated_series = pd.Series()
for path in file_paths:
print(path)
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for df_p in df_phases:
ser = df_p.time_passed.reset_index(drop=True).abs()
aggregated_series = aggregated_series.append(ser, ignore_index=True)
threshold = aggregated_series.quantile(q=quantile)
print(threshold)
return threshold
def show_df2(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['p2'] = df_phases_h[1][['Value']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['p3'] = df_phases_h[2][['Value']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t1'] = df_phases_h[0][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t2'] = df_phases_h[1][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_h['t3'] = df_phases_h[2][['trafo']].loc[pd.datetime(2017, 4, 16):pd.datetime(2017, 4, 17)]
df_p_dif = pd.DataFrame()
df_p_dif['p1'] = df_p_h['p1'].diff() / df_p_h['p1'].index.to_series().diff().dt.total_seconds()
df_p_dif['p2'] = df_p_h['p2'].diff() / df_p_h['p2'].index.to_series().diff().dt.total_seconds()
df_p_dif['p3'] = df_p_h['p3'].diff() / df_p_h['p3'].index.to_series().diff().dt.total_seconds()
df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
abs(df_p_dif['p3']) >= 0.15]
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_p_dif_a)
print(df_p_h)
def show_df(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['p2'] = df_phases_h[1][['Value']].loc[pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['p3'] = df_phases_h[2][['Value']].loc[pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['t1'] = df_phases_h[0][['trafo']].loc[pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['t2'] = df_phases_h[1][['trafo']].loc[pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_h['t3'] = df_phases_h[2][['trafo']].loc[pd.datetime(2017, 8, 7):pd.datetime(2017, 8, 8)]
df_p_dif = pd.DataFrame()
df_p_dif['p1'] = df_p_h['p1'].diff() / df_p_h['p1'].index.to_series().diff().dt.total_seconds()
df_p_dif['p2'] = df_p_h['p2'].diff() / df_p_h['p2'].index.to_series().diff().dt.total_seconds()
df_p_dif['p3'] = df_p_h['p3'].diff() / df_p_h['p3'].index.to_series().diff().dt.total_seconds()
df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
abs(df_p_dif['p3']) >= 0.15]
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df_p_dif_a)
print(df_p_h[['p1', 'p2', 'p3']])
def construct_overview2():
file_paths = os.listdir("./../pickles")
df_ps = []
for fp in file_paths:
path = Path("./../pickles") / fp
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
df_ps.append(df_phases)
df_table = pd.DataFrame(columns=["Messungszeitraum [d]", "MA Median [s]", "MA Mean [s]", "Max U [V]",
"Min U [V]", "Average U [V]"])
for df_phases in df_ps:
time_dif = pd.Series()
voltage = pd.Series()
for df_p in df_phases:
time_dif = time_dif.append(df_p['time_passed'], ignore_index=True)
voltage = voltage.append(df_p["Value"], ignore_index=True)
med_time_dif = time_dif.median()
mean_time_dif = time_dif.mean()
voltage_min = min(voltage)
voltage_max = max(voltage)
voltage_mean = voltage.mean()
length = (df_phases[0].index[-1] - df_phases[0].index[0]).days
name = df_phases[0]["ServiceDeliveryPoint"][0]
name = name[-4:]
df_table = df_table.append(pd.Series(name=name,
data={"MA Median [s]": med_time_dif, "MA Mean [s]":mean_time_dif,
"Messungszeitraum [d]": length, "Max U [V]":voltage_max,
"Min U [V]":voltage_min, "Average U [V]":voltage_mean}))
df_table1 = df_table.copy()
df_table.index.name = "Station"
# df_t = df_table.astype("object").copy()
print("x")
def construct_overview():
file_paths = os.listdir("./../pickles")
df_ps = []
for fp in file_paths:
path = Path("./../pickles") / fp
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
df_ps.append(df_phases)
n_p = 0
df_table = pd.DataFrame(columns=["Datenpunkte", "Sprunga.", "Zeita.", "Phasena.", "Saisona.",
"Stationsa.", "Messungszeitraum [d]", "Messungsabstand [s]"])
tr, ph, st, se, ti = 0, 0, 0, 0, 0
for df_phases in df_ps:
n_tr = 0
n_ph = 0
n_st = 0
n_se = 0
n_ti = 0
time_dif = pd.Series()
n_p_h = 0
for df_p in df_phases:
n_tr = n_tr + df_p[abs(df_p['trafo']) > 0.1].shape[0]
n_ph = n_ph + df_p[abs(df_p['phase_dif']) > 7.34].shape[0]
n_st = n_st + df_p[abs(df_p['StationDif']) > 8.772].shape[0]
n_se = n_se + df_p[abs(df_p['SeasDif']) > 5.87].shape[0]
n_ti = n_ti + df_p[abs(df_p['time_passed']) > 179].shape[0]
n_p = n_p + df_p.shape[0]
n_p_h = n_p_h + df_p.shape[0]
print(n_tr)
time_dif = time_dif.append(df_p['time_passed'], ignore_index=True)
med_time_dif = time_dif.median()
length = (df_phases[0].index[-1] - df_phases[0].index[0]).days
name = df_phases[0]["ServiceDeliveryPoint"][0]
name = name[-4:]
df_table = df_table.append(pd.Series(name=name,
data={"Datenpunkte": n_p_h, "Sprunga.": n_tr, "Zeita.": n_ti,
"Phasena.": n_ph, "Saisona.": n_se,
"Stationsa.": n_st, "Messungsabstand [s]": med_time_dif,
"Messungszeitraum [d]": length}))
tr, ti, ph, se, st = tr + n_tr, ti + n_ti, ph + n_ph, se + n_se, st + n_st
df_table1 = df_table.copy()
df_table.drop(columns=["Messungszeitraum [d]", "Messungsabstand [s]"], inplace=True)
df_table.index.name = "Station"
df_table = df_table.append(pd.Series(name="gesamt", data={"Datenpunkte": n_p, "Sprunga.": tr, "Zeita.": ti,
"Phasena.": ph, "Saisona.": se,
"Stationsa.": st}))
df_table = df_table.append(pd.Series(name="anteil", data={"Datenpunkte": n_p / n_p, "Sprunga.": tr / n_p,
"Zeita.": ti / n_p,
"Phasena.": ph / n_p, "Saisona.": se / n_p,
"Stationsa.": st / n_p}))
df_t = df_table.astype("object").copy()
df_t.Datenpunkte = df_t.Datenpunkte.astype("int")
print("x")
def main():
construct_overview2()
# pickle_directory = Path("pickles")
# base_plot_dir = Path("plots")
# quantile = .999
# anomaly_threshold = get_quintiles(pickle_directory, quantile)
# plot_time_dif_anomalies(pickle_directory, base_plot_dir, anomaly_threshold)
# plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_dir, 0.15)
# plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_dir, 0.1)
# show_df('NW00000000000BISMARKSTRASSNV04609', pickle_directory)
# df_table = pd.DataFrame
# ... n_p = 0
# ... for df_phases in df_ps:
# ... n_tr = 0
# ... n_ph = 0
# ... n_st = 0
# ... n_se = 0
# ... n_ti = 0
# ... time_dif = pd.Series()
# ... for df_p in df_ps:
# ... n_tr = n_tr + df_p['trafo'] >0.1
# ... n_ph = n_ph + df_p['phase_dif'] > 7.34
# ... n_st = n_st + df_p['StationDif'] > 8.772
# ... n_se = n_se + df_p['SeasDif'] > 8.772
# ... n_ti = n_ti + df_p['time_passed'] > 179
# ... n_p = n_p + df_p.shape[0]
# ... name = df_p["ServiceDeliveryPoint"][0]
# ... time_dif.append(df_p['time_passed'], ignore_index=True)
# ... med_time_dif = time_dif.median()
# ... length = (df_ps[0].index[-1] - df_ps[0].index[0]).days
# ... df_table.append(pd.Series(name=name),data= {"Sprunganomalien":n_tr,"Zeitanomalien":n_ti,"Phasenanomalien":n_ph, "Saisonanomalien":n_se, "Stationsanomalien":n_st, "Messungsabstand":med_time_dif, "Anzahl Tage":length})
if __name__ == "__main__":
main()
```
#### File: JoelHaubold/NzmLabeling/save_anomalies.py
```python
import pandas as pd
from pathlib import Path
import os
import numpy as np
def calculate_voltage_steps(df_phases):
result_df = pd.DataFrame()
phase_counter = 1
for df_p in df_phases:
steps_up = list(np.where(df_p.Value.diff() > 1)[0])
steps_down = list(np.where(df_p.Value.diff() < -1)[0])
column_name = "1VStepsP"+str(phase_counter)
# column_name_down = "StepDownP" + str(phase_counter)
up_column = {column_name: 'Up'}
down_column = {column_name: 'Down'}
steps_up_df = df_p.iloc[steps_up, :].assign(**up_column)[[column_name]]
steps_down_df = df_p.iloc[steps_down, :].assign(**down_column)[[column_name]]
steps_df = pd.concat([steps_up_df, steps_down_df]).sort_index()
result_df = pd.concat([steps_df, result_df], axis=1).sort_index()
phase_counter = phase_counter + 1
return result_df
def calculate_voltage_range(df_phases, df_sdp):
phase_counter = 1
for df_p in df_phases:
transgressions = list(np.where(df_p.Value > 240)[0])
column_name = "Over240P" + str(phase_counter)
over_column = {column_name: 'Over'}
transgressions_df = df_p.iloc[transgressions, :].assign(**over_column)[[column_name]]
df_sdp = pd.concat([transgressions_df, df_sdp], axis=1).sort_index()
phase_counter = phase_counter + 1
return df_sdp
def calculate_phase_distance(df_phases, df_sdp):
phase_counter = 1
for df_p in df_phases:
transgressions = list(np.where(df_p.Value > 240)[0])
column_name = "Over240P" + str(phase_counter)
over_column = {column_name: 'Over'}
transgressions_df = df_p.iloc[transgressions, :].assign(**over_column)[[column_name]]
df_sdp = pd.concat([transgressions_df, df_sdp], axis=1).sort_index()
phase_counter = phase_counter + 1
return df_sdp
def calculate_anomalies(pickle_directory, excel_file_path):
# print(os.getcwd())
file_paths = os.listdir(pickle_directory)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
df_sdp = calculate_voltage_steps(df_phases)
df_sdp = calculate_voltage_range(df_phases, df_sdp)
# excel_writer = pd.ExcelWriter(path=excel_file_path, datetime_format='YYYY-MM-DD HH:MM:SS')
# df_sdp.to_excel(sheet_name=path.name, excel_writer=excel_writer)
csv_path = Path('anomalies') / (path.name+'.csv')
df_sdp.to_csv(path_or_buf=csv_path, sep=';')
# workbook = excel_writer.book
# excel_writer.save()
def main():
pickle_directory = Path("testPickles")
excel_file_path = Path("test.xlsx")
# calculate_anomalies(pickle_directory, excel_file_path)
main()
``` |
{
"source": "joelhaynie/cs760_final_project",
"score": 3
} |
#### File: cs760_final_project/dataset/LearnModel.py
```python
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Flatten, Dropout
from keras.layers import LSTM, MaxPooling1D, Conv1D
from keras import regularizers
import sklearn.metrics
# validation set will be 10% of total set
CV_frac = 0.1
# comment out/in the labels you'd like to be filtered from the input and the classification
mood_labels = {
276: "Happy music",
#277: "Funny music",
278: "Sad music",
#279: "Tender music",
#280: "Exciting music",
281: "Angry music",
282: "Scary music",
}
# build mapping of moods indices to 0-based indices
mood_labels_to_ordinals = dict()
mood_ordinals_to_labels = dict()
n = 0
for k in mood_labels.keys():
mood_labels_to_ordinals[k] = n
mood_ordinals_to_labels[n] = k
n += 1
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
max_len = 10
def extract_record_to_xy(record):
''' parses a tfrecord and returns tuple (x,y) where x is the 10x128 feature vector and y is the 0-indexed label '''
tf_seq_example = tf.train.SequenceExample.FromString(record)
example_label = list(np.asarray(tf_seq_example.context.feature['labels'].int64_list.value))
    moods = intersection(example_label, mood_labels.keys())  # keep only the mood labels selected above
if len(moods) == 0:
return None
y = mood_labels_to_ordinals[moods[0]]
n_frames = len(tf_seq_example.feature_lists.feature_list['audio_embedding'].feature)
audio_frame = []
for i in range(n_frames):
audio_frame.append(np.frombuffer(tf_seq_example.feature_lists.feature_list['audio_embedding'].
feature[i].bytes_list.value[0],np.uint8).astype(np.float32))
pad = [np.zeros([128], np.float32) for i in range(max_len - n_frames)]
audio_frame += pad
return audio_frame, y
def logistic_regression_model():
''' Creates a logistic regression model. Used by train_model '''
lr_model = Sequential()
lr_model.add(BatchNormalization(input_shape=(10, 128)))
lr_model.add(Flatten())
lr_model.add(Dense(len(mood_labels), activation='sigmoid'))
# if interested, try using different optimizers and different optimizer configs
lr_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return (lr_model, 'Multiclass Logistic Regression')
def lstm_1layer_model():
''' Creates a 1 layer LSTM model. Used as input to train_model() '''
lstm_model = Sequential()
lstm_model.add(BatchNormalization(input_shape=(10, 128)))
lstm_model.add(Dropout(0.5))
lstm_model.add(LSTM(128, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l2(0.01)))
lstm_model.add(Dense(len(mood_labels), activation='softmax'))
# if interested, try using different optimizers and different optimizer configs
lstm_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return (lstm_model, 'LSTM 1-Layer')
def lstm_3layer_model():
''' Creates a 3 layer LSTM model. Used as input to train_model() '''
lstm3_model = Sequential()
lstm3_model.add(BatchNormalization(input_shape=(10, 128)))
lstm3_model.add(Dropout(0.5))
lstm3_model.add(LSTM(64, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l2(0.01),
return_sequences=True))
lstm3_model.add(BatchNormalization())
lstm3_model.add(Dropout(0.5))
lstm3_model.add(LSTM(64, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l2(0.01),
return_sequences=True))
lstm3_model.add(BatchNormalization())
lstm3_model.add(Dropout(0.5))
lstm3_model.add(LSTM(64, activation='relu',
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l2(0.01)))
lstm3_model.add(Dense(len(mood_labels), activation='softmax'))
# if interested, try using different optimizers and different optimizer configs
lstm3_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return (lstm3_model, '3-layer LSTM')
def nn_model():
''' Creates a traditional nn model. Used as input to train_model() '''
model = Sequential()
model.add(BatchNormalization(input_shape=(10, 128)))
#model.add(Conv1D(20, kernel_size=5, strides=2,
# activation='relu',
# input_shape=(10, 128)))
#nn_model.add(MaxPooling1D(pool_size=2, strides=2))
model.add(Dense(128, activation='sigmoid'))
model.add(Flatten())
model.add(Dense(len(mood_labels), activation='sigmoid'))
model.add(Dense(len(mood_labels), activation='sigmoid'))
# Other layer choices:
#
#nn_model = Sequential()
#nn_model.add(Conv1D(20, kernel_size=5, strides=2,
# activation='relu',
# input_shape=(10, 128)))
##nn_model.add(MaxPooling1D(pool_size=2, strides=2))
##nn_model.add(Conv1D(64, 5, activation='relu'))
#nn_model.add(MaxPooling1D(pool_size=2))
#nn_model.add(Dense(128, activation='relu', input_shape=(10, 128)))
##nn_model.add(MaxPooling1D(pool_size=2))
#nn_model.add(Dense(128, activation='relu'))
#nn_model.add(Flatten())
##nn_model.add(Dense(len(mood_labels), activation='relu'))
#nn_model.add(Dense(len(mood_labels), activation='softmax'))
# if interested, try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return (model, '2-layer NN')
def print_label_stats(name, records):
''' prints the statistical breakdown of training and validation sets '''
ret = dict()
for m in mood_ordinals_to_labels.keys():
ret[m] = 0
tot = len(records)
for rec in records:
ret[rec[1]] += 1
print(name + ' stats:')
for m in ret.keys():
v = ret[m]
print(' %15s: %6d (%.3f)' % (mood_labels[mood_ordinals_to_labels[m]], v, v / tot))
print(' %15s: %6d (%.3f)\n' % ('total', tot, tot / tot))
def train_model(model, train_records, validate_records, batch_size_train, batch_size_validate, epochs):
''' Perform learning on the records in tf_infile, using 90/10 cross validation
model - keras model to train
train_records - instances to train with
validate_records - instances to valiate with
batch_size_train - size of batches for training sets
batch_size_validate - size of batches for validation
epochs - number of epochs to run
'''
# learn the model
num_classes = len(mood_labels)
num_train_recs = len(train_records)
num_val_recs = len(validate_records)
num_recs = num_train_recs + num_val_recs
validation_data = (np.asarray([r[0] for r in validate_records]),
np.asarray([keras.utils.to_categorical(r[1], num_classes) for r in validate_records]))
history = model.fit(x=np.asarray([r[0] for r in train_records]),
y=np.asarray([keras.utils.to_categorical(r[1], num_classes) for r in train_records]),
batch_size=batch_size_train,
epochs=epochs,
verbose=1,
validation_data=validation_data)
return (model, history)
def plot_epochs(history, title):
# plot the results
plt.rcParams['figure.figsize'] = (12,8)
plt.plot(history.history['acc'], label='training accuracy')
plt.plot(history.history['val_acc'], label='validation accuracy')
plt.legend()
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.title(title)
plt.show()
def load_records(tf_infile):
# load the records
print("Loading records from '" + tf_infile + "'...")
records = []
for t in tf.python_io.tf_record_iterator(tf_infile):
r = extract_record_to_xy(t)
if r:
records.append(r)
print("Loaded %d records" % len(records))
return records
def show_confusion_matrix(model, records):
predictions = model.predict_on_batch(np.asarray([r[0] for r in records]))
conf = sklearn.metrics.confusion_matrix([r[1] for r in records], [np.argmax(p) for p in predictions])
print("Confusion matrix:")
print(conf)
conf = conf / len(records)
print(conf)
def main():
# choose an input file by commenting in/out
input_file = 'moods_unbalanced_subset_15615recs.tfrecord'
#input_file = 'moods_balanced_subset_401recs.tfrecord'
records = load_records(input_file)
num_records = len(records)
split = int((1-CV_frac) * num_records)
train_records = records[0:split]
validate_records = records[split:-1]
print_label_stats("train", train_records)
print_label_stats("validate", validate_records)
# train, or load model...comment out/in as desired.
train = True
if train:
# pick a model by changing the function on the next line
#(model, title) = logistic_regression_model()
#(model, title) = lstm_1layer_model()
(model, title) = lstm_3layer_model()
#(model, title) = nn_model()
(model, history) = train_model(model, train_records, validate_records, 32, min(len(validate_records), 128), 60)
model.save(title + '_most_recent.h5')
else:
infile = '3-layer LSTM_most_recent.h5'
print("Loading model: " + infile)
model = keras.models.load_model(infile)
show_confusion_matrix(model, validate_records)
if train:
plot_epochs(history, title)
# Un-comment the model you are interested in...
# logistic regression, small balanced data set:
#train_model(logistic_regression_model(), 'moods_balanced_subset_401recs.tfrecord', 32, 10, 100, 'Models/LogReg_401_20_20_100.h5')
# logistic regression, large unbalanced data set:
#train_model(LogisticRegressionModel(), 'moods_unbalanced_subset_15615recs.tfrecord', 32, 128, 100, 'Models/LogReg_15615_32_128_30.h5')
# LSTM 1-layer, small balanced data set:
#train_model(lstm_1layer_model(), 'moods_balanced_subset_401recs.tfrecord', 20, 10, 100, 'Models/LSTM1_401_20_20_100.h5')
# LSTM 1-layer, large unbalanced data set:
#train_model(lstm_1layer_model(), 'moods_unbalanced_subset_15615recs.tfrecord', 32, 128, 100, 'Models/LSTM1_15615_32_128_30.h5')
# LSTM 3-layer, small balanced data set:
#train_model(lstm_3layer_model(), 'moods_balanced_subset_401recs.tfrecord', 20, 10, 200, 'Models/LSTM3_401_20_20_100.h5')
# LSTM 3-layer, large unbalanced data set:
#train_model(lstm_3layer_model(), 'moods_unbalanced_subset_15615recs.tfrecord', 32, 128, 100, 'Models/LSTM3_15615_32_128_30.h5')
# NN, small balanced data set:
#train_model(nn_model(), 'moods_balanced_subset_401recs.tfrecord', 32, 10, 250, 'Models/NN_401_20_20_100.h5')
# NN, large unbalanced data set:
#train_model(nn_model(), 'moods_unbalanced_subset_15615recs.tfrecord', 32, 128, 100, 'Models/NN_15615_32_128_30.h5')
if __name__ == "__main__":
main()
``` |
{
"source": "Joel-H-dot/MPh",
"score": 2
} |
#### File: MPh/tests/test_config.py
```python
"""Tests the `config` module."""
__license__ = 'MIT'
########################################
# Dependencies #
########################################
import parent # noqa F401
import mph
from sys import argv
import logging
########################################
# Tests #
########################################
def test_option():
assert 'session' in mph.option()
assert 'platform-dependent' in mph.option().values()
assert mph.option('session') == 'platform-dependent'
mph.option('session', 'something else')
assert mph.option('session') == 'something else'
mph.option('session', 'platform-dependent')
########################################
# Main #
########################################
if __name__ == '__main__':
arguments = argv[1:]
if 'log' in arguments:
logging.basicConfig(
level = logging.DEBUG if 'debug' in arguments else logging.INFO,
format = '[%(asctime)s.%(msecs)03d] %(message)s',
datefmt = '%H:%M:%S')
test_option()
``` |
{
"source": "joelhed/pykattis",
"score": 3
} |
#### File: pykattis/pykattis/cli.py
```python
import sys
import argparse
import pkg_resources
from pathlib import Path
from .core import Problem
DESCRIPTION = """\
A CLI tool for solving Kattis problems with python.
"""
def print_err(*args, **kwargs):
"""Print to stderr."""
print(*args, **kwargs, file=sys.stderr)
def print_with_value(message: str, value: str):
"""Print the given message and value, possibly separated by a newline."""
stripped_value = value.strip()
end = "\n" if "\n" in stripped_value else "\t"
print(message, end=end)
print(stripped_value)
class ProblemCommand:
"""A cli command that deals with a Kattis problem."""
command_name = None
def __init__(self):
"""Initialize the command."""
def __call__(self, args):
"""Run the command with the given argparse arguments."""
problem = Problem(args.problem_id)
return self.run(problem, args)
def create_parser(self, subparsers):
"""Create a parser for the problem."""
if self.__class__.command_name is None:
raise NotImplementedError
parser = subparsers.add_parser(
self.__class__.command_name, description=self.__class__.__doc__
)
parser.add_argument("problem_id", help="The Kattis problem ID")
parser.set_defaults(func=self)
return parser
def run(self, problem, args):
"""Run the command.
This should raise for errors, but can optionally return an exit code.
"""
raise NotImplementedError
class CreateCommand(ProblemCommand):
"""Create a problem directory from a template, and download the samples."""
command_name = "create"
def create_parser(self, *args, **kwargs):
parser = super().create_parser(*args, **kwargs)
parser.add_argument(
"--overwrite",
action="store_true",
help="Overwrite the contents of the solution directory",
)
return parser
def run(self, problem, args):
problem.create_directory()
# TODO: Extract
solution_path = problem.package_path / "solution.py"
if solution_path.exists() and not args.overwrite:
print_err("Solution already exists. Continuing...")
else:
print_err(f"Writing template solution file '{solution_path}'")
solution_template = pkg_resources.resource_string(
__name__, "solution_template.py"
).decode("utf8")
with solution_path.open("w") as f:
f.write(solution_template.format(problem=problem))
problem.samples.download()
problem.samples.save()
class RunCommand(ProblemCommand):
"""Run the solution program."""
command_name = "run"
def run(self, problem, args):
input_ = sys.stdin.read()
answer = problem.solution_module.solve(input_)
print(answer)
class TestCommand(ProblemCommand):
"""Run a solution on its defined sample inputs and answers."""
command_name = "test"
def run(self, problem, args):
for input_, expected_answer in problem.samples:
print_with_value("Solving with input:", input_)
answer = problem.solution_module.solve(input_)
if answer.strip() == expected_answer.strip():
print_with_value("Success! Output was:", answer)
else:
print("Failure")
print_with_value("Expected answer:", expected_answer)
print_with_value("Actual answer:", answer)
class DownloadSamplesCommand(ProblemCommand):
"""Download the problem's samples."""
command_name = "download_samples"
def create_parser(self, *args, **kwargs):
parser = super().create_parser(*args, **kwargs)
parser.add_argument(
"out",
type=argparse.FileType("w", encoding="utf8"),
nargs="?",
default="-",
help="The output file, default stdout",
)
def run(self, problem, args):
problem.samples.download()
problem.samples.save(args.out)
def main():
"""The main entry point of the program.
Returns the program's exit code.
"""
commands = [
CreateCommand(),
RunCommand(),
TestCommand(),
DownloadSamplesCommand(),
]
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.set_defaults(func=lambda args: parser.print_help())
subparsers = parser.add_subparsers()
for command in commands:
command.create_parser(subparsers)
args = parser.parse_args()
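# Make solution packages in the current working directory importable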
sys.path.append(str(Path.cwd()))
try:
exit_code = args.func(args)
except ValueError as e:
print_err(e)
return 1
return exit_code if exit_code is not None else 0
``` |
{
"source": "JoelHernandez343/networking-administration",
"score": 3
} |
#### File: app/views/routing.py
```python
from database import models, manage
def build_routing(db):
routes = [
{"name": "Inicio", "route": "/"},
{"name": "Topología", "route": "/topology"},
{"name": "Routers", "route": "/routers"},
{"name": "Acerca de", "route": "/about"},
]
routers = []
for router in manage.router.get_all(db):
r = {
"ip": router["ip_max"],
"hostname": router["hostname"],
"route": f"/routers/{router['ip_max']}",
}
interfaces = []
for interface in router["interfaces"]:
i = {
"name": interface["name"],
"route": f"{r['route']}/{interface['name']}",
}
interfaces.append(i)
r["interfaces"] = interfaces
routers.append(r)
routes[2]["routers"] = routers
return routes
```
#### File: database/manage/__init__.py
```python
from database import models
from database.database import engine
from database.manage import user as user
from database.manage import router as router
from database.manage import interface as interface
def drop_db():
models.Base.metadata.drop_all(bind=engine)
def create_db():
models.Base.metadata.create_all(bind=engine)
def recreate_db():
drop_db()
create_db()
```
#### File: database/manage/router.py
```python
from database import models
from database.manage import interface, user
from networking import net
def _get_max_ip(interfaces):
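# The numerically largest IP among the active interfaces identifies the router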
ip_max_n = 0
ip_max = None
for i in interfaces:
if not i["is_active"]:
continue
if ip_max_n < net.aton(i["ip"]):
ip_max_n = net.aton(i["ip"])
ip_max = i["ip"]
return ip_max
def get_all(db):
routers = db.query(models.Router).all()
return [get(db, r.to_dict()["ip_max"]) for r in routers]
def get(db, router_id):
router = db.query(models.Router).get(router_id)
if router is None:
return None
router_dict = router.to_dict()
router_dict["interfaces"] = interface.get_from(db, router_id)
router_dict["active_interfaces"] = len(
[i for i in router.interfaces if i.is_active]
)
router_dict["users"] = [u.to_dict() for u in router.users]
return router_dict
def add_w_interfaces_n_users(db, router, interfaces, users):
ip_max = _get_max_ip(interfaces)
r = models.Router(
ip_max=ip_max,
hostname=router["name"],
brand=router["brand"],
os=router["os"],
)
db.add(r)
db.add_all(interface.create_multiple(ip_max, interfaces))
db.add_all(user.create_multiple(ip_max, users))
db.commit()
def update_id(db, router_id):
r = db.query(models.Router).get(router_id)
if r is None:
return False
new_ip_max = _get_max_ip([i.to_dict() for i in r.interfaces])
interface.update_id(db, new_ip_max, r)
r.ip_max = new_ip_max
db.add(r)
db.commit()
def modify(db, router_id, hostname=""):
r = db.query(models.Router).get(router_id)
if r is None:
return False
if hostname != "":
r.hostname = hostname
db.add(r)
db.commit()
return True
def delete(db, router_id):
router = db.query(models.Router).get(router_id)
if router is None:
return False
for i in router.interfaces:
db.delete(i)
for u in router.users:
db.delete(u)
db.delete(router)
db.commit()
return True
```
#### File: src/networking/shared.py
```python
from networking.graph import Graph
username = "admin"
password = "<PASSWORD>"
topology = Graph()
visited = []
hostname = ""
pending = []
lan = 0
def log(message):
print(f"[{hostname}] {message}")
```
#### File: app/views/configure_routing.py
```python
import time, json
from flask import render_template, request, jsonify, make_response
from app import app
from router_configuration import configure
@app.route("/configure_routing")
def configure_routing():
return render_template("configure_routing.html")
@app.route("/configure_routing/configure", methods=["POST"])
def configuring():
req = request.get_json()
print(req)
try:
configure()
return make_response(jsonify(req), 200)
except Exception as e:
obj = {"message": str(e)}
return make_response(json.dumps(obj), 500)
```
#### File: src/router_configuration/shared.py
```python
username = "admin"
password = "<PASSWORD>"
name = ""
def set_name(router0, router1=""):
global name
if router1 == "":
name = "[" + router0 + "] "
else:
name = "[" + router0 + "] [" + router1 + "] "
```
#### File: src/app/__init__.py
```python
from flask import Flask, _app_ctx_stack
from sqlalchemy.orm import scoped_session
from database.manage import recreate_db
from database.database import SessionLocal, engine
recreate_db()
app = Flask(__name__)
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0
app.session = scoped_session(SessionLocal, scopefunc=_app_ctx_stack.__ident_func__)
app.user = None
app.password = "<PASSWORD>"
@app.teardown_appcontext
def remove_session(*args, **kwargs):
print("Bye!")
app.session.remove()
from app.views import static_views
from app.views import dynamic_views
from app import requests
```
#### File: database/manage/__init__.py
```python
from database import models
from database.database import engine
from database.manage import interface
from database.manage import vlan
def drop_db():
models.Base.metadata.drop_all(bind=engine)
def create_db():
models.Base.metadata.create_all(bind=engine)
def recreate_db():
drop_db()
create_db()
```
#### File: database/manage/router.py
```python
from .. import models
from . import interface
from networking.ssh import tools
def get_all(db):
routers = db.query(models.Router).all()
res = []
for router in routers:
r = router.to_dict()
r["interfaces"] = [i.to_dict() for i in router.interfaces]
res.append(r)
return res
def get(db, router_id):
router = db.query(models.Router).get(router_id)
if router is None:
return None
res = router.to_dict()
res["interfaces"] = [i.to_dict() for i in router.interfaces]
return res
def add(db, router, interfaces):
ip_max = tools.get_max_ip(interfaces)
r = models.Router(
ip_max=ip_max,
hostname=router["hostname"],
sys_desc=router["sysDescr"],
sys_contact=router["sysContact"],
sys_name=router["sysName"],
sys_location=router["sysLocation"],
accesible_ip=router["accesible_ip"],
)
db.add(r)
db.add_all(interface.create_multiple(ip_max, interfaces))
db.commit()
return get(db, ip_max)
def modify(db, router_id, hostname=""):
router = db.query(models.Router).get(router_id)
if router is None:
return False
if hostname == "":
raise Exception("Hostname must not be empty")
router.hostname = hostname
db.add(router)
db.commit()
return True
```
#### File: networking/snmp/convert.py
```python
def status(status):
try:
status = int(str(status))
if status == 1:
return "up"
if status == 2:
return "down"
if status == 3:
return "testing"
return "unknown"
except:
return "unknown"
def mac(raw_str):
return ":".join("{:02x}".format(ord(c)) for c in raw_str)
```
#### File: networking/snmp/information.py
```python
from . import convert, oids
from .tools import snmp_query
from ..ssh import tools
community = "ro_4CM1"
def get_sys_info(ip):
info = {
"sysDescr": snmp_query(ip, community, oids.DESCR_OID),
"sysContact": snmp_query(ip, community, oids.CONTACT_OID),
"sysName": snmp_query(ip, community, oids.HOSTNAME_OID),
"sysLocation": snmp_query(ip, community, oids.LOCATION_OID),
}
info["hostname"] = info["sysName"].split(".")[0]
return info
def get_if_inout(ip, n):
return {
"ifInOctets": snmp_query(ip, community, f"{oids.INTERFACE_OID}.10.{n}"),
"ifOutOctets": snmp_query(ip, community, f"{oids.INTERFACE_OID}.16.{n}"),
"ifInUcastPkts": snmp_query(ip, community, f"{oids.INTERFACE_OID}.11.{n}"),
"ifOutUcastPkts": snmp_query(ip, community, f"{oids.INTERFACE_OID}.17.{n}"),
}
def get_if_info(ip, n):
return {
"ifDescr": tools.translate_to_flask(
snmp_query(ip, community, f"{oids.INTERFACE_OID}.2.{n}")
),
"ifMtu": snmp_query(ip, community, f"{oids.INTERFACE_OID}.4.{n}"),
"ifSpeed": snmp_query(ip, community, f"{oids.INTERFACE_OID}.5.{n}"),
"ifPhysAddress": convert.mac(
snmp_query(ip, community, f"{oids.INTERFACE_OID}.6.{n}")
),
"ifAdminStatus": convert.status(
snmp_query(ip, community, f"{oids.INTERFACE_OID}.7.{n}")
),
"ifOperStatus": convert.status(
snmp_query(ip, community, f"{oids.INTERFACE_OID}.8.{n}")
),
"mibIndex": n,
}
def get_interfaces(ip):
interfaces = []
number = int(snmp_query(ip, community, oids.INTNUMBER_OID)) + 1
for i in range(number):
interface = get_if_info(ip, i + 1)
if interface["ifDescr"] != "Null0" and interface["ifDescr"] != "":
interfaces.append(interface)
return interfaces
def check_lost_percentage(interface_source, interface_dest, percentage):
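# Estimate packet loss by comparing unicast packets sent on the source interface with those received on the destination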
info_dest = get_if_inout(interface_dest["ip"], interface_dest["mib_index"])
info_source = get_if_inout(interface_source["ip"], interface_source["mib_index"])
print(info_dest, info_source)
lost_packages = int(info_source["ifOutUcastPkts"]) - int(info_dest["ifInUcastPkts"])
lost_percentage = abs(lost_packages * 100 / int(info_source["ifOutUcastPkts"]))
print(lost_packages, lost_percentage, percentage, info_source["ifOutUcastPkts"])
return (lost_percentage >= percentage, lost_percentage)
```
#### File: networking/ssh/configuration.py
```python
from . import common
def send_commands(session, commands, exits=0):
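# Append 'exit' the requested number of times so the session backs out of nested configuration modes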
for _ in range(0, exits):
commands.append("exit")
for command in commands:
log(command)
session.sendline(command)
session.expect("#")
def log(message):
print(f"[{common.current_device}] {message}")
```
#### File: networking/ssh/connection.py
```python
from pexpect import pxssh
from . import common, tools
def create(ip, user):
session = pxssh.pxssh()
print(f"Connecting to {ip} ...")
session.login(ip, user["name"], user["password"], auto_prompt_reset=False)
session.sendline("term length 0")
session.expect("#")
print(f"Connected to {ip}")
common.current_device = tools.get_hostname(session.before)
return session
``` |
{
"source": "joelhochstetter/qChain",
"score": 3
} |
#### File: joelhochstetter/qChain/majorana.py
```python
import numpy as np
from cmath import sqrt
import qutip as qt
from operators import *
tol = 1e-16
def solvePoly(vec):
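# Roots of the quadratic built from the spinor components; a vanishing leading coefficient sends a root to infinity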
roots = np.empty(2, dtype=np.complex128)
vec[1]=2*vec[1]
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1]+sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
return roots
def root_to_xyz(root):
if root == np.inf:
return [0,0,1]
x = root.real
y = root.imag
den = 1/(1.+(x**2)+(y**2))
return [2*x*den,2*y*den, (1.-(x**2)-(y**2))*den]
def getStars(vec):
#converts 3-spinor into two stars
roots = np.empty(2, dtype=np.complex128)
stars = [[],[],[]] #stores x, y and z coordinates
vec[1] *= -np.sqrt(2)
if abs(vec[0]) <= tol:
roots[0] = np.inf
if abs(vec[1]) <= tol:
roots[1] = np.inf
else:
roots[1] = -vec[2]/vec[1]
else:
roots[0] = -0.5*(vec[1] + sqrt(vec[1]**2-4*vec[0]*vec[2]))/vec[0]
roots[1] = -vec[1]/vec[0] - roots[0]
for r in roots:
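# Inverse stereographic projection: map each polynomial root onto the Bloch sphere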
if r == np.inf:
stars[0].append(0)
stars[1].append(0)
stars[2].append(-1)
else:
x = r.real
y = r.imag
den = 1/(1.+(x**2)+(y**2))
stars[0].append(2*x*den)
stars[1].append(2*y*den)
stars[2].append((1.-(x**2)-(y**2))*den)
return stars
print(getStars([1,0,1]))
b = qt.Bloch()
b.point_color = ['b','b','r','r','g','g','#CC6600','#CC6600'] #ensures point and line are same colour
b.add_points(getStars([1,sqrt(2),1]))
#b.add_points(getStars([1/sqrt(2),0,1/sqrt(2)]),meth='l')
b.xlabel = ['$<F_x>$','']
b.ylabel = ['$<F_y>$','']
b.zlabel = ['$<F_z>$','']
#b.add_points([[0,0],[-1,1],[0,0]], meth='l')
#b.add_points([[-1,1],[0,0],[0,0]], meth='l')
#b.add_points([0,0])
#b.add_points([0,0,-1])
b.show()
``` |
{
"source": "joelibaceta/backend-codigo-10",
"score": 3
} |
#### File: sesion11/models/movie.py
```python
from sesion11.app import db
class Movie(db.Model):
id = db.Column("idMovie", db.Integer, primary_key = True)
title = db.Column("Title", db.String(45))
plot = db.Column("Plot", db.String(250))
year = db.Column("Year", db.Integer)
length = db.Column("Length", db.Integer)
director = db.Column("Director", db.String(45))
background_image = db.Column("BackgroundImage", db.String(250))
cover_image = db.Column("CoverImage", db.String(250))
main_color = db.Column("MainColor", db.String(45))
def create_movie(**kwargs):
new_movie = Movie(**kwargs)
db.session.add(new_movie)
db.session.commit()
```
#### File: todolist/core/views.py
```python
from django.shortcuts import redirect, render
from django.views import View
from core.models import Task
from core.forms import TaskModelForm
from django.views.generic import CreateView
# Create your views here.
class TasksView(View):
def get(self, request):
tasks = Task.objects.all()
context = {
"tasks": tasks
}
return render(request, "index.html", context)
def post(self, request):
data = request.POST
task = Task(title=data["title"])
task.save()
return redirect("/tasks/")
class TaskView(View):
def get(self, request, pk):
task = Task.objects.get(pk=pk)
#form = TaskModelForm(initial={"title": task.title, "description": task.description})
form = TaskModelForm(instance=task)
context = {
"task": task,
"form": form
}
return render(request, "edit.html", context)
def post(self, request, pk):
task = Task.objects.get(pk=pk)
data = request.POST
print(data)
task.title = data["title"]
task.description = data["description"]
task.due_date = data["due_date"]
task.save()
return redirect("/tasks/")
class TaskCreateView(CreateView):
model = Task
fields = '__all__'
def get_success_url(self):
return "/tasks/"
```
#### File: bookings/core/viewsets.py
```python
from rest_framework import serializers, viewsets
from rest_framework import response
from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from rest_framework import status
from core.models import Booking
from core.serializers import BookingsSerializer
from core.serializers import UserLoginSerializer
class BookingViewSet(viewsets.ViewSet):
"""Bookings (reservations) API"""
def list(self, request):
if request.user.is_anonymous:
return Response({}, status.HTTP_403_FORBIDDEN)
bookings = Booking.objects.filter(user_id=request.user.id)
serializer = BookingsSerializer(bookings, many=True)
return Response(serializer.data)
def create(self, request):
data = request.data
if "user_id" in data:
return Response({
"error": "user_id param is not allowed"
}, status.HTTP_400_BAD_REQUEST)
if not request.user.is_anonymous:
data["user_id"] = request.user.id
serializer = BookingsSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors)
class UserViewSet(viewsets.ViewSet):
def login(self, request):
data = request.data
serializer = UserLoginSerializer(data=data)
if serializer.is_valid():
user, token = serializer.save()
data = {
'user': user.first_name,
'access_token': token
}
return Response(data)
else:
return Response(serializer.errors, status=status.HTTP_401_UNAUTHORIZED)
def logout(self, request):
user = request.user
if user.is_anonymous:
return Response({}, status.HTTP_401_UNAUTHORIZED)
token = Token.objects.filter(user_id=user.id)
token.delete()
return Response({
"status": "OK"
})
```
#### File: todolist/core/viewsets.py
```python
from rest_framework import serializers, viewsets
from rest_framework.response import Response
from rest_framework import status
from core.serializers import TaskSerializer, UserLoginSerializer
from core.models import Task
class UserViewset(viewsets.ViewSet):
def login(self, request):
data = request.data
serializer = UserLoginSerializer(data=data)
if serializer.is_valid():
user, token = serializer.save()
data = {
'username': f"{user.first_name} {user.last_name}",
'access_token': token
}
return Response(data)
else:
return Response(serializer.errors)
class TaskViewSet(viewsets.ViewSet):
def list(self, request):
user = request.user
if user.is_anonymous:
return Response({"error": "not found"}, status.HTTP_404_NOT_FOUND)
else:
tasks = Task.objects.filter(user_id=user.id)
serializer = TaskSerializer(tasks, many=True)
return Response(serializer.data)
def create(self, request):
data = request.data
print(request.user)
if not request.user.is_anonymous:
data["user_id"] = request.user.id
else:
return Response({"error": "Invalid Token!"}, status.HTTP_401_UNAUTHORIZED)
serializer = TaskSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
else:
return Response(serializer.errors)
def update(self, request, pk):
if not request.user.is_anonymous:
tasks = Task.objects.filter(pk = pk)
if request.user.id == tasks.first().user_id:
data = request.data
if not "user_id" in data:
tasks.update(**data)
return Response({"status": "OK"}, status.HTTP_201_CREATED)
else:
return Response({"error": "param user_id is not allowed"}, status.HTTP_400_BAD_REQUEST)
else:
return Response({"error": "You can update only your own tasks!"}, status.HTTP_403_FORBIDDEN)
else:
return Response({"error": "Invalid Token!"}, status.HTTP_401_UNAUTHORIZED)
def destroy(self, request, pk):
if not request.user.is_anonymous:
tasks = Task.objects.filter(pk = pk)
if request.user.id == tasks.first().user_id:
tasks.delete()
return Response({"status": "OK"})
else:
return Response({"error": "You can delete only your own tasks!"}, status.HTTP_403_FORBIDDEN)
else:
return Response({"error": "Invalid Token!"}, status.HTTP_401_UNAUTHORIZED)
```
#### File: sesion28/from_scratch/server.py
```python
from asyncio.streams import start_server
import websockets
import asyncio
col_sockets = []
async def handler(websocket):
col_sockets.append(websocket)
async for message in websocket:
reply = f"Data recibida: {message}"
#await websocket.send(reply)
websockets.broadcast(col_sockets, reply)
async def main():
async with websockets.serve(handler, "localhost", 7000):
await asyncio.Future()
asyncio.run(main())
```
#### File: backend-codigo-10/sesion3/ejercicio1.py
```python
def invertir(palabra):
"""This function reverses a text string"""
tamano = len(palabra)
nueva_palabra = ""
for i in range( 1, ( tamano + 1 ) ):
nueva_palabra = nueva_palabra + palabra[-i]
return nueva_palabra
def invertir2(palabra):
tamano = len(palabra)
nueva_palabra_col = []
for i in range(1, ( tamano + 1 )):
nueva_palabra_col.append(palabra[-i])
return "".join(nueva_palabra_col)
resultado = invertir("hello world")
print(resultado)
resultado2 = invertir2("hello world")
print(resultado2)
```
#### File: backend-codigo-10/sesion4/poo1.py
```python
class Dog:
def __init__(self, nombre):
self.nombre = nombre
def bark(self):
print(f"{self.nombre} say Woof!")
perro1 = Dog("Firulais")
print(perro1.nombre)
perro2 = Dog("Snoopy")
print(perro2.nombre)
perro1.bark()
perro2.bark()
class Cat:
def __init__(self, nombre):
self.nombre = nombre
print("Init ejecutado")
pelusa = Cat("Pelusa")
```
#### File: backend-codigo-10/sesion4/poo3.py
```python
class Animal:
def __init__(self, nombre):
self.nombre = nombre
def dormir(self):
print("zZzZ")
def mover(self):
print("caminar")
class Sponge(Animal):
def mover(self):
pass
class Cat(Animal):
def hacer_ruido(self):
print("Meow")
class Fish(Animal):
def mover(self):
print("swim")
def hacer_ruido(self):
print("glu glu")
pelusa = Cat("Pelusa")
pelusa.dormir()
pelusa.mover()
pelusa.hacer_ruido()
nemo = Fish("Nemo")
nemo.dormir()
nemo.mover()
nemo.hacer_ruido()
bob = Sponge("Bob")
bob.dormir()
bob.mover()
```
#### File: backend-codigo-10/sesion5/ejercicio1.py
```python
class Alumno:
def __init__(self, nombre):
self.nombre = nombre
self.__notas = []
def registrar_nota(self, nota):
self.__notas.append(nota)
def get_promedio(self):
total = 0
cantidad = len(self.__notas)
for nota in self.__notas:
total = total + nota
return total / cantidad
class Curso:
def __init__(self):
self.alumnos = []
matematica = Curso()
alumno1 = Alumno("Diana")
alumno1.registrar_nota(12)
alumno1.registrar_nota(15)
alumno1.registrar_nota(18)
matematica.alumnos.append(alumno1)
for alumno in matematica.alumnos:
print(f"Promedio {alumno.nombre}: {alumno.get_promedio()}")
```
#### File: backend-codigo-10/sesion5/functional2.py
```python
import functools
a = [12, 11, 21, 8, 10, 5, 22]
def aprobado(nota):
return nota < 10
def aprobar(nota):
return nota + 5
result = list(filter(aprobado, a))
result2 = map(aprobar, result)
print(list(result))
print(list(result2))
print(list(map(lambda y: y+5, list(filter(lambda x: x<10, a)))))
total=0
for i in a:
total=total+i
result3 = functools.reduce(lambda x, y: x+y, a)
print(result3)
``` |
{
"source": "joelibaceta/wakatime-languages-pie-svg",
"score": 3
} |
#### File: wakatime-languages-pie-svg/api/index.py
```python
from http.server import BaseHTTPRequestHandler
from io import StringIO
import requests
import re
import os
import colorsys
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import random
class handler(BaseHTTPRequestHandler):
def get_param(self, name, path, default=None):
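# Extract a single query-string parameter from the raw request path with a regex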
pattern = re.compile(r""+name+"\=([^\=\&]+)")
match = pattern.search(path)
if match is not None:
return match.group(1)
else:
return default
def do_GET(self):
username = self.get_param('username', self.path)
uuid = self.get_param('uuid', self.path)
wakatime_json_url = f"https://wakatime.com/share/@{username}/{uuid}.json"
r = requests.get(wakatime_json_url)
response = r.json()
plt.rcParams['font.size'] = 7.0
plt.rcParams['figure.figsize'] = 8, 3.35
labels = []
sizes = []
for item in response["data"]:
labels.append(f'{item["name"]} ({item["percent"]}%)')
sizes.append(item["percent"])
fig1, ax1 = plt.subplots()
colors = ["#e54335", "#f15789", "#eb8918", "#e9b126", "#e8d948", "#afc526", "#1e9eea", "#a42fba"]
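# Hide the on-slice labels (labeldistance pushes them off the figure) and rely on the legend for language names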
ax1.pie(sizes, labels=labels, autopct='', startangle=90, radius=2, rotatelabels = False, colors = colors, labeldistance=9999999)
ax1.axis('equal')
centre_circle = plt.Circle((0,0),0.90,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
handles, labels = ax1.axes.get_legend_handles_labels()
ax1.legend(handles, labels, prop={'size':8}, bbox_to_anchor=(0.2,1.00))
imgdata = StringIO()
fig.savefig(imgdata, format='svg', dpi=199)
imgdata.seek(0)
svg_dta = imgdata.read()
self.send_response(200)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Content-Disposition", "attachment")
self.send_header("Content-Length", len(svg_dta))
self.send_header("Content-type", "image/svg+xml")
self.end_headers()
self.wfile.write(str(svg_dta).encode())
return
``` |
{
"source": "joelibaceta/xls-cli",
"score": 3
} |
#### File: xls-cli/xls_cli/frame.py
```python
import os
import math
import xls_cli.ansi as ansi
from xls_cli.grid import Grid
from getkey import getkey, keys
class Frame:
width, height = 0, 0
printable_window = "\x1B[2J"
title = "unititled"
grid = None
def __init__(self, title):
rows, columns = os.popen('stty size', 'r').read().split()
self.title = title
self.height = int(rows)
self.width = int(columns)
def render(self):
self.printable_window += self.draw_title_bar()
self.printable_window += self.draw_grid()
print(self.printable_window)
def loop(self):
while 1:
key = getkey()
if key == keys.UP:
self.grid.move_up()
self.refresh()
if key == keys.DOWN:
self.grid.move_down()
self.refresh()
if key == keys.RIGHT:
self.grid.move_right()
self.refresh()
if key == keys.LEFT:
self.grid.move_left()
self.refresh()
elif key == 'q':
quit()
def refresh(self):
self.printable_window = "\x1B[2J"
self.render()
def draw_title_bar(self):
title = "%s - %s" %("xls-cli", self.title)
return ansi.bg(title.center(self.width, " "), 28)
def draw_grid(self):
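# Render the visible sub-grid as ASCII rows, highlighting the cell under the cursor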
grid_to_string = "\n" + "-" * self.width + "\n"
for j in range(0, (len(self.grid.subgrid))):
row = []
for i in range(0, (len(self.grid.subgrid[0]) )):
text = "{:<20}".format(" " + str(self.grid.subgrid[j][i]))
if (j == self.grid.pos["y"] and i == self.grid.pos["x"]):
text = ansi.bg(text, 8)
row.append(text)
line_separator = "-" * self.width
grid_to_string += "%s\n%s\n" %("|".join(row), line_separator)
#grid_to_string += max(0, (self.grid.sheet.nrows - self.grid.max_rows)) * "\n"
return grid_to_string
```
#### File: xls-cli/xls_cli/runner.py
```python
import xlrd
from xls_cli.frame import Frame
from xls_cli.grid import Grid
class Runner:
def open_file(file_path):
frame = None
book = xlrd.open_workbook(file_path)
sh = book.sheet_by_index(0)
grid = Grid()
grid.load_grid(sh)
frame = Frame(file_path)
frame.grid = grid
frame.render()
frame.loop()
``` |
{
"source": "JoeLiberi/DeamonRestartService",
"score": 2
} |
#### File: JoeLiberi/DeamonRestartService/monitorservice.py
```python
import time
import datetime
import grp
import pwd
import argparse
import sys
from daemon import runner
import subprocess
import logging
import re
import imaplib
import socket
import ssl
import email
import re
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from subprocess import call
from subprocess import Popen, PIPE
from config import services as apps
import config
class MonitorService(object):
def __init__(self):
self.pidfile_timeout = 1
self.stdin_path = '/tmp/monitorservice.log'
self.stdout_path = '/tmp/monitorservice.log'
self.stderr_path = '/tmp/monitorservice.log'
self.pidfile_path = '/tmp/daemon.pid'
self.gmailuser = config.gmail["user"]
self.gmailpassword = config.gmail["password"]
self.webapp = config.webapp
# Command and service list
self.servicelist = apps.keys()
# Set logging level
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
def run(self):
while True:
for service in self.servicelist:
if not self.isProcessRunning(service):
self.logger.info("Found service: {}".format(service))
self.logger.info('Executing: {}'.format(apps[service]))
call(apps[service])
self.sendmail(service)
else:
self.logger.debug("{} is running".format(service))
time.sleep(1)
def findProcess(self, processId):
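# List matching processes, excluding the grep pipeline itself and this monitor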
ps = subprocess.Popen("ps aux | grep {} | grep -v grep | grep -v monitorservice".format(processId), shell=True, stdout=subprocess.PIPE)
output = ps.stdout.read()
ps.stdout.close()
ps.wait()
return output
def isProcessRunning(self, processId):
output = self.findProcess(processId)
self.logger.debug(output)
if re.search(processId, output) is None:
return False
else:
return True
def connecttogmail(self):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(self.gmailuser, self.gmailpassword)
self.logger.info("Succesfully logged into Gmail")
return server
except:
self.logger.info("Could not connect to gmail")
return None
def sendmail(self, service):
email_text = """\
Subject: {subject}
From: {sent_from}
To: {to}
{body}
"""
sent_from = self.gmailuser
to = ", ".join(config.email_to)
subject = 'Process Monitor'
# Create timestamp
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
body = config.email_body.format(service=service, timestamp=st, webapp=self.webapp)
text = email_text.format(sent_from=sent_from, to=to, subject=subject, body=body)
s = self.connecttogmail()
self.logger.info("Sending mail.....")
s.sendmail(sent_from, to, text)
s.close()
self.logger.info("mail sent")
if __name__ == '__main__':
app = MonitorService()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
``` |
{
"source": "joeliedtke/fairing",
"score": 2
} |
#### File: deployers/gcp/test_gcp.py
```python
import pytest
import fairing
from kubernetes import client
from fairing.deployers.gcp.gcp import GCPJob
PROJECT_ID = fairing.cloud.gcp.guess_project_name()
def create_test_pod_spec():
return client.V1PodSpec(
containers=[client.V1Container(
name='model',
image='test-image'
)]
)
def test_default_params():
job = GCPJob()
request = job.create_request_dict(create_test_pod_spec())
desired = {
'jobId': job._job_name,
'trainingInput': {
'masterConfig': {
'imageUri': 'test-image'
},
'region': 'us-central1'
}
}
assert request == desired
def test_top_level_args():
job = GCPJob(region='us-west1', scale_tier='BASIC')
request = job.create_request_dict(create_test_pod_spec())
desired = {
'jobId': job._job_name,
'trainingInput': {
'masterConfig': {
'imageUri': 'test-image'
},
'region': 'us-west1',
'scaleTier': 'BASIC'
}
}
assert request == desired
def test_custom_job_config():
job = GCPJob(job_config={
'trainingInput': {
'scaleTier': 'CUSTOM',
'masterType': 'standard'
}
})
request = job.create_request_dict(create_test_pod_spec())
desired = {
'jobId': job._job_name,
'trainingInput': {
'masterConfig': {
'imageUri': 'test-image'
},
'region': 'us-central1',
'scaleTier': 'CUSTOM',
'masterType': 'standard'
}
}
assert request == desired
def test_top_level_params_override_job_config():
job = GCPJob(region='us-west1', scale_tier='BASIC', job_config={
'trainingInput': {
'region': 'europe-west1',
'scaleTier': 'PREMIUM_1'
},
'labels': {
'test-key': 'test-value'
}
})
request = job.create_request_dict(create_test_pod_spec())
desired = {
'jobId': job._job_name,
'trainingInput': {
'masterConfig': {
'imageUri': 'test-image'
},
'region': 'us-west1',
'scaleTier': 'BASIC'
},
'labels': {
'test-key': 'test-value'
}
}
assert request == desired
``` |
{
"source": "joelingwei/baby_blog",
"score": 2
} |
#### File: baby_blog/blog/views.py
```python
from django.shortcuts import render
# Create your views here.
from . import models
from django.http import JsonResponse
from django.core.paginator import Paginator
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from hashlib import md5
from django.db.models import F
# Home, My diary, Time machine, About me
def index(request):
# Pagination
page = request.GET.get('page')
if page:
page = int(page)
else:
page = 1
page_num = 0
page_nums = None # total number of pages
page_count = None # total number of records
next_page = None # next page
previous_page = None # previous page
about = None
# All content
article_all = None
article_top = None
type = request.get_full_path().split('/')[1]
if type == 'growth':
article_all = models.Article.objects.filter(cat_id=2, status=1)
page_counts = 6
elif type == 'time':
article_all = models.Article.objects.filter(cat_id__in=[2,3], status=1)
page_counts = 12
elif type == 'about':
about = models.Article.objects.get(id=23)
else:
article_top = models.Article.objects.get(cat_id=3,status=1, is_index=2)
photos = article_top.photos.split(',')
article_top.photos = photos[:5]
article_all = models.Article.objects.filter(is_top=2, status=1,cat_id__in=[2,3],is_index=1)[:5]
if type == 'growth' or type == 'time':
# Main content list (paginated)
paginator = Paginator(article_all,page_counts)
page_num = paginator.num_pages
page_nums = article_all.count()
article_list = paginator.page(page)
article_all = article_list
if article_list.has_next():
next_page = page+1
else:
next_page = page
if article_list.has_previous():
previous_page = page-1
else:
previous_page = page
return render(request, 'blogs/index.html', {
'web_info': models.System.objects.all()[0],
'BASE_URL': settings.BASE_URL,
'article_list': article_all,
'about':about,
'type':type,
'article_top': article_top,
'article_num': models.Article.objects.filter(cat_id=2, status=1).count(),
'photo_num': models.Article.objects.filter(cat_id=3, status=1).count(),
'link': models.Link.objects.filter(status=1),
'article_news': models.Article.objects.filter(status=1,cat_id__in=[2,3])[:8],
'article_click': models.Article.objects.filter(status=1,cat_id__in=[2,3]).order_by('-hits')[:8],
'photo_list': models.Article.objects.filter(status=1, cat_id=3)[:6],
'page_num':range(1,page_num+1),
'page_nums':page_num,
'page_count':page_nums,
'curr_page':page,
'next_page':next_page,
'previous_page':previous_page
})
# Detail page
def growth_detail(request,id):
if models.Article.objects.get(id=id):
article_list = models.Article.objects.filter(cat_id=2,status=1).all()
article_info = None
previous_article = None
next_article = None
previous_article_index = 0
next_article_index = 0
for index, article in enumerate(article_list):
if index == 0:
previous_article_index = 0
next_article_index = index + 1
elif index == len(article_list) - 1:
previous_article_index = index - 1
next_article_index = index
else:
previous_article_index = index - 1
next_article_index = index + 1
if article.id == id:
article_info = article
previous_article = article_list[previous_article_index]
next_article = article_list[next_article_index]
break
return render(request, 'blogs/detail.html', {
'web_info':models.System.objects.all()[0],
'BASE_URL': settings.BASE_URL,
'article_info': article_info,
'previous_article': previous_article,
'next_article': next_article,
'article_num': models.Article.objects.filter(cat_id=2, status=1).count(),
'photo_num': models.Article.objects.filter(cat_id=3, status=1).count(),
'link': models.Link.objects.filter(status=1),
'article_news': models.Article.objects.filter(status=1,cat_id__in=[2,3])[:8],
'article_click': models.Article.objects.filter(status=1,cat_id__in=[2,3]).order_by('-hits')[:8],
'photo_list': models.Article.objects.filter(status=1, cat_id=3)[:6],
})
else:
return page_not_found(request)
# My photo albums
def album(request):
# Pagination
page = request.GET.get('page')
if page:
page = int(page)
else:
page = 1
page_num = 0
page_nums = None # total number of pages
page_count = None # total number of records
next_page = None # next page
previous_page = None # previous page
article_all = None
article_all = models.Article.objects.filter(cat_id=3, status=1)
page_counts = 8
paginator = Paginator(article_all, page_counts)
page_num = paginator.num_pages
page_nums = article_all.count()
article_list = paginator.page(page)
article_all = article_list
if article_list.has_next():
next_page = page + 1
else:
next_page = page
if article_list.has_previous():
previous_page = page - 1
else:
previous_page = page
return render(request, 'blogs/album.html', {
'web_info': models.System.objects.all()[0],
'BASE_URL': settings.BASE_URL,
'article_list': article_all,
'page_num': range(1, page_num + 1),
'page_nums': page_num,
'page_count': page_nums,
'curr_page': page,
'next_page': next_page,
'previous_page': previous_page
})
# Album detail
def album_detail(request,id):
article_info = models.Article.objects.get(id=id)
article_info.photos = article_info.photos.split(',')
return render(request, 'blogs/album_detail.html', {
'web_info': models.System.objects.all()[0],
'BASE_URL': settings.BASE_URL,
'article_info': article_info
})
# Message board (guestbook)
def message(request):
# Pagination
page = request.GET.get('page')
if page:
page = int(page)
else:
page = 1
page_num = 0
page_nums = None # total number of pages
page_count = None # total number of records
next_page = None # next page
previous_page = None # previous page
message_all = models.Message.objects.filter(status=0)
page_counts = 8
paginator = Paginator(message_all, page_counts)
page_num = paginator.num_pages
page_nums = message_all.count()
message_list = paginator.page(page)
message_all = message_list
if message_list.has_next():
next_page = page + 1
else:
next_page = page
if message_list.has_previous():
previous_page = page - 1
else:
previous_page = page
return render(request, 'blogs/message.html', {
'web_info': models.System.objects.all()[0],
'BASE_URL':settings.BASE_URL,
'message_all':message_all,
'page_num': range(1, page_num + 1),
'page_nums': page_num,
'page_count': page_nums,
'curr_page': page,
'next_page': next_page,
'previous_page': previous_page
})
# Praise (like) action
@csrf_exempt
def PostPraise(request):
# Article id
article_id = request.POST.get('id')
# Unique visitor identifier
session_id = md5_encrpty(get_ip_address(request))
# Must be a POST request
if request.method != 'POST':
return JsonResponse({"status":'-1',"msg":'must be a post request'})
# Check whether this visitor has already praised the article
if models.ArticlePraiseLog.objects.filter(article_id=article_id,session_id=session_id).first():
return JsonResponse({"status":"2","msg":"您已点赞过,请勿重复操作"})
# Record the praise
models.ArticlePraiseLog.objects.create(article_id=article_id,session_id=session_id,ip=get_ip_address(request))
# Increment the article's praise count
models.Article.objects.filter(id=article_id).update(praise=F('praise')+1)
return JsonResponse({"status":"0","msg":"感谢您的喜爱"})
# Submit a guestbook message
@csrf_exempt
def PostMessage(request):
name = request.POST.get('name')
email = request.POST.get('email')
mycall = request.POST.get('mycall')
content = request.POST.get('content')
# Save the message
models.Message.objects.create(name=name,email=email,mycall=mycall,content=content)
return JsonResponse({"status": "0", "msg": "留言成功"})
def permission_denied(request,template_name='blogs/403.html'):
return render(request,template_name)
def page_not_found(request,template_name='blogs/404.html'):
return render(request,template_name)
def page_error(request,template_name='blogs/500.html'):
return render(request,template_name)
# Get the client IP address
def get_ip_address(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[-1].strip()
else:
ip = request.META.get('REMOTE_ADDR')
return ip
# MD5 hash helper
def md5_encrpty(request):
md = md5()
md.update(request.encode('utf-8'))
return md.hexdigest()
``` |
{
"source": "JoelInTheory/manifest_destined_needs",
"score": 3
} |
#### File: JoelInTheory/manifest_destined_needs/manifest.py
```python
from flask import Flask
from flask_restful import Api
import requests
import json
import lxml.etree as ET
import os
from sys import exit
from copy import deepcopy
url = os.environ.get('MANIFEST_URL')
# [{'xpath': <xpath>,
# 'attrib': <attrib to change>,
# 'newvalue': <newvalue for attrib @ xpath>,
# 'action': <action>}]
filter_json = json.loads(os.environ.get('FILTER_JSON'))
if not url:
exit()
app = Flask(__name__)
api = Api(app)
def get_manifest(url):
r = requests.get(url)
headers = r.headers.items()
xmlr = ET.fromstring(r.content)
return xmlr, headers
@app.route("/manifest.xml", methods=["GET"])
def respond_with_manifest(url = url):
xmlr, headers = get_manifest(url)
for fjson in filter_json:
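# Each filter entry names an XPath target, the attribute to touch and the action to apply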
action = fjson.get('action', 'edit')
xpath = fjson.get('xpath')
attrib = fjson.get('attrib')
newvalue = fjson.get('newvalue')
if action == 'edit':
xmlr.xpath(xpath)[0].attrib[attrib] = newvalue
elif action == 'sendtoback':
element = xmlr.xpath(xpath)[0]
element_copy = deepcopy(element)
parent = element.getparent()
parent.remove(element)
parent.append(element_copy)
elif action == 'sendtofront':
element = xmlr.xpath(xpath)[0]
element_copy = deepcopy(element)
parent = element.getparent()
parent.remove(element)
parent.insert(0, element_copy)
return ET.tostring(xmlr), 200, headers
if __name__ == '__main__':
app.run()
``` |
{
"source": "joel-intito/tm1py",
"score": 2
} |
#### File: tm1py/Tests/MonitoringService.py
```python
import configparser
import unittest
from pathlib import Path
from TM1py.Services import TM1Service
from TM1py.Utils import case_and_space_insensitive_equals
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
PREFIX = "TM1py_Tests_MonitoringService_"
class TestMonitoringMethods(unittest.TestCase):
tm1 = None
@classmethod
def setUpClass(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
def test_get_threads(self):
threads = self.tm1.monitoring.get_threads()
self.assertTrue(any(thread["Function"] == "GET /api/v1/Threads" for thread in threads))
def test_get_active_users(self):
current_user = self.tm1.security.get_current_user()
active_users = self.tm1.monitoring.get_active_users()
self.assertTrue(any(case_and_space_insensitive_equals(user.name, current_user.name) for user in active_users))
def test_user_is_active(self):
current_user = self.tm1.security.get_current_user()
self.assertTrue(self.tm1.monitoring.user_is_active(current_user.name))
def test_get_sessions(self):
current_user = self.tm1.security.get_current_user()
sessions = self.tm1.monitoring.get_sessions()
self.assertTrue(any(case_and_space_insensitive_equals(session["User"]["Name"], current_user.name)
for session
in sessions if session["User"]))
def test_close_all_sessions(self):
self.tm1.monitoring.close_all_sessions()
def test_disconnect_all_users(self):
self.tm1.monitoring.disconnect_all_users()
def test_cancel_all_running_threads(self):
self.tm1.monitoring.cancel_all_running_threads()
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
```
#### File: tm1py/Tests/Process.py
```python
import configparser
import copy
import random
import time
import unittest
import uuid
from pathlib import Path
from TM1py.Objects import Process
from TM1py.Objects import Subset
from TM1py.Services import TM1Service
config = configparser.ConfigParser()
config.read(Path(__file__).parent.joinpath('config.ini'))
PROCESS_PREFIX = 'TM1py_Tests_'
class TestProcessMethods(unittest.TestCase):
tm1 = None
@classmethod
def setUpClass(cls):
cls.tm1 = TM1Service(**config['tm1srv01'])
cls.some_name = "some_name"
cls.all_dimension_names = cls.tm1.dimensions.get_all_names()
cls.random_dimension = cls.tm1.dimensions.get(random.choice(cls.all_dimension_names))
cls.random_dimension_all_elements = cls.random_dimension.default_hierarchy.elements
cls.random_dimension_elements = [element for element in cls.random_dimension_all_elements][0:2]
# None process
cls.p_none = Process(name=PROCESS_PREFIX + '_none_' + cls.some_name, datasource_type='None')
# ACII process
cls.p_ascii = Process(name=PROCESS_PREFIX + '_ascii_' + cls.some_name,
datasource_type='ASCII',
datasource_ascii_delimiter_type='Character',
datasource_ascii_delimiter_char=',',
datasource_ascii_header_records=2,
datasource_ascii_quote_character='^',
datasource_ascii_thousand_separator='~',
prolog_procedure="sTestProlog = 'test prolog procedure'",
metadata_procedure="sTestMeta = 'test metadata procedure'",
data_procedure="sTestData = 'test data procedure'",
epilog_procedure="sTestEpilog = 'test epilog procedure'",
datasource_data_source_name_for_server=r'C:\Data\file.csv',
datasource_data_source_name_for_client=r'C:\Data\file.csv')
# Variables
cls.p_ascii.add_variable('v_1', 'Numeric')
cls.p_ascii.add_variable('v_2', 'Numeric')
cls.p_ascii.add_variable('v_3', 'Numeric')
cls.p_ascii.add_variable('v_4', 'Numeric')
# Parameters
cls.p_ascii.add_parameter('p_Year', 'year?', '2016')
cls.p_ascii.add_parameter('p_Number', 'number?', 2)
# View process
cls.p_view = Process(name=PROCESS_PREFIX + '_view_' + cls.some_name,
datasource_type='TM1CubeView',
datasource_view='view1',
datasource_data_source_name_for_client='Plan_BudgetPlan',
datasource_data_source_name_for_server='Plan_BudgetPlan')
# ODBC process
cls.p_odbc = Process(name=PROCESS_PREFIX + '_odbc_' + cls.some_name,
datasource_type='ODBC',
datasource_password='password',
datasource_user_name='user')
# Subset process
cls.subset_name = PROCESS_PREFIX + '_subset_' + cls.some_name
cls.subset = Subset(dimension_name=cls.random_dimension.name,
subset_name=cls.subset_name,
elements=cls.random_dimension_elements)
cls.tm1.dimensions.subsets.create(cls.subset, False)
cls.p_subset = Process(name=PROCESS_PREFIX + '_subset_' + cls.some_name,
datasource_type='TM1DimensionSubset',
datasource_data_source_name_for_server=cls.subset.dimension_name,
datasource_subset=cls.subset.name,
metadata_procedure="sTest = 'abc';")
with open(Path(__file__).parent.joinpath('resources', 'Bedrock.Server.Wait.json'), 'r') as file:
cls.p_bedrock_server_wait = Process.from_json(file.read())
@classmethod
def setUp(cls):
cls.tm1.processes.create(cls.p_none)
cls.tm1.processes.create(cls.p_ascii)
cls.tm1.processes.create(cls.p_view)
cls.tm1.processes.create(cls.p_odbc)
cls.tm1.processes.create(cls.p_subset)
@classmethod
def tearDown(cls):
cls.tm1.processes.delete(cls.p_none.name)
cls.tm1.processes.delete(cls.p_ascii.name)
cls.tm1.processes.delete(cls.p_view.name)
cls.tm1.processes.delete(cls.p_odbc.name)
cls.tm1.processes.delete(cls.p_subset.name)
def test_execute_process(self):
if not self.tm1.processes.exists(self.p_bedrock_server_wait.name):
self.tm1.processes.create(self.p_bedrock_server_wait)
# with parameters argument
start_time = time.time()
self.tm1.processes.execute(self.p_bedrock_server_wait.name, parameters={"Parameters": [
{"Name": "pWaitSec", "Value": "3"}]})
elapsed_time = time.time() - start_time
self.assertGreater(elapsed_time, 3)
# with kwargs
start_time = time.time()
self.tm1.processes.execute(self.p_bedrock_server_wait.name, pWaitSec="1")
elapsed_time = time.time() - start_time
self.assertGreater(elapsed_time, 1)
# without arguments
self.tm1.processes.execute(self.p_bedrock_server_wait.name)
def test_execute_with_return_success(self):
process = self.p_bedrock_server_wait
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(
process_name=process.name,
pWaitSec=2)
self.assertTrue(success)
self.assertEqual(status, "CompletedSuccessfully")
self.assertIsNone(error_log_file)
# without parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(
process_name=process.name)
self.assertTrue(success)
self.assertEqual(status, "CompletedSuccessfully")
self.assertIsNone(error_log_file)
def test_execute_with_return_compile_error(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'text';sText = 2;"
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(process_name=process.name)
self.assertFalse(success)
self.assertEqual(status, "Aborted")
self.assertIsNotNone(error_log_file)
self.tm1.processes.delete(process.name)
def test_execute_with_return_with_item_reject(self):
process = Process(name=str(uuid.uuid4()))
process.epilog_procedure = "ItemReject('Not Relevant');"
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(process_name=process.name)
self.assertFalse(success)
self.assertEqual(status, "CompletedWithMessages")
self.assertIsNotNone(error_log_file)
self.tm1.processes.delete(process.name)
def test_execute_with_return_with_process_break(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'Something'; ProcessBreak;"
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(
process_name=process.name)
self.assertTrue(success)
self.assertEqual(status, "CompletedSuccessfully")
self.assertIsNone(error_log_file)
self.tm1.processes.delete(process.name)
def test_execute_with_return_with_process_quit(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'Something'; ProcessQuit;"
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(
process_name=process.name)
self.assertFalse(success)
self.assertEqual(status, "QuitCalled")
self.assertIsNone(error_log_file)
self.tm1.processes.delete(process.name)
def test_compile_success(self):
p_good = Process(
name=str(uuid.uuid4()),
prolog_procedure="nPro = DimSiz('}Processes');")
self.tm1.processes.create(p_good)
errors = self.tm1.processes.compile(p_good.name)
self.assertTrue(len(errors) == 0)
self.tm1.processes.delete(p_good.name)
def test_compile_with_errors(self):
p_bad = Process(
name=str(uuid.uuid4()),
prolog_procedure="nPro = DimSize('}Processes');")
self.tm1.processes.create(p_bad)
errors = self.tm1.processes.compile(p_bad.name)
self.assertTrue(len(errors) == 1)
self.assertIn("Variable \"dimsize\" is undefined", errors[0]["Message"])
self.tm1.processes.delete(p_bad.name)
# Unbound Tests
def test_execute_process_with_return_success(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "Sleep(100);"
success, status, error_log_file = self.tm1.processes.execute_process_with_return(process)
self.assertTrue(success)
self.assertEqual(status, "CompletedSuccessfully")
self.assertIsNone(error_log_file)
def test_execute_process_with_return_compile_error(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'text';sText = 2;"
success, status, error_log_file = self.tm1.processes.execute_process_with_return(process)
self.assertFalse(success)
self.assertEqual(status, "Aborted")
self.assertIsNotNone(error_log_file)
def test_execute_process_with_return_with_item_reject(self):
process = Process(name=str(uuid.uuid4()))
process.epilog_procedure = "ItemReject('Not Relevant');"
success, status, error_log_file = self.tm1.processes.execute_process_with_return(process)
self.assertFalse(success)
self.assertEqual(status, "CompletedWithMessages")
self.assertIsNotNone(error_log_file)
def test_execute_process_with_return_with_process_break(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'Something'; ProcessBreak;"
success, status, error_log_file = self.tm1.processes.execute_process_with_return(process)
self.assertTrue(success)
self.assertEqual(status, "CompletedSuccessfully")
self.assertIsNone(error_log_file)
def test_execute_process_with_return_with_process_quit(self):
process = Process(name=str(uuid.uuid4()))
process.prolog_procedure = "sText = 'Something'; ProcessQuit;"
success, status, error_log_file = self.tm1.processes.execute_process_with_return(process)
self.assertFalse(success)
self.assertEqual(status, "QuitCalled")
self.assertIsNone(error_log_file)
def test_compile_process_success(self):
p_good = Process(
name=str(uuid.uuid4()),
prolog_procedure="nPro = DimSiz('}Processes');")
errors = self.tm1.processes.compile_process(p_good)
self.assertTrue(len(errors) == 0)
def test_compile_process_with_errors(self):
p_bad = Process(
name=str(uuid.uuid4()),
prolog_procedure="nPro = DimSize('}Processes');")
errors = self.tm1.processes.compile_process(p_bad)
self.assertTrue(len(errors) == 1)
self.assertIn("Variable \"dimsize\" is undefined", errors[0]["Message"])
# Get Process
def test_get_process(self):
p_ascii_orig = copy.deepcopy(self.p_ascii)
p_none_orig = copy.deepcopy(self.p_none)
p_view_orig = copy.deepcopy(self.p_view)
p_subset_orig = copy.deepcopy(self.p_subset)
p_odbc_orig = copy.deepcopy(self.p_odbc)
p1 = self.tm1.processes.get(p_ascii_orig.name)
self.assertEqual(p1.body, p_ascii_orig.body)
p2 = self.tm1.processes.get(p_none_orig.name)
self.assertEqual(p2.body, p_none_orig.body)
p3 = self.tm1.processes.get(p_view_orig.name)
self.assertEqual(p3.body, p_view_orig.body)
p4 = self.tm1.processes.get(p_odbc_orig.name)
p4.datasource_password = None
p_odbc_orig.datasource_password = None
self.assertEqual(p4.body, p_odbc_orig.body)
p5 = self.tm1.processes.get(p_subset_orig.name)
self.assertEqual(p5.body, p_subset_orig.body)
# Update process
def test_update_process(self):
# get
p = self.tm1.processes.get(self.p_ascii.name)
# modify
p.data_procedure = "SaveDataAll;"
# update on Server
self.tm1.processes.update(p)
# get again
p_ascii_updated = self.tm1.processes.get(p.name)
# assert
self.assertNotEqual(p_ascii_updated.data_procedure, self.p_ascii.data_procedure)
def test_get_error_log_file_content(self):
process = Process(name=str(uuid.uuid4()))
process.epilog_procedure = "ItemReject('Not Relevant');"
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
# with parameters
success, status, error_log_file = self.tm1.processes.execute_with_return(process_name=process.name)
self.assertFalse(success)
self.assertEqual(status, "CompletedWithMessages")
self.assertIsNotNone(error_log_file)
content = self.tm1.processes.get_error_log_file_content(file_name=error_log_file)
self.assertIn("Not Relevant", content)
self.tm1.processes.delete(process.name)
# Delete process
def test_delete_process(self):
process = self.p_bedrock_server_wait
process.name = str(uuid.uuid4())
if not self.tm1.processes.exists(process.name):
self.tm1.processes.create(process)
self.tm1.processes.delete(process.name)
@classmethod
def tearDownClass(cls):
cls.tm1.dimensions.subsets.delete(
dimension_name=cls.subset.dimension_name,
subset_name=cls.subset_name,
private=False)
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
```
#### File: TM1py/Objects/ChoreStartTime.py
```python
import datetime
from TM1py.Objects.TM1Object import TM1Object
class ChoreStartTime(TM1Object):
""" Utility class to handle time representation for Chore Start Time
"""
def __init__(self, year: int, month: int, day: int, hour: int, minute: int, second: int):
"""
:param year: year
:param month: month
:param day: day
:param hour: hour or None
:param minute: minute or None
:param second: second or None
"""
self._datetime = datetime.datetime.combine(datetime.date(year, month, day), datetime.time(hour, minute, second))
@classmethod
def from_string(cls, start_time_string: str) -> 'ChoreStartTime':
# f to handle strange timestamp 2016-09-25T20:25Z instead of common 2016-09-25T20:25:01Z
f = lambda x: int(x) if x else 0
return cls(year=f(start_time_string[0:4]),
month=f(start_time_string[5:7]),
day=f(start_time_string[8:10]),
hour=f(start_time_string[11:13]),
minute=f(start_time_string[14:16]),
second=f(start_time_string[17:19]))
@property
def start_time_string(self) -> str:
return self._datetime.strftime("%Y-%m-%dT%H:%M:%SZ")
@property
def datetime(self) -> datetime.datetime:
return self._datetime
def __str__(self):
return self.start_time_string
def set_time(self, year: int = None, month: int = None, day: int = None, hour: int = None, minute: int = None,
second: int = None):
if year:
self._datetime = self._datetime.replace(year=year)
if month:
self._datetime = self._datetime.replace(month=month)
if day:
self._datetime = self._datetime.replace(day=day)
if hour:
self._datetime = self._datetime.replace(hour=hour)
if minute:
self._datetime = self._datetime.replace(minute=minute)
if second:
self._datetime = self._datetime.replace(second=second)
def add(self, days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0):
self._datetime = self._datetime + datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
def subtract(self, days: int = 0, hours: int = 0, minutes: int = 0, seconds: int = 0):
self._datetime = self._datetime - datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)
```
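A minimal usage sketch for `ChoreStartTime` (the timestamp below is invented, not taken from a real chore): `from_string` tolerates the short `2016-09-25T20:25Z` form by defaulting the missing seconds to zero, and the mutator methods shift or overwrite individual components before serializing back to the TM1 wire format.
```python
# Hedged example; the timestamp is a made-up value, not read from a server.
from TM1py.Objects.ChoreStartTime import ChoreStartTime

start = ChoreStartTime.from_string("2016-09-25T20:25Z")   # seconds missing -> 0
print(start.start_time_string)                            # 2016-09-25T20:25:00Z

start.add(days=1, hours=2)      # push the schedule forward by a day and two hours
start.set_time(minute=30)       # overwrite a single component, others unchanged
print(start.start_time_string)  # 2016-09-26T22:30:00Z
```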
#### File: TM1py/Services/CellService.py
```python
import functools
import json
import uuid
import warnings
from collections import OrderedDict
from io import StringIO
from typing import List, Union, Dict, Iterable
import pandas as pd
from requests import Response
from TM1py.Exceptions.Exceptions import TM1pyException
from TM1py.Objects.Process import Process
from TM1py.Services.ObjectService import ObjectService
from TM1py.Services.RestService import RestService
from TM1py.Services.ViewService import ViewService
from TM1py.Utils import Utils, CaseAndSpaceInsensitiveSet, format_url
from TM1py.Utils.Utils import build_pandas_dataframe_from_cellset, dimension_name_from_element_unique_name, \
CaseAndSpaceInsensitiveTuplesDict, abbreviate_mdx
# Overrides the deletion of cellset
DELETE_CELLSET = False
def tidy_cellset(func):
""" Higher Order Function to tidy up cellset after usage
"""
@functools.wraps(func)
def wrapper(self, cellset_id, *args, **kwargs):
try:
return func(self, cellset_id, *args, **kwargs)
finally:
if kwargs.get("delete_cellset", True) and DELETE_CELLSET:
self.delete_cellset(cellset_id=cellset_id)
return wrapper
class CellService(ObjectService):
""" Service to handle Read and Write operations to TM1 cubes
"""
def __init__(self, tm1_rest: RestService):
"""
:param tm1_rest: instance of RestService
"""
super().__init__(tm1_rest)
def get_value(self, cube_name: str, element_string: str, dimensions: List[str] = None,
**kwargs) -> Union[str, float]:
""" Element_String describes the Dimension-Hierarchy-Element arrangement
:param cube_name: Name of the cube
:param element_string: "Hierarchy1::Element1 && Hierarchy2::Element4, Element9, Element2"
- Dimensions are not specified! They are derived from the position.
- The "," separates the element selections
- If more than one hierarchy is selected per dimension, "&&" splits the element selections
- If no hierarchy is specified, the default hierarchy will be addressed
:param dimensions: List of dimension names in correct order
:return:
"""
mdx_template = "SELECT {} ON ROWS, {} ON COLUMNS FROM [{}]"
mdx_rows_list = []
from TM1py.Services.CubeService import CubeService
if not dimensions:
dimensions = CubeService(self._rest).get(cube_name).dimensions
element_selections = element_string.split(',')
# Build the ON ROWS statement:
# Loop through the comma-separated element selections, except for the last one
for dimension_name, element_selection in zip(dimensions[:-1], element_selections[:-1]):
if "&&" not in element_selection:
mdx_rows_list.append("{[" + dimension_name + "].[" + dimension_name + "].[" + element_selection + "]}")
else:
for element_selection_part in element_selection.split('&&'):
hierarchy_name, element_name = element_selection_part.split('::')
mdx_rows_list.append("{[" + dimension_name + "].[" + hierarchy_name + "].[" + element_name + "]}")
mdx_rows = "*".join(mdx_rows_list)
# Build the ON COLUMNS statement from last dimension
mdx_columns = ""
if "&&" not in element_selections[-1]:
mdx_columns = "{[" + dimensions[-1] + "].[" + dimensions[-1] + "].[" + element_selections[-1] + "]}"
else:
mdx_columns_list = []
for element_selection_part in element_selections[-1].split('&&'):
hierarchy_name, element_name = element_selection_part.split('::')
mdx_columns_list.append("{[" + dimensions[-1] + "].[" + hierarchy_name + "].[" + element_name + "]}")
mdx_columns = "*".join(mdx_columns_list)
# Construct final MDX
mdx = mdx_template.format(mdx_rows, mdx_columns, cube_name)
# Execute MDX
cellset = dict(self.execute_mdx(mdx, **kwargs))
return next(iter(cellset.values()))["Value"]
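# Illustration with hypothetical names: for a cube 'Sales' with dimensions
# ['Region', 'Product', 'Measure'], the element_string
#     "Region::Europe && Region By Manager::North, Chai, Amount"
# selects Europe and North (two hierarchies of the first dimension) together
# with Chai on the rows, while the last selection (Amount) always ends up on
# the MDX columns axis built below.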
def relative_proportional_spread(
self,
value: float,
cube: str,
unique_element_names: Iterable[str],
reference_unique_element_names: Iterable[str],
reference_cube: str = None,
**kwargs) -> Response:
""" Execute relative proportional spread
:param value: value to be spread
:param cube: name of the cube
:param unique_element_names: target cell coordinates as unique element names (e.g. ["[d1].[c1]","[d2].[e3]"])
:param reference_cube: name of the reference cube. Can be None
:param reference_unique_element_names: reference cell coordinates as unique element names
:return:
"""
mdx = """
SELECT
{{ {rows} }} ON 0
FROM [{cube}]
""".format(rows="}*{".join(unique_element_names), cube=cube)
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
payload = {
"BeginOrdinal": 0,
"Value": "RP" + str(value),
"<EMAIL>": list(),
"<EMAIL>":
format_url("Cubes('{}')", reference_cube if reference_cube else cube)}
for unique_element_name in reference_unique_element_names:
payload["<EMAIL>"].append(
format_url(
"Dimensions('{}')/Hierarchies('{}')/Elements('{}')",
*Utils.dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)))
return self._post_against_cellset(cellset_id=cellset_id, payload=payload, delete_cellset=True, **kwargs)
def clear_spread(
self,
cube: str,
unique_element_names: Iterable[str],
**kwargs) -> Response:
""" Execute clear spread
:param cube: name of the cube
:param unique_element_names: target cell coordinates as unique element names (e.g. ["[d1].[c1]","[d2].[e3]"])
:return:
"""
mdx = """
SELECT
{{ {rows} }} ON 0
FROM [{cube}]
""".format(rows="}*{".join(unique_element_names), cube=cube)
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
payload = {
"BeginOrdinal": 0,
"Value": "C",
"<EMAIL>": list()}
for unique_element_name in unique_element_names:
payload["<EMAIL>"].append(
format_url(
"Dimensions('{}')/Hierarchies('{}')/Elements('{}')",
*Utils.dimension_hierarchy_element_tuple_from_unique_name(unique_element_name)))
return self._post_against_cellset(cellset_id=cellset_id, payload=payload, delete_cellset=True, **kwargs)
def clear_with_mdx(self, cube: str, mdx: str, **kwargs):
view_name = "".join(['}TM1py', str(uuid.uuid4())])
code = "".join([
f"ViewCreateByMdx('{cube}','{view_name}','{mdx}',1);",
f"ViewZeroOut('{cube}','{view_name}');"])
process = Process(name="")
process.prolog_procedure = code
from TM1py import ProcessService
process_service = ProcessService(self._rest)
try:
success, _, _ = process_service.execute_process_with_return(process, **kwargs)
if not success:
raise TM1pyException(f"Failed to clear cube: '{cube}' with mdx: '{abbreviate_mdx(mdx, 100)}'")
finally:
view_service = ViewService(self._rest)
if view_service.exists(cube, view_name, private=False):
view_service.delete(cube, view_name, private=False)
@tidy_cellset
def _post_against_cellset(self, cellset_id: str, payload: Dict, **kwargs) -> Response:
""" Execute a post request against a cellset
:param cellset_id:
:param payload:
:param kwargs:
:return:
"""
url = format_url("/api/v1/Cellsets('{}')/tm1.Update", cellset_id)
return self._rest.POST(url=url, data=json.dumps(payload), **kwargs)
def get_dimension_names_for_writing(self, cube_name: str, **kwargs) -> List[str]:
""" Get dimensions of a cube. Skip sandbox dimension
:param cube_name:
:param kwargs:
:return:
"""
from TM1py.Services import CubeService
cube_service = CubeService(self._rest)
dimensions = cube_service.get_dimension_names(cube_name, True, **kwargs)
return dimensions
def write_value(self, value: Union[str, float], cube_name: str, element_tuple: Iterable,
dimensions: Iterable[str] = None, **kwargs) -> Response:
""" Write value into cube at specified coordinates
:param value: the actual value
:param cube_name: name of the target cube
:param element_tuple: target coordinates
:param dimensions: optional. Dimension names in their natural order. Will speed up the execution!
:return: response
"""
if not dimensions:
dimensions = self.get_dimension_names_for_writing(cube_name=cube_name)
url = format_url("/api/v1/Cubes('{}')/tm1.Update", cube_name)
body_as_dict = OrderedDict()
body_as_dict["Cells"] = [{}]
body_as_dict["Cells"][0]["<EMAIL>"] = [
format_url("Dimensions('{}')/Hierarchies('{}')/Elements('{}')", dim, dim, elem)
for dim, elem
in zip(dimensions, element_tuple)]
body_as_dict["Value"] = str(value) if value else ""
data = json.dumps(body_as_dict, ensure_ascii=False)
return self._rest.POST(url=url, data=data, **kwargs)
def write_values(self, cube_name: str, cellset_as_dict: Dict, dimensions: Iterable[str] = None,
**kwargs) -> Response:
""" Write values in cube.
For cellsets with > 1000 cells look into "write_values_through_cellset"
:param cube_name: name of the cube
:param cellset_as_dict: {(elem_a, elem_b, elem_c): 243, (elem_d, elem_e, elem_f) : 109}
:param dimensions: optional. Dimension names in their natural order. Will speed up the execution!
:return: Response
"""
if not dimensions:
dimensions = self.get_dimension_names_for_writing(cube_name=cube_name)
url = format_url("/api/v1/Cubes('{}')/tm1.Update", cube_name)
updates = []
for element_tuple, value in cellset_as_dict.items():
body_as_dict = OrderedDict()
body_as_dict["Cells"] = [{}]
body_as_dict["Cells"][0]["<EMAIL>"] = [
format_url(
"Dimensions('{}')/Hierarchies('{}')/Elements('{}')",
dim, dim, elem)
for dim, elem
in zip(dimensions, element_tuple)]
body_as_dict["Value"] = str(value) if value else ""
updates.append(json.dumps(body_as_dict, ensure_ascii=False))
updates = '[' + ','.join(updates) + ']'
return self._rest.POST(url=url, data=updates, **kwargs)
def write_values_through_cellset(self, mdx: str, values: List, **kwargs) -> Response:
""" Significantly faster than write_values function
Cellset gets created according to MDX Expression. For instance:
[[61, 29 ,13],
[42, 54, 15],
[17, 28, 81]]
Each value in the cellset can be addressed through its position: The ordinal integer value.
Ordinal-enumeration goes from top to bottom from left to right
Number 61 has Ordinal 0, 29 has Ordinal 1, etc.
The order of the iterable determines the insertion point in the cellset.
For instance:
[91, 85, 72, 68, 51, 42, 35, 28, 11]
would lead to:
[[91, 85 ,72],
[68, 51, 42],
[35, 28, 11]]
When writing large datasets into TM1 Cubes it can be convenient to call this function asynchronously.
:param mdx: Valid MDX Expression.
:param values: List of values. The Order of the List/ Iterable determines the insertion point in the cellset.
:return:
"""
cellset_id = self.create_cellset(mdx, **kwargs)
return self.update_cellset(cellset_id=cellset_id, values=values, **kwargs)
@tidy_cellset
def update_cellset(self, cellset_id: str, values: List, **kwargs) -> Response:
""" Write values into cellset
Number of values must match the number of cells in the cellset
:param cellset_id:
:param values: iterable with Numeric and String values
:return:
"""
request = format_url("/api/v1/Cellsets('{}')/Cells", cellset_id)
data = []
for o, value in enumerate(values):
data.append({
"Ordinal": o,
"Value": value
})
return self._rest.PATCH(request, json.dumps(data, ensure_ascii=False), **kwargs)
def execute_mdx(self, mdx: str, cell_properties: List[str] = None, top: int = None, skip_contexts: bool = False,
skip: int = None, **kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" Execute MDX and return the cells with their properties
:param mdx: MDX Query, as string
:param cell_properties: properties to be queried from the cell. E.g. Value, Ordinal, RuleDerived, ...
:param top: integer
:param skip: integer
:param skip_contexts: skip elements from titles / contexts in response
:return: content in sweet concise structure.
"""
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
return self.extract_cellset(
cellset_id=cellset_id,
cell_properties=cell_properties,
top=top,
skip=skip,
skip_contexts=skip_contexts,
delete_cellset=True,
**kwargs)
def execute_view(self, cube_name: str, view_name: str, cell_properties: Iterable[str] = None, private: bool = False,
top: int = None, skip_contexts: bool = False, skip: int = None,
**kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" get view content as dictionary with sweet and concise structure.
Works on NativeView and MDXView !
:param cube_name: String
:param view_name: String
:param cell_properties: List, cell properties: [Values, Status, HasPicklist, etc.]
:param private: Boolean
:param top: Int, number of cells to return (counting from top)
:param skip: Int, number of cells to skip (counting from top)
:param skip_contexts: skip elements from titles / contexts in response
:return: Dictionary : {([dim1].[elem1], [dim2][elem6]): {'Value':3127.312, 'Ordinal':12} .... }
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset(
cellset_id=cellset_id,
cell_properties=cell_properties,
top=top,
skip=skip,
skip_contexts=skip_contexts,
delete_cellset=True,
**kwargs)
def execute_mdx_raw(
self,
mdx: str,
cell_properties: Iterable[str] = None,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
top: int = None,
skip_contexts: bool = False,
skip: int = None,
**kwargs) -> Dict:
""" Execute MDX and return the raw data from TM1
:param mdx: String, a valid MDX Query
:param cell_properties: List of properties to be queried from the cell. E.g. ['Value', 'RuleDerived', ...]
:param elem_properties: List of properties to be queried from the elements. E.g. ['Name','Attributes', ...]
:param member_properties: List of properties to be queried from the members. E.g. ['Name','Attributes', ...]
:param top: Integer limiting the number of cells and the number of rows returned
:param skip: Integer limiting the number of cells and the number of rows returned
:param skip_contexts: skip elements from titles / contexts in response
:return: Raw format from TM1.
"""
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
return self.extract_cellset_raw(
cellset_id=cellset_id,
cell_properties=cell_properties,
elem_properties=elem_properties,
member_properties=member_properties,
top=top,
skip=skip,
delete_cellset=True,
skip_contexts=skip_contexts,
**kwargs)
def execute_view_raw(
self,
cube_name: str,
view_name: str,
private: bool = False,
cell_properties: Iterable[str] = None,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
top: int = None,
skip_contexts: bool = False,
skip: int = None,
**kwargs) -> Dict:
""" Execute a cube view and return the raw data from TM1
:param cube_name: String, name of the cube
:param view_name: String, name of the view
:param private: True (private) or False (public)
:param cell_properties: List of properties to be queried from the cell. E.g. ['Value', 'RuleDerived', ...]
:param elem_properties: List of properties to be queried from the elements. E.g. ['Name','Attributes', ...]
:param member_properties: List of properties to be queried from the members. E.g. ['Name','Attributes', ...]
:param top: Integer limiting the number of cells and the number of rows returned
:param skip_contexts: skip elements from titles / contexts in response
:param skip: Integer limiting the number of cells and the number of rows returned
:return: Raw format from TM1.
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_raw(
cellset_id=cellset_id,
cell_properties=cell_properties,
elem_properties=elem_properties,
member_properties=member_properties,
top=top,
skip=skip,
skip_contexts=skip_contexts,
delete_cellset=True,
**kwargs)
def execute_mdx_values(self, mdx: str, **kwargs):
""" Optimized for performance. Query only raw cell values.
Coordinates are omitted !
:param mdx: a valid MDX Query
:return: Generator of cell values
"""
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
return self.extract_cellset_values(cellset_id, delete_cellset=True, **kwargs)
def execute_view_values(self, cube_name: str, view_name: str, private: bool = False, **kwargs):
""" Execute view and retrieve only the cell values
:param cube_name:
:param view_name:
:param private:
:param kwargs:
:return:
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_values(cellset_id, delete_cellset=True, **kwargs)
def execute_mdx_rows_and_values(self, mdx: str, element_unique_names: bool = True,
**kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" Execute MDX and retrieve row element names and values in a case and space insensitive dictionary
:param mdx:
:param element_unique_names:
:param kwargs:
:return:
"""
cellset_id = self.create_cellset(mdx=mdx, **kwargs)
return self.extract_cellset_rows_and_values(cellset_id, element_unique_names, delete_cellset=True, **kwargs)
def execute_view_rows_and_values(self, cube_name: str, view_name: str, private: bool = False,
element_unique_names: bool = True, **kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" Execute cube view and retrieve row element names and values in a case and space insensitive dictionary
:param cube_name:
:param view_name:
:param private:
:param element_unique_names:
:param kwargs:
:return:
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_rows_and_values(cellset_id, element_unique_names, delete_cellset=True, **kwargs)
def execute_mdx_csv(self, mdx: str, **kwargs) -> str:
""" Optimized for performance. Get csv string of coordinates and values.
Context dimensions are omitted !
Cells with Zero/null are omitted !
:param mdx: Valid MDX Query
:return: String
"""
cellset_id = self.create_cellset(mdx, **kwargs)
return self.extract_cellset_csv(cellset_id=cellset_id, delete_cellset=True, **kwargs)
def execute_view_csv(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> str:
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private)
return self.extract_cellset_csv(cellset_id=cellset_id, delete_cellset=True, **kwargs)
def execute_mdx_dataframe(self, mdx: str, **kwargs) -> pd.DataFrame:
""" Optimized for performance. Get Pandas DataFrame from MDX Query.
Context dimensions are omitted in the resulting Dataframe !
Cells with Zero/null are omitted !
Takes all arguments from the pandas.read_csv method:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
:param mdx: Valid MDX Query
:return: Pandas Dataframe
"""
cellset_id = self.create_cellset(mdx, **kwargs)
return self.extract_cellset_dataframe(cellset_id, **kwargs)
def execute_view_dataframe_pivot(self, cube_name: str, view_name: str, private: bool = False, dropna: bool = False,
fill_value: bool = None, **kwargs) -> pd.DataFrame:
""" Execute a cube view to get a pandas pivot dataframe, in the shape of the cube view
:param cube_name:
:param view_name:
:param private:
:param dropna:
:param fill_value:
:return:
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_dataframe_pivot(
cellset_id=cellset_id,
dropna=dropna,
fill_value=fill_value,
**kwargs)
def execute_mdx_dataframe_pivot(self, mdx: str, dropna: bool = False, fill_value: bool = None) -> pd.DataFrame:
""" Execute MDX Query to get a pandas pivot data frame in the shape as specified in the Query
:param mdx:
:param dropna:
:param fill_value:
:return:
"""
cellset_id = self.create_cellset(mdx=mdx)
return self.extract_cellset_dataframe_pivot(
cellset_id=cellset_id,
dropna=dropna,
fill_value=fill_value)
def execute_view_dataframe(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> pd.DataFrame:
""" Optimized for performance. Get Pandas DataFrame from an existing Cube View
Context dimensions are omitted in the resulting Dataframe !
Cells with Zero/null are omitted !
Takes all arguments from the pandas.read_csv method:
https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html
:param cube_name: Name of the cube
:param view_name:
:param private:
:return: Pandas Dataframe
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_dataframe(cellset_id, **kwargs)
def execute_mdx_cellcount(self, mdx: str, **kwargs) -> int:
""" Execute MDX in order to understand how many cells are in a cellset.
Only return number of cells in the cellset. FAST!
:param mdx: MDX Query, as string
:return: Number of Cells in the CellSet
"""
cellset_id = self.create_cellset(mdx, **kwargs)
return self.extract_cellset_cellcount(cellset_id, delete_cellset=True, **kwargs)
def execute_view_cellcount(self, cube_name: str, view_name: str, private: bool = False, **kwargs) -> int:
""" Execute cube view in order to understand how many cells are in a cellset.
Only return number of cells in the cellset. FAST!
:param cube_name: cube name
:param view_name: view name
:param private: True (private) or False (public)
:return:
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
return self.extract_cellset_cellcount(cellset_id, delete_cellset=True, **kwargs)
def execute_mdx_rows_and_values_string_set(
self,
mdx: str,
exclude_empty_cells: bool = True,
**kwargs) -> CaseAndSpaceInsensitiveSet:
""" Retrieve row element names and **string** cell values in a case and space insensitive set
:param exclude_empty_cells:
:param mdx:
:return:
"""
rows_and_values = self.execute_mdx_rows_and_values(mdx, element_unique_names=False, **kwargs)
return self._extract_string_set_from_rows_and_values(rows_and_values, exclude_empty_cells)
def execute_view_rows_and_values_string_set(self, cube_name: str, view_name: str, private: bool = False,
exclude_empty_cells: bool = True,
**kwargs) -> CaseAndSpaceInsensitiveSet:
""" Retrieve row element names and **string** cell values in a case and space insensitive set
:param cube_name:
:param view_name:
:param private:
:param exclude_empty_cells:
:return:
"""
rows_and_values = self.execute_view_rows_and_values(cube_name, view_name, private, False, **kwargs)
return self._extract_string_set_from_rows_and_values(rows_and_values, exclude_empty_cells)
def execute_mdx_ui_dygraph(
self,
mdx: str,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
value_precision: int = 2,
top: int = None,
skip: int = None,
**kwargs) -> Dict:
""" Execute MDX get dygraph dictionary
Useful for grids or charting libraries that want an array of cell values per column
Returns 3-dimensional cell structure for tabbed grids or multiple charts
Example 'cells' return format:
'cells': {
'10100': [
['Q1-2004', 28981046.50724231, 19832724.72429739],
['Q2-2004', 29512482.207418434, 20365654.788303416],
['Q3-2004', 29913730.038971487, 20729201.329183243],
['Q4-2004', 29563345.9542385, 20480205.20121749]],
'10200': [
['Q1-2004', 13888143.710000003, 9853293.623709997],
['Q2-2004', 14300216.43, 10277650.763958748],
['Q3-2004', 14502421.63, 10466934.096533755],
['Q4-2004', 14321501.940000001, 10333095.839474997]]
},
:param top:
:param skip:
:param mdx: String, valid MDX Query
:param elem_properties: List of properties to be queried from the elements. E.g. ['UniqueName','Attributes']
:param member_properties: List of properties to be queried from the members. E.g. ['UniqueName','Attributes']
:param value_precision: Integer (optional) specifying number of decimal places to return
:return: dict: { titles: [], headers: [axis][], cells: { Page0: [ [column name, column values], [], ... ], ...}}
"""
cellset_id = self.create_cellset(mdx)
data = self.extract_cellset_raw(cellset_id=cellset_id,
cell_properties=["Value"],
elem_properties=elem_properties,
member_properties=list(set(member_properties or []) | {"Name"}),
top=top,
skip=skip,
delete_cellset=True,
**kwargs)
return Utils.build_ui_dygraph_arrays_from_cellset(raw_cellset_as_dict=data, value_precision=value_precision)
def execute_view_ui_dygraph(
self,
cube_name: str,
view_name: str,
private: bool = False,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
value_precision: int = 2,
top: int = None,
skip: int = None,
**kwargs):
"""
Useful for grids or charting libraries that want an array of cell values per row.
Returns 3-dimensional cell structure for tabbed grids or multiple charts.
Rows and pages are dicts, addressable by their name. Proper order of rows can be obtained in headers[1]
Example 'cells' return format:
'cells': {
'10100': {
'Net Operating Income': [ 19832724.72429739,
20365654.788303416,
20729201.329183243,
20480205.20121749],
'Revenue': [ 28981046.50724231,
29512482.207418434,
29913730.038971487,
29563345.9542385]},
'10200': {
'Net Operating Income': [ 9853293.623709997,
10277650.763958748,
10466934.096533755,
10333095.839474997],
'Revenue': [ 13888143.710000003,
14300216.43,
14502421.63,
14321501.940000001]}
},
:param top:
:param skip:
:param cube_name: cube name
:param view_name: view name
:param private: True (private) or False (public)
:param elem_properties: List of properties to be queried from the elements. E.g. ['UniqueName','Attributes']
:param member_properties: List of properties to be queried from the members. E.g. ['UniqueName','Attributes']
:param value_precision: Integer (optional) specifying number of decimal places to return
:return:
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
data = self.extract_cellset_raw(cellset_id=cellset_id,
cell_properties=["Value"],
elem_properties=elem_properties,
member_properties=list(set(member_properties or []) | {"Name"}),
top=top,
skip=skip,
delete_cellset=True,
**kwargs)
return Utils.build_ui_dygraph_arrays_from_cellset(raw_cellset_as_dict=data, value_precision=value_precision)
def execute_mdx_ui_array(
self,
mdx: str,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
value_precision: int = 2,
top: int = None,
skip: int = None,
**kwargs):
"""
Useful for grids or charting libraries that want an array of cell values per row.
Returns 3-dimensional cell structure for tabbed grids or multiple charts.
Rows and pages are dicts, addressable by their name. Proper order of rows can be obtained in headers[1]
Example 'cells' return format:
'cells': {
'10100': {
'Net Operating Income': [ 19832724.72429739,
20365654.788303416,
20729201.329183243,
20480205.20121749],
'Revenue': [ 28981046.50724231,
29512482.207418434,
29913730.038971487,
29563345.9542385]},
'10200': {
'Net Operating Income': [ 9853293.623709997,
10277650.763958748,
10466934.096533755,
10333095.839474997],
'Revenue': [ 13888143.710000003,
14300216.43,
14502421.63,
14321501.940000001]}
},
:param top:
:param skip:
:param mdx: a valid MDX Query
:param elem_properties: List of properties to be queried from the elements. E.g. ['UniqueName','Attributes']
:param member_properties: List of properties to be queried from the members. E.g. ['UniqueName','Attributes']
:param value_precision: Integer (optional) specifying number of decimal places to return
:return: dict :{ titles: [], headers: [axis][], cells:{ Page0:{ Row0:{ [row values], Row1: [], ...}, ...}, ...}}
"""
cellset_id = self.create_cellset(mdx, **kwargs)
data = self.extract_cellset_raw(cellset_id=cellset_id,
cell_properties=["Value"],
elem_properties=elem_properties,
member_properties=list(set(member_properties or []) | {"Name"}),
top=top,
skip=skip,
delete_cellset=True,
**kwargs)
return Utils.build_ui_arrays_from_cellset(raw_cellset_as_dict=data, value_precision=value_precision)
def execute_view_ui_array(
self,
cube_name: str,
view_name: str,
private: bool = False,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
value_precision: int = 2,
top: int = None,
skip: int = None,
**kwargs):
"""
Useful for grids or charting libraries that want an array of cell values per row.
Returns 3-dimensional cell structure for tabbed grids or multiple charts.
Rows and pages are dicts, addressable by their name. Proper order of rows can be obtained in headers[1]
Example 'cells' return format:
'cells': {
'10100': {
'Net Operating Income': [ 19832724.72429739,
20365654.788303416,
20729201.329183243,
20480205.20121749],
'Revenue': [ 28981046.50724231,
29512482.207418434,
29913730.038971487,
29563345.9542385]},
'10200': {
'Net Operating Income': [ 9853293.623709997,
10277650.763958748,
10466934.096533755,
10333095.839474997],
'Revenue': [ 13888143.710000003,
14300216.43,
14502421.63,
14321501.940000001]}
},
:param top:
:param skip:
:param cube_name: cube name
:param view_name: view name
:param private: True (private) or False (public)
:param elem_properties: List of properties to be queried from the elements. E.g. ['UniqueName','Attributes']
:param member_properties: List of properties to be queried from the member. E.g. ['Name', 'UniqueName']
:param value_precision: Integer (optional) specifying number of decimal places to return
:return: dict :{ titles: [], headers: [axis][], cells:{ Page0:{ Row0: {[row values], Row1: [], ...}, ...}, ...}}
"""
cellset_id = self.create_cellset_from_view(cube_name=cube_name, view_name=view_name, private=private, **kwargs)
data = self.extract_cellset_raw(cellset_id=cellset_id,
cell_properties=["Value"],
elem_properties=elem_properties,
member_properties=list(set(member_properties or []) | {"Name"}),
top=top,
skip=skip,
delete_cellset=True,
**kwargs)
return Utils.build_ui_arrays_from_cellset(raw_cellset_as_dict=data, value_precision=value_precision)
@tidy_cellset
def extract_cellset_raw(
self,
cellset_id: str,
cell_properties: Iterable[str] = None,
elem_properties: Iterable[str] = None,
member_properties: Iterable[str] = None,
top: int = None,
skip: int = None,
skip_contexts: bool = False,
**kwargs) -> Dict:
""" Extract full cellset data and return the raw data from TM1
:param cellset_id: String; ID of existing cellset
:param cell_properties: List of properties to be queried from cells. E.g. ['Value', 'RuleDerived', ...]
:param elem_properties: List of properties to be queried from elements. E.g. ['UniqueName','Attributes', ...]
:param member_properties: List of properties to be queried from the member. E.g. ['Name', 'UniqueName']
:param top: Integer limiting the number of cells and the number of rows returned
:param skip: Integer limiting the number of cells and the number of rows returned
:param skip_contexts:
:return: Raw format from TM1.
"""
if not cell_properties:
cell_properties = ['Value']
# select Name property if member_properties is None or empty.
# Necessary, as tm1 default behaviour is to return all properties if no $select is specified in the request.
if member_properties is None or len(list(member_properties)) == 0:
member_properties = ["Name"]
select_member_properties = "$select={}".format(",".join(member_properties))
expand_elem_properties = ";$expand=Element($select={elem_properties})".format(
elem_properties=",".join(elem_properties)) \
if elem_properties is not None and len(list(elem_properties)) > 0 \
else ""
filter_axis = "$filter=Ordinal ne 2;" if skip_contexts else ""
url = "/api/v1/Cellsets('{cellset_id}')?$expand=" \
"Cube($select=Name;$expand=Dimensions($select=Name))," \
"Axes({filter_axis}$expand=Tuples($expand=Members({select_member_properties}" \
"{expand_elem_properties}){top_rows}))," \
"Cells($select={cell_properties}{top_cells}{skip_cells})" \
.format(cellset_id=cellset_id,
top_rows=f";$top={top}" if top and not skip else "",
cell_properties=",".join(cell_properties),
filter_axis=filter_axis,
select_member_properties=select_member_properties,
expand_elem_properties=expand_elem_properties,
top_cells=f";$top={top}" if top else "",
skip_cells=f";$skip={skip}" if skip else "")
response = self._rest.GET(url=url, **kwargs)
return response.json()
@tidy_cellset
def extract_cellset_values(self, cellset_id: str, **kwargs):
""" Extract cellset data and return only the cells and values
:param cellset_id: String; ID of existing cellset
:return: Raw format from TM1.
"""
url = format_url("/api/v1/Cellsets('{}')?$expand=Cells($select=Value)", cellset_id)
response = self._rest.GET(url=url, **kwargs)
return (cell["Value"] for cell in response.json()["Cells"])
@tidy_cellset
def extract_cellset_rows_and_values(self, cellset_id: str, element_unique_names: bool = True,
**kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" Retrieve row element names and values in a case and space insensitive dictionary
:param cellset_id:
:param element_unique_names:
:param kwargs:
:return:
"""
url = "/api/v1/Cellsets('{}')?$expand=" \
"Axes($filter=Ordinal eq 1;$expand=Tuples(" \
"$expand=Members($select=Element;$expand=Element($select={}))))," \
"Cells($select=Value)".format(cellset_id, "UniqueName" if element_unique_names else "Name")
response = self._rest.GET(url=url, **kwargs)
response_json = response.json()
rows = response_json["Axes"][0]["Tuples"]
cell_values = [cell["Value"] for cell in response_json["Cells"]]
result = CaseAndSpaceInsensitiveTuplesDict()
number_rows = len(rows)
# avoid division by zero
if not number_rows:
return result
number_cells = len(cell_values)
number_columns = int(number_cells / number_rows)
cell_values_by_row = [cell_values[cell_counter:cell_counter + number_columns]
for cell_counter
in range(0, number_cells, number_columns)]
element_names_by_row = [tuple(member["Element"]["UniqueName" if element_unique_names else "Name"]
for member
in tupl["Members"])
for tupl
in rows]
for element_tuple, cells in zip(element_names_by_row, cell_values_by_row):
result[element_tuple] = cells
return result
@tidy_cellset
def extract_cellset_composition(self, cellset_id: str, **kwargs):
""" Retrieve composition of dimensions on the axes in the cellset
:param cellset_id:
:param kwargs:
:return:
"""
url = "/api/v1/Cellsets('{}')?$expand=" \
"Cube($select=Name)," \
"Axes($expand=Hierarchies($select=UniqueName))".format(cellset_id)
response = self._rest.GET(url=url, **kwargs)
response_json = response.json()
cube = response_json["Cube"]["Name"]
rows, titles, columns = [], [], []
if response_json["Axes"][0]["Hierarchies"]:
columns = [hierarchy["UniqueName"] for hierarchy in response_json["Axes"][0]["Hierarchies"]]
if response_json["Axes"][1]["Hierarchies"]:
rows = [hierarchy["UniqueName"] for hierarchy in response_json["Axes"][1]["Hierarchies"]]
if len(response_json["Axes"]) > 2:
titles = [hierarchy["UniqueName"] for hierarchy in response_json["Axes"][2]["Hierarchies"]]
return cube, titles, rows, columns
@tidy_cellset
def extract_cellset_cellcount(self, cellset_id: str, **kwargs) -> int:
""" Retrieve number of cells in the cellset
:param cellset_id:
:param kwargs:
:return:
"""
url = "/api/v1/Cellsets('{}')/Cells/$count".format(cellset_id)
response = self._rest.GET(url, **kwargs)
return int(response.content)
@tidy_cellset
def extract_cellset_csv(self, cellset_id: str, **kwargs) -> str:
""" Execute cellset and return only the 'Content', in csv format
:param cellset_id: String; ID of existing cellset
:return: Raw format from TM1.
"""
url = "/api/v1/Cellsets('{}')/Content".format(cellset_id)
data = self._rest.GET(url, **kwargs)
return data.text
def extract_cellset_dataframe(self, cellset_id: str, **kwargs) -> pd.DataFrame:
""" Build pandas data frame from cellset_id
:param cellset_id:
:param kwargs:
:return:
"""
raw_csv = self.extract_cellset_csv(cellset_id=cellset_id, delete_cellset=True, **kwargs)
if not raw_csv:
return pd.DataFrame()
memory_file = StringIO(raw_csv)
# make sure all element names are strings and values column is derived from data
if 'dtype' not in kwargs:
kwargs['dtype'] = {'Value': None, **{col: str for col in range(999)}}
return pd.read_csv(memory_file, sep=',', **kwargs)
@tidy_cellset
def extract_cellset_power_bi(self, cellset_id: str, **kwargs) -> pd.DataFrame:
url = "/api/v1/Cellsets('{}')?$expand=" \
"Axes($filter=Ordinal eq 0 or Ordinal eq 1;$expand=Tuples(" \
"$expand=Members($select=Name)),Hierarchies($select=Name))," \
"Cells($select=Value)".format(cellset_id)
response = self._rest.GET(url=url, **kwargs)
response_json = response.json()
rows = response_json["Axes"][1]["Tuples"]
column_headers = [tupl["Members"][0]["Name"] for tupl in response_json["Axes"][0]["Tuples"]]
row_headers = [hierarchy["Name"] for hierarchy in response_json["Axes"][1]["Hierarchies"]]
cell_values = [cell["Value"] for cell in response_json["Cells"]]
headers = row_headers + column_headers
body = []
number_rows = len(rows)
# avoid division by zero
if not number_rows:
return pd.DataFrame(body, columns=headers)
number_cells = len(cell_values)
number_columns = int(number_cells / number_rows)
element_names_by_row = [tuple(member["Name"] for member in tupl["Members"])
for tupl
in rows]
# case: skip attributes and skip parents
if not number_columns:
return pd.DataFrame(data=element_names_by_row, columns=headers)
cell_values_by_row = [cell_values[cell_counter:cell_counter + number_columns]
for cell_counter
in range(0, number_cells, number_columns)]
for element_tuple, cells in zip(element_names_by_row, cell_values_by_row):
body.append(list(element_tuple) + cells)
return pd.DataFrame(body, columns=headers, dtype=str)
def extract_cellset_dataframe_pivot(self, cellset_id: str, dropna: bool = False, fill_value: bool = False,
**kwargs) -> pd.DataFrame:
""" Extract a pivot table (pandas dataframe) from a cellset in TM1
:param cellset_id:
:param dropna:
:param fill_value:
:param kwargs:
:return:
"""
data = self.extract_cellset(
cellset_id=cellset_id,
delete_cellset=False,
**kwargs)
cube, titles, rows, columns = self.extract_cellset_composition(
cellset_id=cellset_id,
delete_cellset=True,
**kwargs)
df = build_pandas_dataframe_from_cellset(data, multiindex=False)
return pd.pivot_table(
data=df,
index=[dimension_name_from_element_unique_name(hierarchy_unique_name) for hierarchy_unique_name in rows],
columns=[dimension_name_from_element_unique_name(hierarchy_unique_name) for hierarchy_unique_name in
columns],
values=["Values"],
dropna=dropna,
fill_value=fill_value,
aggfunc='sum')
def extract_cellset(
self,
cellset_id: str,
cell_properties: Iterable[str] = None,
top: int = None,
skip: int = None,
delete_cellset: bool = True,
skip_contexts: bool = False,
**kwargs) -> CaseAndSpaceInsensitiveTuplesDict:
""" Execute cellset and return the cells with their properties
:param skip_contexts:
:param delete_cellset:
:param cellset_id:
:param cell_properties: properties to be queried from the cell. E.g. Value, Ordinal, RuleDerived, ...
:param top: integer
:param skip: integer
:return: Content in sweet concise structure.
"""
if not cell_properties:
cell_properties = ['Value']
if skip and 'Ordinal' not in cell_properties:
cell_properties.append('Ordinal')
raw_cellset = self.extract_cellset_raw(
cellset_id,
cell_properties=cell_properties,
elem_properties=['UniqueName'],
member_properties=['UniqueName'],
top=top,
skip=skip,
skip_contexts=skip_contexts,
delete_cellset=delete_cellset,
**kwargs)
return Utils.build_content_from_cellset(
raw_cellset_as_dict=raw_cellset,
top=top)
def create_cellset(self, mdx: str, **kwargs) -> str:
""" Execute MDX in order to create cellset at server. return the cellset-id
:param mdx: MDX Query, as string
:return:
"""
url = '/api/v1/ExecuteMDX'
data = {
'MDX': mdx
}
response = self._rest.POST(url=url, data=json.dumps(data, ensure_ascii=False), **kwargs)
cellset_id = response.json()['ID']
return cellset_id
def create_cellset_from_view(self, cube_name: str, view_name: str, private: bool, **kwargs) -> str:
""" create cellset from a cube view. return the cellset-id
:param cube_name:
:param view_name:
:param private:
:param kwargs:
:return:
"""
url = format_url("/api/v1/Cubes('{cube_name}')/{views}('{view_name}')/tm1.Execute",
cube_name=cube_name,
views='PrivateViews' if private else 'Views',
view_name=view_name)
return self._rest.POST(url=url, **kwargs).json()['ID']
def delete_cellset(self, cellset_id: str, **kwargs) -> Response:
""" Delete a cellset
:param cellset_id:
:return:
"""
url = "/api/v1/Cellsets('{}')".format(cellset_id)
return self._rest.DELETE(url, **kwargs)
def deactivate_transactionlog(self, *args: str, **kwargs) -> Response:
""" Deactivate Transactionlog for one or many cubes
:param args: one or many cube names
:return:
"""
updates = {}
for cube_name in args:
updates[(cube_name, "Logging")] = "NO"
return self.write_values(cube_name="}CubeProperties", cellset_as_dict=updates, **kwargs)
def activate_transactionlog(self, *args: str, **kwargs) -> Response:
""" Activate Transactionlog for one or many cubes
:param args: one or many cube names
:return:
"""
updates = {}
for cube_name in args:
updates[(cube_name, "Logging")] = "YES"
return self.write_values(cube_name="}CubeProperties", cellset_as_dict=updates, **kwargs)
def get_cellset_cells_count(self, mdx: str) -> int:
""" Execute MDX in order to understand how many cells are in a cellset
:param mdx: MDX Query, as string
:return: Number of Cells in the CellSet
"""
warnings.simplefilter('always', PendingDeprecationWarning)
warnings.warn(
"Function deprecated. Use execute_mdx_cellcount(self, mdx) instead.",
PendingDeprecationWarning
)
warnings.simplefilter('default', PendingDeprecationWarning)
return self.execute_mdx_cellcount(mdx)
def get_view_content(self, cube_name: str, view_name: str, cell_properties: Iterable[str] = None,
private: bool = False, top: int = None):
warnings.simplefilter('always', PendingDeprecationWarning)
warnings.warn(
"Function deprecated. Use execute_view instead.",
PendingDeprecationWarning
)
warnings.simplefilter('default', PendingDeprecationWarning)
return self.execute_view(cube_name, view_name, cell_properties, private, top)
@staticmethod
def _extract_string_set_from_rows_and_values(
rows_and_values: CaseAndSpaceInsensitiveTuplesDict,
exclude_empty_cells: bool) -> CaseAndSpaceInsensitiveSet:
""" Helper function for execute_..._string_set methods
:param rows_and_values:
:param exclude_empty_cells:
:return:
"""
result_set = CaseAndSpaceInsensitiveSet()
for row_elements, cell_values in rows_and_values.items():
for row_element in row_elements:
result_set.add(row_element)
for cell_value in cell_values:
if isinstance(cell_value, str):
if cell_value or not exclude_empty_cells:
result_set.add(cell_value)
return result_set
``` |
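For context, a hedged end-to-end sketch of how `CellService` is typically reached through the `TM1Service` facade; the connection settings, cube name and coordinates below are placeholders, not values from this repository, and `tm1.cubes.cells` is the assumed access path to the service.
```python
# All names below are placeholders; point them at a real TM1 instance before running.
from TM1py.Services import TM1Service

with TM1Service(address="localhost", port=12354, user="admin", password="apple", ssl=True) as tm1:
    cells = tm1.cubes.cells

    # write a single value to a (hypothetical) 3-dimensional cube
    cells.write_value(42, "Sales", ("2021", "Europe", "Amount"))

    # read it back via MDX: keys are element tuples, values are cell dicts
    mdx = ("SELECT {[Measure].[Amount]} ON COLUMNS, {[Region].[Europe]} ON ROWS "
           "FROM [Sales] WHERE ([Period].[2021])")
    for coordinates, cell in cells.execute_mdx(mdx).items():
        print(coordinates, cell["Value"])
```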
{
"source": "joelios/doppelpass",
"score": 3
} |
#### File: doppelpass/doppelpass/swissunihockey.py
```python
from __future__ import unicode_literals
import frappe
from frappe import throw, _
import json
import base64
import requests
from requests.auth import HTTPBasicAuth
try:
from urllib import request as http
except ImportError:
import urllib2 as http
from datetime import datetime
def execute(host):
try:
response = requests.request(method='GET', url=host)
response.encoding = 'utf-8'
json = response.json()
return json
except Exception:
frappe.throw("Execution of http request failed. Please check host and API token.")
def get_tabelle(season, league, game_class, group):
host = "https://api-v2.swissunihockey.ch/api/rankings?season={season}&league={league}&game_class={game_class}&group={group}".format(season=season, league=league, game_class=game_class, group=group)
results = execute(host)
return results
def get_resultate(team_id, season):
host = "https://api-v2.swissunihockey.ch/api/games?mode=team&games_per_page=100&team_id={team_id}&season={season}".format(team_id=team_id, season=season)
results = execute(host)
return results
``` |
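A hedged call sketch for these helpers: the season, league, group and team_id values are placeholders, the import path assumes the app's `doppelpass` package layout, and the shape of the returned JSON is whatever api-v2.swissunihockey.ch sends back.
```python
# Placeholder parameters; real ids have to be looked up on swissunihockey.ch.
from doppelpass.swissunihockey import get_resultate, get_tabelle

ranking = get_tabelle(season=2020, league=1, game_class=11, group="Gruppe 1")
games = get_resultate(team_id=123456, season=2020)

# both helpers return the parsed JSON response as a plain dict
print(list(ranking.keys()))
print(list(games.keys()))
```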
{
"source": "joelios/frappe",
"score": 2
} |
#### File: patches/v11_0/create_contact_for_user.py
```python
from __future__ import unicode_literals
import frappe
from frappe.core.doctype.user.user import create_contact
import re
def execute():
""" Create Contact for each User if not present """
frappe.reload_doc('integrations', 'doctype', 'google_contacts')
frappe.reload_doc('contacts', 'doctype', 'contact')
frappe.reload_doc('core', 'doctype', 'dynamic_link')
frappe.reload_doc('communication', 'doctype', 'call_log')
contact_meta = frappe.get_meta("Contact")
if contact_meta.has_field("phone_nos") and contact_meta.has_field("email_ids"):
frappe.reload_doc('contacts', 'doctype', 'contact_phone')
frappe.reload_doc('contacts', 'doctype', 'contact_email')
users = frappe.get_all('User', filters={"name": ('not in', 'Administrator, Guest')}, fields=["*"])
for user in users:
if user.first_name:
user.first_name = re.sub("[<>]+", '', frappe.safe_decode(user.first_name))
if user.last_name:
user.last_name = re.sub("[<>]+", '', frappe.safe_decode(user.last_name))
create_contact(user, ignore_links=True, ignore_mandatory=True)
``` |
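The only transformation applied to the name fields before `create_contact` is the `re.sub` call, which strips angle brackets; a standalone illustration with an invented name:
```python
import re

# mirrors the sanitisation in the patch; "Jane <Doe>" is an invented example value
print(re.sub("[<>]+", "", "Jane <Doe>"))   # -> "Jane Doe"
```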
{
"source": "joelios/TeamPlaner",
"score": 2
} |
#### File: teamplaner/config/teamplaner.py
```python
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Verein / Team / Mitglieder"),
"items": [
{
"type": "doctype",
"name": "Verein",
},
{
"type": "doctype",
"name": "Team",
"dependencies": ["Verein"]
},
{
"type": "doctype",
"name": "Mitglied",
"dependencies": ["Team"]
}
]
},
{
"label": _("Events"),
"items": [
{
"type": "doctype",
"name": "TP Event",
"dependencies": ["Team"],
"label": "Event"
}
]
}
]
``` |
{
"source": "Joelith/reloqtr_backend",
"score": 3
} |
#### File: Joelith/reloqtr_backend/server.py
```python
from flask import Flask, json, request
from flask_cors import CORS, cross_origin
from sklearn.neighbors import NearestNeighbors
from pandas import read_hdf
from random import shuffle
import pandas as pd
import random
app = Flask(__name__)
CORS(app)
model_s = read_hdf('datasets/model_set.h5')
all_attr = model_s.columns
n_subs = model_s.shape[0]
def get_burbs(user, attributes=all_attr, df=model_s):
'''
Takes a user feature vector and returns a ranked list of preferred suburbs.
The model attributes assessed can be changed by feeding in a more limited list of attributes.
'''
X = df[attributes[1:]].values # TODO: reduce model or reorder attributes by editing the attributes here
Y = df['Suburb'].values
nbrs = NearestNeighbors(n_neighbors=n_subs, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors([user])
df['score']=100
for i, s in zip(indices, distances):
df.loc[i, 'score'] = s
#print(df.columns)
return model_s.sort_values(by='score',ascending=False)[['Suburb']].values
@app.route("/")
def hello():
return "Hello World!"
@app.route("/match", methods=['POST'])
def match():
content = request.json
content_list = list(content.values())
print(content_list)
# Jake's test array
#from numpy import random
#test_user = random.uniform(low=0,high=1,size=len(all_attr)-1)
#print(test_user)
# compatible suburb results
cs_results = get_burbs(content_list)
# Pulls data from govhack csv to match selected suburb
csv_file_df = pd.read_csv('datasets/govhack-act.csv')
rand_idx = random.randint(0, 9)
first_picked_name = cs_results[rand_idx][0].lower().title()
print(first_picked_name)
govhack_values = (csv_file_df[csv_file_df['Suburb'] == first_picked_name])
url = "http://172.16.17.32/images"
# Array to contain graffiti url objects
graffiti_urls = []
print(govhack_values)
graffiti_url_split = govhack_values.GRAFFITI_IMG.astype(str).item().split(", ")
if len(graffiti_url_split) > 1:
for idx, val in enumerate(graffiti_url_split):
graffiti_urls.append({'id' : idx, 'image_url' : val})
resp_data = {
'name' : first_picked_name,
'age' : str(govhack_values.AGE.astype(int).item()),
'description' : govhack_values.DESCRIPTION.astype(str).item(),
'mode_demo' : govhack_values.MODE_DEM.astype(str).item(),
'commute_time' : govhack_values.COMMUTE.item(),
'no_arts' : govhack_values.NO_ARTS.item(),
'no_sports' : govhack_values.NO_FIT_SITES.item(),
'no_basketball' : govhack_values.NO_BB_CRTS.item(),
'no_skate_parks' : govhack_values.NO_SKATE_PARKS.item(),
'no_hospitals' : govhack_values.NO_HOSPITALS.item(),
'dog_park' : govhack_values.DOG_PARK.item(),
'no_graffiti' : govhack_values.NO_GRAFITTI.item(),
'no_bbq' : govhack_values.NO_BBQ.item(),
'no_parks' : govhack_values.NO_PARKS_AND_PLAYGROUNDS.item(),
'distance' : random.randint(5,15),
'pop' : govhack_values.TOTAL_POP.item(),
'fem_pop' : govhack_values.FEM_POP.item(),
'male_pop' : govhack_values.MALE_POP.item(),
'image_url' : "%s/%s.png" % (url, first_picked_name),
'graffiti' : graffiti_urls
}
response = app.response_class(
response=json.dumps(resp_data),
status=200,
mimetype='application/json'
)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
def main():
app.run("0.0.0.0",8000,True)
if __name__== "__main__":
main()
``` |
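A self-contained toy version of the ranking step inside `get_burbs`, with invented suburbs and only two features; the real model loads many more attribute columns from `model_set.h5`.
```python
# Invented data; only meant to show how NearestNeighbors orders the suburbs.
import pandas as pd
from sklearn.neighbors import NearestNeighbors

df = pd.DataFrame({
    "Suburb": ["braddon", "kingston", "dickson"],
    "parks": [0.9, 0.4, 0.7],
    "commute": [0.2, 0.8, 0.5],
})

user = [0.8, 0.3]  # preference vector in the same feature order as the columns
nbrs = NearestNeighbors(n_neighbors=len(df), algorithm="ball_tree").fit(df[["parks", "commute"]].values)
distances, indices = nbrs.kneighbors([user])

# kneighbors returns both arrays sorted ascending, so the closest match comes first
for dist, idx in zip(distances[0], indices[0]):
    print(df["Suburb"].iloc[idx], round(float(dist), 3))
```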
{
"source": "JoeLiu2015/PT",
"score": 3
} |
#### File: PT/test/test_ptctx.py
```python
import unittest
import sys
sys.path.insert(0, "../")
from ptctx import *
EXPRS_STR = '''
{{['a', 'b', 'c']|'|'.join(self)|self+'{}[]()'}}
{{'joe is a good man'|self.replace(' ', '-')}}
{% f = 'haha' %}
{{f.startswith('ha') ? ('starts with ha'|self.replace(' ', '()')) : 'not starts with ha'}}
{{'joe'.startswith('ha') ? ('starts with ha'|self.replace(' ', '()')) : ('not starts with ha' | self[1:] )}}
'''
class ptctx_test(unittest.TestCase):
def test_next_tok_custom_toks(self):
tz = Tokenizer(EXPRS_STR)
i = 0
while True:
tok = tz.next_tok(custom_toks=['{{','}}','{%', '%}'])
if tok is None:
break
if tok.is_blank_or_newline:
continue
print(i, ':', tok.text)
i += 1
self.assertEqual(i, 91)
def test_next_tok(self):
tz = Tokenizer(EXPRS_STR)
i = 0
while True:
tok = tz.next_tok()
if tok is None:
break
if tok.is_blank_or_newline:
continue
print(i, ':', tok.text)
i += 1
self.assertEqual(i, 101)
def test_next_tok_comments(self):
txt = '''
#this is a good 'dock'
#######AT########
Hello
###########
'''
tz = Tokenizer(txt)
ret = ''
i = 0
while True:
tok = tz.next_tok()
if tok is None:
break
if tok.is_blank_or_newline:
continue
print(i, ':', tok.text)
ret += tok.text
i += 1
self.assertEqual(ret, "#this is a good 'dock'#######AT######## Hello########### ")
self.assertEqual(i, 4)
def test_PT_eval_plain_text(self):
template = '''
测试中文
“string"==\'\'\'
'''
ret = PT.eval(template)
print(ret)
self.assertEqual(template, ret)
def test_PT_eval_for(self):
template = '''
a
{%for i in range(0, 5)%}
test
{%endfor%}
b
'''
expect_ret = '''
a
test
test
test
test
test
b
'''
ret = PT.eval(template)
print(ret)
self.assertEqual(expect_ret, ret)
def test_PT_eval_if(self):
template = '''
a
{%
m = 'joel'
#This is a comments
if len(m) == 4:
n = True
else:
n = False
%}
{%if n%}
{%for i in range(0,5)%}
if
{%endfor%}
{%else%}
else
{%endif%}
b
'''
expect_ret = '''
a
if
if
if
if
if
b
'''
ret = PT.eval(template)
print(ret)
self.assertEqual(expect_ret, ret)
def test_expr(self):
template = '''[
{%for i in range(1,101)%}
{{i}}{{(i % 10 != 0) ? ', ' : '\\n'-}}
{%endfor%}
{%
f = {'val2': 3}
t = {}
t['val1'] = f
%}
{{3 + t.val1.val2 * (7 - 1 - t.val1['val2'] | self*self) | self - 10}}
{{['a', 'b', 'c']|'|'.join(self)|self+'{}[]()'}}
{{'joe is a good man'|self.replace(' ', '-')}}
{% f = 'haha' %}
{{f.startswith('ha') ? ('starts with ha'|self.replace(' ', '()')) : 'not starts with ha'}}
{{'joe'.startswith('ha') ? ('starts with ha'|self.replace(' ', '()')) : ('not starts with ha' | self[1:] )}}
]'''
expected_ret = '''[
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
11, 12, 13, 14, 15, 16, 17, 18, 19, 20
21, 22, 23, 24, 25, 26, 27, 28, 29, 30
31, 32, 33, 34, 35, 36, 37, 38, 39, 40
41, 42, 43, 44, 45, 46, 47, 48, 49, 50
51, 52, 53, 54, 55, 56, 57, 58, 59, 60
61, 62, 63, 64, 65, 66, 67, 68, 69, 70
71, 72, 73, 74, 75, 76, 77, 78, 79, 80
81, 82, 83, 84, 85, 86, 87, 88, 89, 90
91, 92, 93, 94, 95, 96, 97, 98, 99, 100
20
a|b|c{}[]()
joe-is-a-good-man
starts()with()ha
ot starts with ha
]'''
ret = PT.eval(template)
self.assertEqual(ret, expected_ret)
print(ret)
def test_outputfile(self):
template = '''
{% for i in range(1,101) %}
{% @output_file('./test' + str(i) + '.txt') %}
{{i}}--filename-{{'./test' + str(i) + '.txt'}}
{%endfor%}'''
ret = PT.eval(template)
for i in range(1, 101):
filename = './test' + str(i) + '.txt'
self.assertTrue(ptutil.file_exists(filename))
self.assertTrue(ptutil.file_read(filename), str(i) + '--filename-' + filename)
ptutil.file_delete(filename)
print(ret)
def test_outputfile1(self):
expect_page = '''<!DOCTYPE html>
<html>
<head>
<title>Page {{i}}</title>
</head>
<body>
<p>This is page {{i}}.</p>
</body>
</html>'''
expect_index = '''<!DOCTYPE html>
<html>
<head>
<title>Index</title>
</head>
<body>
<ul>
<li><a href=".\pages\page1.html">Page 1</a></li>
<li><a href=".\pages\page2.html">Page 2</a></li>
<li><a href=".\pages\page3.html">Page 3</a></li>
<li><a href=".\pages\page4.html">Page 4</a></li>
<li><a href=".\pages\page5.html">Page 5</a></li>
<li><a href=".\pages\page6.html">Page 6</a></li>
<li><a href=".\pages\page7.html">Page 7</a></li>
<li><a href=".\pages\page8.html">Page 8</a></li>
<li><a href=".\pages\page9.html">Page 9</a></li>
<li><a href=".\pages\page10.html">Page 10</a></li>
</ul>
</body>
</html>'''
ret = PT.eval('./templates/multiple_files.pt')
index = './templates/index.html'
self.assertTrue(ptutil.file_exists(index))
self.assertTrue(ptutil.file_read(index), expect_index)
ptutil.file_delete(index)
for i in range(1, 11):
filename = './templates/pages/page' + str(i) + '.html'
self.assertTrue(ptutil.file_exists(filename))
self.assertTrue(ptutil.file_read(filename), expect_page.replace('{{i}}', str(i)))
ptutil.file_delete(filename)
print(ret)
def test_extension(self):
template = '''
{% @extension('./pyutils.py') %}
{% for i in range(1,10) %}
{{i}}--filename-{{i|convert}}
{%endfor%}'''
ret = PT.eval(template)
expect_ret = '''
1--filename-1
2--filename-4
3--filename-9
4--filename-16
5--filename-25
6--filename-36
7--filename-49
8--filename-64
9--filename-81
'''
self.assertEqual(ret, expect_ret)
def test_extension_file(self):
ret = PT.eval('./templates/extension.pt')
expect_ret = '''--1---1--
--2---4--
--3---9--
--4---16--
--5---25--
'''
self.assertEqual(ret, expect_ret)
def test_include(self):
template = '''
{% for i in range(1,3) %}
{% @include('./templates/num_matrix.pt', {'loop': i}) %}
{%endfor%}'''
ret = PT.eval(template)
expect_ret = '''
===1===
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
11, 12, 13, 14, 15, 16, 17, 18, 19, 20
21, 22, 23, 24, 25, 26, 27, 28, 29, 30
31, 32, 33, 34, 35, 36, 37, 38, 39, 40
41, 42, 43, 44, 45, 46, 47, 48, 49, 50
51, 52, 53, 54, 55, 56, 57, 58, 59, 60
61, 62, 63, 64, 65, 66, 67, 68, 69, 70
71, 72, 73, 74, 75, 76, 77, 78, 79, 80
81, 82, 83, 84, 85, 86, 87, 88, 89, 90
91, 92, 93, 94, 95, 96, 97, 98, 99, 100
a|b|c{}[]()
joe-is-a-good-man
starts()with()ha
ot starts with ha
====1 haha====
===2===
1, 2, 3, 4, 5, 6, 7, 8, 9, 10
11, 12, 13, 14, 15, 16, 17, 18, 19, 20
21, 22, 23, 24, 25, 26, 27, 28, 29, 30
31, 32, 33, 34, 35, 36, 37, 38, 39, 40
41, 42, 43, 44, 45, 46, 47, 48, 49, 50
51, 52, 53, 54, 55, 56, 57, 58, 59, 60
61, 62, 63, 64, 65, 66, 67, 68, 69, 70
71, 72, 73, 74, 75, 76, 77, 78, 79, 80
81, 82, 83, 84, 85, 86, 87, 88, 89, 90
91, 92, 93, 94, 95, 96, 97, 98, 99, 100
a|b|c{}[]()
joe-is-a-good-man
starts()with()ha
ot starts with ha
====2 haha====
'''
self.assertEqual(ret, expect_ret)
def test_include_1(self):
ret = PT.eval('./templates/students.pt', {'students': [{'name': 'joe', 'score': 88}, {'name': 'martin', 'score': 90}]})
expect_ret = '''All students as follows:
==No index==
This is the first line. Print the input parameters as follows:
a. Name : joe
b. Score: 88
End.
This is the first line. Print the input parameters as follows:
a. Name : martin
b. Score: 90
End.
==With index==
== 0 ==
This is the first line. Print the input parameters as follows:
a. Name : joe
b. Score: 88
End.
== 1 ==
This is the first line. Print the input parameters as follows:
a. Name : martin
b. Score: 90
End.
'''
self.assertEqual(ret, expect_ret)
def test_expr_ternary(self):
expr = '''{{(i % 10 != 0) ? ',': '\\r\\n' }}'''
self.assertEqual(',', PT.eval(expr, {'i': 9}))
self.assertEqual('\r\n', PT.eval(expr, {'i': 10}))
def test_expr_filter(self):
expr = '''{{t|self+val}}'''
self.assertEqual('13', PT.eval(expr, {'val': 7, 't': 6}))
def test_expr_filter_custom_func(self):
expr = '''{{t|self+val|tt}}'''
def tt(v):
return v*v
self.assertEqual('169', PT.eval(expr, {'val': 7, 't': 6, 'tt': tt}))
def test_expr_prop(self):
expr = '''{{3 + t.val1.val2 * (7 - 1 - t.val1['val2'] | self*self) | self - 10}}'''
f = {'val2': 3}
t = {'val1': f}
self.assertEqual('20', PT.eval(expr, {'t': t}))
def test_tokens(self):
s = '{{abc_def 7878.89\r\n \t \t_abc123 \n%} 9 a \t'
t = Tokenizer(s)
self.assertEqual(t.next_tok().text, '{')
self.assertEqual(t.next_tok().text, '{')
self.assertEqual(t.next_tok().text, 'abc_def')
self.assertEqual(t.next_tok().text, ' ')
self.assertEqual(t.next_tok().text, '7878')
self.assertEqual(t.next_tok().text, '.')
self.assertEqual(t.next_tok().text, '89')
self.assertEqual(t.next_tok().text, '\r\n')
self.assertEqual(t.next_tok().text, ' \t \t')
self.assertEqual(t.next_tok().text, '_abc123')
self.assertEqual(t.next_tok().text, ' ')
self.assertEqual(t.next_tok().text, '\n')
self.assertEqual(t.next_tok().text, '%')
self.assertEqual(t.next_tok().text, '}')
self.assertEqual(t.next_tok().text, ' ')
self.assertEqual(t.next_tok().text, '9')
self.assertEqual(t.next_tok().text, ' ')
self.assertEqual(t.next_tok().text, 'a')
self.assertEqual(t.next_tok().text, ' \t')
```
#### File: PT/test/test_ptutil.py
```python
import unittest
import sys
sys.path.insert(0, "../")
from ptutil import *
class test_ptutil(unittest.TestCase):
def test_file_ops1(self):
fp = './temp/1/2/3/4/5/6/7/8/test.txt'
self.assertFalse(file_exists(fp))
file_create(fp)
file_write_all(fp, 'hello\r\nworld\tA\rB\nC')
self.assertEqual(file_read(fp), 'hello\r\nworld\tA\rB\nC')
self.assertTrue(file_exists(fp))
file_delete(fp)
self.assertFalse(file_exists(fp))
path_delete('./temp')
def test_file_ops2(self):
fp = './test.txt'
self.assertFalse(file_exists(fp))
file_create(fp)
file_write_all(fp, 'hello\r\nworld\tA\rB\nC')
self.assertEqual(file_read(fp), 'hello\r\nworld\tA\rB\nC')
self.assertTrue(file_exists(fp))
file_delete(fp)
self.assertFalse(file_exists(fp))
def test_path_ops(self):
fp = './temp/1/2/3/4/5/6/7/8'
self.assertFalse(path_exists(fp))
path_create(fp)
self.assertTrue(path_exists(fp))
path_delete('./temp')
self.assertFalse(path_exists('./temp'))
def test_path_files(self):
fp = './temp'
path_delete(fp)
fs = ['./temp/a.txt',
'./temp/b.txt',
'./temp/c.txt',
'./temp/d.txt',
'./temp/1.py',
'./temp/2.py',
'./temp/3.py']
self.assertFalse(path_exists(fp))
path_create(fp)
self.assertTrue(path_exists(fp))
for f in fs:
file_create(f)
txt = path_files(fp,'*.txt')
py = path_files(fp, '*.py')
txt.sort()
py.sort()
self.assertEqual(len(txt), 4)
self.assertEqual(len(py), 3)
self.assertEqual(txt[0], './temp/a.txt')
self.assertEqual(txt[1], './temp/b.txt')
self.assertEqual(txt[2], './temp/c.txt')
self.assertEqual(txt[3], './temp/d.txt')
self.assertEqual(py[0], './temp/1.py')
self.assertEqual(py[1], './temp/2.py')
self.assertEqual(py[2], './temp/3.py')
def test_data_xml(self):
input = '''
<joe attr1="abc" attr2="3" attr4="true">
<node n="aaa" m="bbb">this is test</node>
<node>
i'm joe
</node>
</joe>
'''
m = data_xml(input)
self.assertEqual(m.attr1, 'abc')
self.assertEqual(m.attr2, '3')
self.assertEqual(m.attr4, 'true')
self.assertEqual(m.node[0].n, 'aaa')
self.assertEqual(m.node[0].m, 'bbb')
self.assertEqual(m.node[0].text, 'this is test')
self.assertEqual(m.node[1].text.strip(), 'i\'m joe')
def test_data_json(self):
input = '''
{
"num" : 1234,
"bool" : true,
"array": ["str", 123, true],
"dic" : {
"key1": "val1",
"key2": "val2"
}
}
'''
m = data_json(input)
self.assertEqual(m['num'], 1234)
self.assertEqual(m['bool'], True)
self.assertEqual(len(m['array']), 3)
self.assertEqual(m['array'][0], 'str')
self.assertEqual(m['array'][1], 123)
self.assertEqual(m['array'][2], True)
self.assertEqual(m['dic']['key1'], 'val1')
self.assertEqual(m['dic']['key2'], 'val2')
def test_data_ini(self):
input = '''
[sec1]
a = 123
b = ab
c = ['a','b']
d = {'aa':1,'bb':true,'cc':'haha'}
[sec2]
e = 34
f = ff
g = fsafdsa
'''
m = data_ini(input)
self.assertEqual(m['sec1']['a'], '123')
self.assertEqual(m['sec1']['b'], 'ab')
self.assertEqual(m['sec1']['c'], "['a','b']")
self.assertEqual(m['sec1']['d'], "{'aa':1,'bb':true,'cc':'haha'}")
self.assertEqual(m['sec2']['e'], '34')
self.assertEqual(m['sec2']['f'], 'ff')
self.assertEqual(m['sec2']['g'], 'fsafdsa')
if __name__ == '__main__':
pass
#unittest.main()
``` |
{
"source": "joeljgeo/landlab",
"score": 2
} |
#### File: flow_accum/tests/test_flow_accums.py
```python
import numpy as np
from numpy.testing import assert_array_equal
from landlab.components.flow_accum.flow_accum_to_n import (
find_drainage_area_and_discharge_to_n
)
from landlab.components.flow_accum import find_drainage_area_and_discharge
def test_boundary_to_n():
r = np.array(
[
[1, 2],
[4, 5],
[1, 5],
[6, 2],
[4, -1],
[4, -1],
[5, 7],
[4, 5],
[6, 7],
[7, 8],
]
)
p = np.array(
[
[0.6, 0.4],
[0.85, 0.15],
[0.65, 0.35],
[0.9, 0.1],
[1., 0.],
[1., 0.],
[0.75, 0.25],
[0.55, 0.45],
[0.8, 0.2],
[0.95, 0.05],
]
)
s = np.array([4, 5, 1, 7, 2, 6, 0, 8, 3, 9])
a, q = find_drainage_area_and_discharge_to_n(s, r, p, boundary_nodes=[0])
true_a = np.array([0., 1.715, 1.1, 1., 9., 4.9775, 2.74, 2.845, 1.05, 1.])
assert_array_equal(a, true_a)
def test_boundary_bw():
r = np.array([2, 5, 2, 7, 5, 5, 6, 5, 7, 8]) - 1
s = np.array([4, 1, 0, 2, 5, 6, 3, 8, 7, 9])
a, q = find_drainage_area_and_discharge(s, r, boundary_nodes=[0])
true_a = np.array([0., 2., 1., 1., 9., 4., 3., 2., 1., 1.])
assert_array_equal(a, true_a)
```
#### File: components/flow_director/flow_director_to_many.py
```python
from landlab import FieldError
from landlab.components.flow_director.flow_director import _FlowDirector
import numpy
from landlab import BAD_INDEX_VALUE
class _FlowDirectorToMany(_FlowDirector):
"""
Private class for creating components to calculate flow directions.
This class is not meant to be used directly in modeling efforts. It
inherits from the _FlowDirector class and builds on it to provide the
functionality that all flow direction calculators need if they direct flow
only to multiple nodes, as in D infinity or MFD direction finding. It
exists in contrast to the other intermediate flow director class
_FlowDirectorToOne which provides equivalent functionality for flow
direction algorithms such as D8 or steepest descent which directs flow only
to one other node. As the primary difference between these two methods is
the names of the fields they create and use, the primary function of this
class is to create model grid fields.
The primary method of this class, :func:`run_one_step` is not implemented.
Parameters
----------
grid : ModelGrid
A grid.
surface : field name at node or array of length node
The surface to direct flow across.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components.flow_director.flow_director_to_many import(
... _FlowDirectorToMany)
>>> mg = RasterModelGrid((3,3), spacing=(1, 1))
>>> mg.set_closed_boundaries_at_grid_edges(True, True, True, False)
>>> _ = mg.add_field('topographic__elevation',
... mg.node_x + mg.node_y,
... at = 'node')
>>> fd = _FlowDirectorToMany(mg, 'topographic__elevation')
>>> fd.surface_values
array([ 0., 1., 2., 1., 2., 3., 2., 3., 4.])
>>> sorted(list(mg.at_node.keys()))
['flow__sink_flag', 'topographic__elevation']
"""
_name = "FlowDirectorToMany"
_input_var_names = ("topographic__elevation",)
_output_var_names = ("flow__sink_flag",)
_var_units = {"topographic__elevation": "m", "flow__sink_flag": "-"}
_var_mapping = {"topographic__elevation": "node", "flow__sink_flag": "node"}
_var_doc = {
"topographic__elevation": "Land surface topographic elevation",
"flow__sink_flag": "Boolean array, True at local lows",
}
def __init__(self, grid, surface):
"""Initialize the _FlowDirectorTo_One class."""
# run init for the inherited class
super(_FlowDirectorToMany, self).__init__(grid, surface)
self.to_n_receivers = "many"
# initialize new fields
def run_one_step(self):
"""run_one_step is not implemented for this component."""
raise NotImplementedError("run_one_step()")
@property
def proportions_of_flow(self):
"""Return the proportions of flow going to recievers."""
return self._grid["node"]["flow__receiver_proportions"]
if __name__ == "__main__": # pragma: no cover
import doctest
doctest.testmod()
```
#### File: components/lithology/litholayers.py
```python
import numpy as np
from landlab.components.lithology.lithology import Lithology
class LithoLayers(Lithology):
"""Create LithoLayers component.
A LithoLayers is a three dimensional representation of material operated on
by landlab components. Material can be removed through erosion or added to
through deposition. Rock types can have multiple attributes (e.g. age,
erodability or other parameter values, etc).
If the tracked properties are model grid fields, they will be updated to
the surface values of the Lithology. If the properties are not grid fields
then at-node grid fields will be created with their names.
It is constructed by specifying a series of depths below the surface, an
anchor point, a series of rock type ids, and the functional form of a
surface. Depths and IDs are both specified in order of closest
to the surface to furthest from the surface.
Additionally, an attribute dictionary specifies the properties of each
rock type. This dictionary is expected to have the form of:
.. code-block:: python
attrs = {'K_sp': {1: 0.001,
2: 0.0001},
'D': {1: 0.01,
2: 0.001}}
Where ``'K_sp'`` and ``'D'`` are properties to track, and ``1`` and ``2``
are rock type IDs. The rock type IDs can be any type that is valid as a
python dictionary key.
"""
_name = 'LithoLayers'
_cite_as = """@article{barnhart2018lithology,
title = "Lithology: A Landlab submodule for spatially variable rock properties",
journal = "Journal of Open Source Software",
volume = "",
pages = "",
year = "2018",
doi = "10.21105/joss.00979",
author = "<NAME> and <NAME> and <NAME> and <NAME>",
}"""
def __init__(self, grid, z0s, ids, attrs, x0=0, y0=0,
function=lambda x, y: 0*x + 0*y,
layer_type='EventLayers'):
"""Create a new instance of a LithoLayers.
Parameters
----------
grid : Landlab ModelGrid
z0s : ndarray of shape `(n_layers, )`
Values of layer depth from surface at horizontal location (x0, y0).
ids : ndarray of shape `(n_layers, )`
Values of rock type IDs corresponding to each layer specified in
**z0s**.
attrs : dict
Rock type property dictionary. See class docstring for example of
required format.
x0 : float, optional
x value of anchor point for all layers.
y0 : float, optional
y value of anchor point for all layers.
function : function, optional
Functional form of layers as a function of two variables, x and y.
Default value is lambda x, y: 0*x + 0*y for flatlying layers.
layer_type : str, optional
Type of Landlab layers object used to store the layers. If
MaterialLayers is specified, then erosion removes material and does
not create a layer of thickness zero. If EventLayers (the default
here) is used, then erosion removes material and creates layers of
thickness zero. Thus, EventLayers may be appropriate if the user is
interested in chronostratigraphy.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components import LithoLayers
>>> mg = RasterModelGrid(3, 3)
>>> z = mg.add_zeros('node', 'topographic__elevation')
Create a LithoLayers with flatlying layers that alternate between
layers of type 1 and type 2 rock.
>>> z0s = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
>>> ids = [1, 2, 1, 2, 1, 2, 1, 2, 1]
>>> attrs = {'K_sp': {1: 0.001,
... 2: 0.0001}}
>>> lith = LithoLayers(mg, z0s, ids, attrs)
>>> lith.dz
array([[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
Now create a set of layers that dip. Our anchor point will be the
default value of (x0, y0) = (0, 0)
>>> lith = LithoLayers(mg, z0s, ids, attrs, function=lambda x, y: x+y)
>>> lith.dz
array([[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 1., 1., 1., 1., 1.],
[ 0., 0., 1., 0., 1., 1., 1., 1., 1.],
[ 0., 0., 0., 0., 0., 1., 0., 1., 1.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
We can get the surface values, and as we'd expect, they alternate as
the dipping layers are exposed at the surface.
>>> lith['K_sp']
array([ 0.0001, 0.001 , 0.0001, 0.001 , 0.0001, 0.001 , 0.0001,
0.001 , 0.0001])
"""
self._grid = grid
function_args = function.__code__.co_varnames
if len(function_args) != 2:
msg = 'LithoLayers: function must take exactly two arguments, x and y.'
raise ValueError(msg)
if np.asarray(z0s).size != np.asarray(ids).size:
msg = 'LithoLayers: Size of layer depths and layer IDs must be the same'
raise ValueError(msg)
if np.any(np.diff(z0s) < 0):
msg = 'LithoLayers: Bad layer depth order passed.'
raise ValueError(msg)
z_surf = function(self._grid.x_of_node - x0, self._grid.y_of_node - y0)
if hasattr(z_surf, "shape"):
if z_surf.shape != self._grid.x_of_node.shape:
msg = "LithoLayers: function must return an array of shape (n_nodes,)"
raise ValueError(msg)
else:
msg = "LithoLayers: function must return an array of shape (n_nodes,)"
raise ValueError(msg)
layer_thicknesses = []
layer_ids = []
num_layers = np.asarray(z0s).size
last_layer_elev = np.zeros(self._grid.number_of_nodes)
# create layers (here listed from the top to the bottom.)
for i in range(num_layers):
layer_depth = z_surf + z0s[i]
layer_depth[layer_depth<0] = 0
layer_thickness = layer_depth.copy() - last_layer_elev.copy()
last_layer_elev = layer_depth.copy()
layer_thicknesses.append(layer_thickness)
layer_ids.append(ids[i] * np.ones(z_surf.size))
super(LithoLayers, self).__init__(grid, layer_thicknesses, layer_ids, attrs, layer_type=layer_type)
```
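For reference, the per-layer thickness bookkeeping in `LithoLayers.__init__` above amounts to clipping each layer surface at the topography and differencing successive depths. A minimal standalone numpy sketch of that idea (the numbers below are illustrative only and are not taken from landlab):
```python
import numpy as np

# Mimic the constructor's loop for a single node where the layer-surface
# function evaluates to z_surf = 1.0; z0s are depths below the anchor point.
z_surf = np.array([1.0])
z0s = [-1.0, 0.0, 1.0, 2.0]
last_layer_elev = np.zeros(1)
thicknesses = []
for z0 in z0s:
    layer_depth = z_surf + z0
    layer_depth[layer_depth < 0] = 0  # a layer cannot sit above the surface
    thicknesses.append(layer_depth - last_layer_elev)
    last_layer_elev = layer_depth.copy()
print([t[0] for t in thicknesses])  # thicknesses per layer: 0, 1, 1, 1
```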
#### File: sink_fill/tests/conftest.py
```python
import pytest
import numpy as np
from landlab import RasterModelGrid
from landlab import BAD_INDEX_VALUE as XX
from landlab.components.sink_fill import SinkFiller
@pytest.fixture
def sink_grid1():
"""Create a 7x7 test grid with a well defined hole in it."""
sink_grid = RasterModelGrid((7, 7), spacing=1.)
z = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.0],
[0.0, 2.0, 1.6, 1.5, 1.6, 2.0, 0.0],
[0.0, 2.0, 1.7, 1.6, 1.7, 2.0, 0.0],
[0.0, 2.0, 1.8, 2.0, 2.0, 2.0, 0.0],
[0.0, 1.0, 0.6, 1.0, 1.0, 1.0, 0.0],
[0.0, 0.0, -0.5, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
sink_grid.add_field("topographic__elevation", z, at="node", units="-")
sink_grid.outlet = 30
sink_grid.lake_code = 17
sink_grid.lake = np.array([16, 17, 18, 23, 24, 25])
return sink_grid
@pytest.fixture
def sink_grid2():
"""
Create a 10x10 test grid with a well defined hole in it, from a flat
surface.
"""
sink_grid = RasterModelGrid((10, 10), spacing=1.)
lake = np.array([44, 45, 46, 54, 55, 56, 64, 65, 66])
z = np.ones(100, dtype=float)
z[lake] = 0.
sink_grid.add_field("topographic__elevation", z, at="node", units="-")
sink_grid.lake = lake
return sink_grid
@pytest.fixture
def sink_grid3():
"""
Create a 10x10 test grid with two well defined holes in it, into an
inclined surface.
"""
sink_grid = RasterModelGrid((10, 10), spacing=1.)
lake1 = np.array([34, 35, 36, 44, 45, 46, 54, 55, 56])
lake2 = np.array([77, 78, 87, 88])
guard_nodes = np.array([23, 33, 53, 63])
lake = np.concatenate((lake1, lake2))
z = np.ones(100, dtype=float)
# add slope
z += sink_grid.node_x
z[guard_nodes] += 0.001
z[lake] = 0.
sink_grid.add_field("node", "topographic__elevation", z, units="-")
sink_grid.lake1 = lake1
sink_grid.lake2 = lake2
return sink_grid
@pytest.fixture
def sink_grid4():
"""
Create a 10x10 test grid with two well defined holes in it, into an
inclined surface. This time, one of the holes is a stupid shape, which
will require the component to arrange flow back "uphill".
"""
sink_grid = RasterModelGrid((10, 10), spacing=1.)
lake1 = np.array([34, 35, 36, 44, 45, 46, 54, 55, 56, 65, 74])
lake2 = np.array([78, 87, 88])
guard_nodes = np.array([23, 33, 53, 63, 73, 83])
lake = np.concatenate((lake1, lake2))
# outlet = 35 # shouldn't be needed
# outlet_array = np.array([outlet])
z = np.ones(100, dtype=float)
# add slope
z += sink_grid.node_x
z[guard_nodes] += 0.001 # forces the flow out of a particular node
z[lake] = 0.
# depr_outlet_target = np.empty(100, dtype=float)
# depr_outlet_target.fill(XX)
# depr_outlet_target = XX # not well defined in this simplest case...?
sink_grid.add_field("node", "topographic__elevation", z, units="-")
sink_grid.lake1 = lake1
sink_grid.lake2 = lake2
return sink_grid
@pytest.fixture
def sink_grid5():
"""
Create a 10x10 test grid with two well defined holes in it, into an
inclined surface. This time, one of the holes is a stupid shape, which
will require the component to arrange flow back "uphill". Exactly as
V4, but this version tests D4 routing.
Notes
-----
Here is the elevation grid::
1. 2. 3. 4. 5. 6. 7. 8. 9. 10.
1. 2. 3. 4. 5. 6. 7. 8. 9. 10.
1. 2. 3. 4.001 5. 6. 7. 8. 9. 10.
1. 2. 3. 4.001 0. 0. 0. 8. 9. 10.
1. 2. 3. 4. 5. 6. 7. 8. 9. 10.
1. 2. 3. 4.001 0. 0. 0. 8. 9. 10.
1. 2. 3. 4.001 5. 0. 7. 8. 9. 10.
1. 2. 3. 4.001 0. 6. 7. 8. 0. 10.
1. 2. 3. 4.001 5. 6. 7. 0. 0. 10.
1. 2. 3. 4. 5. 6. 7. 8. 9. 10.
"""
sink_grid = RasterModelGrid((10, 10), spacing=1.)
lake1 = np.array([34, 35, 36, 44, 45, 46, 54, 55, 56, 65, 74])
lake2 = np.array([78, 87, 88])
guard_nodes = np.array([23, 33, 53, 63, 73, 83])
lake = np.concatenate((lake1, lake2))
# outlet = 35 # shouldn't be needed
# outlet_array = np.array([outlet])
z = np.ones(100, dtype=float)
# add slope
z += sink_grid.node_x
z[guard_nodes] += 0.001 # forces the flow out of a particular node
z[lake] = 0.
# depr_outlet_target = np.empty(100, dtype=float)
# depr_outlet_target.fill(XX)
# depr_outlet_target = XX # not well defined in this simplest case...?
sink_grid.add_field("node", "topographic__elevation", z, units="-")
sink_grid.lake1 = lake1
sink_grid.lake2 = lake2
return sink_grid
```
#### File: stream_power/tests/test_sp_storms.py
```python
import os
import pylab
import time
import copy
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from landlab import RasterModelGrid
from landlab import ModelParameterDictionary
from landlab.components import (FlowAccumulator,
StreamPowerEroder,
PrecipitationDistribution)
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
def test_storms():
input_file_string = os.path.join(_THIS_DIR, 'drive_sp_params_storms.txt')
inputs = ModelParameterDictionary(input_file_string, auto_type=True)
nrows = inputs.read_int('nrows')
ncols = inputs.read_int('ncols')
dx = inputs.read_float('dx')
dt = inputs.read_float('dt')
time_to_run = inputs.read_float('run_time')
uplift = inputs.read_float('uplift_rate')
mean_duration = inputs.read_float('mean_storm')
mean_interstorm = inputs.read_float('mean_interstorm')
mean_depth = inputs.read_float('mean_depth')
storm_run_time = inputs.read_float('storm_run_time')
delta_t = inputs.read_float('delta_t')
mg = RasterModelGrid(nrows, ncols, dx)
mg.add_zeros('topographic__elevation', at='node')
z = mg.zeros(at='node')
mg['node']['topographic__elevation'] = z + np.random.rand(len(z)) / 1000.
mg.add_zeros('water__unit_flux_in', at='node')
precip = PrecipitationDistribution(mean_storm_duration = mean_duration,
mean_interstorm_duration = mean_interstorm,
mean_storm_depth = mean_depth,
total_t = storm_run_time, delta_t = delta_t)
fr = FlowAccumulator(mg, flow_director='D8')
sp = StreamPowerEroder(mg, **inputs)
for (interval_duration, rainfall_rate) in \
precip.yield_storm_interstorm_duration_intensity():
if rainfall_rate != 0.:
mg.at_node['water__unit_flux_in'].fill(rainfall_rate)
fr.run_one_step()
sp.run_one_step(dt)
mg.at_node['topographic__elevation'][
mg.core_nodes] += uplift * interval_duration
```
#### File: data_record/tests/test_data_record_item.py
```python
import pytest
import numpy as np
from landlab import RasterModelGrid
grid = RasterModelGrid((3,3))
shape = (3,3)
my_items2 = {'grid_element': np.array(('node', 'link'), dtype=str),
'element_id': np.array([1, 3])}
def test_dr_item_name(dr_item):
assert dr_item._name == 'DataRecord'
def test_grid_shape(dr_item):
assert dr_item._grid.number_of_node_rows == shape[0]
assert dr_item._grid.number_of_node_columns == shape[1]
def test_permitted_locations(dr_item):
assert dr_item.permitted_locations == grid.groups
def test_coordinates(dr_item):
assert len(dr_item.dims) == 1
assert list(dr_item.item_id.values) == [0, 1]
assert list(dr_item.item_coordinates) == [0, 1]
assert dr_item.number_of_items == len(my_items2['element_id'])
with pytest.raises(AttributeError):
dr_item.time
with pytest.raises(AttributeError):
dr_item.time_coordinates
with pytest.raises(AttributeError):
dr_item.number_of_timesteps
with pytest.raises(AttributeError):
dr_item.earliest_time
with pytest.raises(AttributeError):
dr_item.latest_time
with pytest.raises(AttributeError):
dr_item.prior_time
def test_variable_names(dr_item):
assert sorted(dr_item.variable_names) == sorted(
['grid_element', 'element_id'])
def test_add_item(dr_item):
dr_item.add_item(new_item={'grid_element' : np.array(
['node', 'node']),
'element_id' : np.array([4,4])},
new_item_spec={'size': (['item_id'], [10,5])})
assert (dr_item['grid_element'].values[3],
dr_item['element_id'].values[3],
dr_item['size'].values[3]) == ('node', 4.0, 5.0)
def test_get_data(dr_item):
assert dr_item.get_data(item_id=[1],
data_variable='grid_element') == 'link'
assert dr_item.get_data(data_variable='element_id')[1] == 3
def test_set_data(dr_item):
dr_item.set_data(item_id=[1],
data_variable='element_id',
new_value=2)
assert dr_item['element_id'].values[1] == 2
```
#### File: data_record/tests/test_data_record_time.py
```python
import pytest
import numpy as np
from landlab import RasterModelGrid
grid = RasterModelGrid((3,3))
shape = (3,3)
time=[0.]
data_vars={'mean_elevation' : (['time'], np.array([100]))}
attrs={'time_units' : 'y'}
def test_dr_time_name(dr_time):
assert dr_time._name == 'DataRecord'
def test_grid_shape(dr_time):
assert dr_time._grid.number_of_node_rows == shape[0]
assert dr_time._grid.number_of_node_columns == shape[1]
def test_permitted_locations(dr_time):
assert dr_time.permitted_locations == grid.groups
def test_coordinates(dr_time):
assert len(dr_time.dims) == 1
assert list(dr_time.time.values) == list(np.array(time))
assert list(dr_time.time_coordinates) == list(np.array(time))
# properties:
assert dr_time.number_of_timesteps == 1
assert dr_time.earliest_time == 0.
assert dr_time.latest_time == 0.
assert np.isnan(dr_time.prior_time)
# no item_id coord:
with pytest.raises(AttributeError):
dr_time.item_id
with pytest.raises(AttributeError):
dr_time.item_coordinates
with pytest.raises(AttributeError):
dr_time.number_of_items
def test_variable_names(dr_time):
assert dr_time.variable_names == ['mean_elevation']
def test_add_record(dr_time):
dr_time.add_record(time = [50.],
new_record={
'mean_elevation' : (['time'], np.array([120]))})
dr_time.add_record(time = [100.],
new_record={'new_variable' : (['time'], ['new_data'])})
assert np.isnan(dr_time['mean_elevation'].values[2])
def test_get_data(dr_time):
assert dr_time.get_data(time=[0.],
data_variable='mean_elevation') == 100.
assert dr_time.get_data(data_variable='mean_elevation') == [100]
def test_set_data(dr_time):
dr_time.set_data(time=[0.],
data_variable='mean_elevation',
new_value=105.)
assert dr_time['mean_elevation'].values[0] == 105.
```
#### File: plot/tests/test_channel_profile.py
```python
import pytest
from landlab import RasterModelGrid
from landlab.components import FlowAccumulator
from landlab.plot import analyze_channel_network_and_plot
from landlab.plot.channel_profile import channel_nodes
def test_route_to_multiple_error_raised():
mg = RasterModelGrid((10, 10))
z = mg.add_zeros('node', 'topographic__elevation')
z += mg.x_of_node + mg.y_of_node
fa = FlowAccumulator(mg, flow_director='MFD')
fa.run_one_step()
with pytest.raises(NotImplementedError):
analyze_channel_network_and_plot(mg)
with pytest.raises(NotImplementedError):
channel_nodes(mg,
mg.at_node['topographic__steepest_slope'],
mg.at_node['drainage_area'],
mg.at_node['flow__receiver_node'],
number_of_channels=1,
threshold=1.0)
```
#### File: utils/tests/test_flow__distance.py
```python
import pytest
import numpy as np
import math
from numpy.testing import assert_array_equal, assert_almost_equal
from landlab import RasterModelGrid, FieldError, HexModelGrid
from landlab.components import FlowAccumulator, FlowDirectorSteepest
from landlab.utils.flow__distance import calculate_flow__distance
def test_no_flow_recievers():
"""Test that correct error is raised when no flow recievers are
on the grid."""
# instantiate a model grid, do not run flow accumulation on it
mg = RasterModelGrid((30, 70))
# test that the flow distance utility will fail because of a FieldError
with pytest.raises(FieldError):
calculate_flow__distance(mg)
def test_no_upstream_array():
"""Test that correct error is raised when no flow__upstream_node_order."""
# instantiate a model grid, do not run flow accumulation on it
mg = RasterModelGrid(30, 70)
#Add a field called topographic__elevation to mg
z = mg.add_ones('node','topographic__elevation')
#Run the FlowDirectorSteepest component
fd = FlowDirectorSteepest(mg)
fd.run_one_step()
# test that the flow distance utility will fail because of a FieldError
with pytest.raises(FieldError):
calculate_flow__distance(mg)
def test_flow__distance_regular_grid_d8():
"""Test to demonstrate that flow__distance utility works as expected with
regular grids"""
# instantiate a model grid
mg = RasterModelGrid((5, 4), spacing=(1, 1))
# instantiate an elevation array
z = np.array([[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0],
[0, 0, 0, 0]], dtype='float64')
# add the elevation field to the grid
mg.add_field('node', 'topographic__elevation', z)
# instantiate the expected flow__distance array
# considering flow directions calculated with D8 algorithm
flow__distance_expected = np.array([[0, 0, 0, 0], [0, 1, 0, 0],
[0, math.sqrt(2), 1, 0],
[0, 1+math.sqrt(2), 2, 0], [0, 0, 0, 0]],
dtype='float64')
flow__distance_expected = np.reshape(flow__distance_expected,
mg.number_of_node_rows *
mg.number_of_node_columns)
#setting boundary conditions
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
# calculating flow directions with FlowAccumulator component
fr = FlowAccumulator(mg, flow_director='D8')
fr.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(mg, add_to_grid=True,
noclobber=False)
flow__distance = np.reshape(flow__distance, mg.number_of_node_rows *
mg.number_of_node_columns)
# modifying the flow distance map because boundary and outlet nodes should
# not have flow__distance value different from 0
flow__distance[mg.boundary_nodes] = 0
outlet_id = 6
flow__distance[outlet_id] = 0
# test that the flow distance utility works as expected
assert_array_equal(flow__distance_expected, flow__distance)
def test_flow__distance_regular_grid_d4():
"""Test to demonstrate that flow__distance utility works as expected with
regular grids"""
# instantiate a model grid
mg = RasterModelGrid((5, 4), spacing=(1, 1))
# instantiate an elevation array
z = np.array([[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0],
[0, 0, 0, 0]], dtype='float64')
# add the elevation field to the grid
mg.add_field('node', 'topographic__elevation', z)
# instantiate the expected flow__distance array
# considering flow directions calculated with D4 algorithm
flow__distance_expected = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 2, 1, 0],
[0, 3, 2, 0], [0, 0, 0, 0]],
dtype='float64')
flow__distance_expected = np.reshape(flow__distance_expected,
mg.number_of_node_rows *
mg.number_of_node_columns)
#setting boundary conditions
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
# calculating flow directions with FlowAccumulator component
fr = FlowAccumulator(mg, flow_director='D4')
fr.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(mg, add_to_grid=True,
noclobber=False)
flow__distance = np.reshape(flow__distance,mg.number_of_node_rows *
mg.number_of_node_columns)
# modifying the flow distance map because boundary and outlet nodes
# should not have flow__distance value different from 0
flow__distance[mg.boundary_nodes] = 0
outlet_id = 6
flow__distance[outlet_id] = 0
# test that the flow__distance utility works as expected
assert_array_equal(flow__distance_expected, flow__distance)
def test_flow__distance_irregular_grid_d4():
"""Test to demonstrate that flow__distance utility works as expected with irregular grids"""
# instantiate a model grid
dx = 1.0
hmg = HexModelGrid(5, 3, dx)
# instantiate and add the elevation field
hmg.add_field('topographic__elevation', hmg.node_x + np.round(hmg.node_y),
at='node')
# instantiate the expected flow__distance array
flow__distance_expected = np.array([0., 0., 0.,
0., 0., dx, 0.,
0., dx, dx, 2.*dx, 0.,
0., 2.*dx, 2.*dx, 0.,
0., 0., 0.])
#setting boundary conditions
hmg.set_closed_nodes(hmg.boundary_nodes)
# calculating flow directions with FlowAccumulator component: D4 algorithm
fr = FlowAccumulator(hmg, flow_director = 'D4')
fr.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(hmg, add_to_grid=True,
noclobber=False)
# test that the flow__distance utility works as expected
assert_almost_equal(flow__distance_expected, flow__distance, decimal=10)
def test_flow__distance_raster_MFD_diagonals_true():
"""Test of flow__distance utility with a raster grid and MFD."""
# instantiate a model grid
mg = RasterModelGrid((5, 4), spacing=(1, 1))
# instantiate an elevation array
z = np.array([[0 ,0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0],
[0, 0, 0, 0]], dtype='float64')
# add the elevation field to the grid
mg.add_field('node', 'topographic__elevation', z)
# instantiate the expected flow__distance array
# considering flow directions calculated with MFD algorithm
flow__distance_expected = np.array([[0, 0, 0, 0], [0, 1, 0, 0],
[0, math.sqrt(2), 1, 0],
[0, 1+math.sqrt(2), 2, 0], [0, 0, 0, 0]],
dtype='float64')
flow__distance_expected = np.reshape(flow__distance_expected,
mg.number_of_node_rows *
mg.number_of_node_columns)
#setting boundary conditions
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
# calculating flow directions with FlowAccumulator component
fa = FlowAccumulator(mg, 'topographic__elevation', flow_director='MFD',
diagonals=True)
fa.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(mg, add_to_grid=True,
noclobber=False)
# test that the flow__distance utility works as expected
assert_array_equal(flow__distance_expected, flow__distance)
def test_flow__distance_raster_MFD_diagonals_false():
"""Test of flow__distance utility with a raster grid and MFD."""
# instantiate a model grid
mg = RasterModelGrid((5, 4), spacing=(1, 1))
# instantiate an elevation array
z = np.array([[0, 0, 0, 0], [0, 21, 10, 0], [0, 31, 20, 0], [0, 32, 30, 0],
[0, 0, 0, 0]], dtype='float64')
# add the elevation field to the grid
mg.add_field('node', 'topographic__elevation', z)
# instantiate the expected flow__distance array
# considering flow directions calculated with MFD algorithm
flow__distance_expected = np.array([[0, 0, 0, 0], [0, 1, 0, 0], [0, 2, 1, 0],
[0, 3, 2, 0], [0, 0, 0, 0]],
dtype='float64')
flow__distance_expected = np.reshape(flow__distance_expected,
mg.number_of_node_rows *
mg.number_of_node_columns)
#setting boundary conditions
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
# calculating flow directions with FlowAccumulator component
fa = FlowAccumulator(mg, 'topographic__elevation', flow_director='MFD',
diagonals=False)
fa.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(mg, add_to_grid=True,
noclobber=False)
# test that the flow__distance utility works as expected
assert_array_equal(flow__distance_expected, flow__distance)
def test_flow__distance_raster_D_infinity():
"""Test of flow__distance utility with a raster grid and D infinity."""
mg = RasterModelGrid((5, 4), spacing=(1, 1))
# instantiate an elevation array
z = mg.x_of_node + 3.0 * mg.y_of_node
# add the elevation field to the grid
mg.add_field('node', 'topographic__elevation', z)
# instantiate the expected flow_length array
flow__distance_expected = np.array([[0, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0+math.sqrt(2.), 0],
[0, 2, 1+math.sqrt(2.), 0],
[0, 0, 0, 0]], dtype='float64')
#setting boundary conditions
mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
left_is_closed=True,
right_is_closed=True,
top_is_closed=True)
# calculating flow directions with FlowAccumulator component
fa = FlowAccumulator(mg, 'topographic__elevation', flow_director='DINF')
fa.run_one_step()
# calculating flow distance map
flow__distance = calculate_flow__distance(mg,
add_to_grid=True,
noclobber=False).reshape(mg.shape)
# test that the flow__distance utility works as expected
assert_array_equal(flow__distance_expected, flow__distance)
```
#### File: utils/tests/test_watershed.py
```python
import pytest
import numpy as np
from landlab import RasterModelGrid
from landlab.components import FlowAccumulator
from landlab.utils import (
get_watershed_nodes,
get_watershed_outlet,
get_watershed_masks_with_area_threshold,
get_watershed_mask,
)
def test_get_watershed_nodes():
grid = RasterModelGrid((7, 7), 1)
z = np.array(
[
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
26.,
0.,
30.,
32.,
34.,
-9999.,
-9999.,
28.,
1.,
25.,
28.,
32.,
-9999.,
-9999.,
30.,
3.,
3.,
11.,
34.,
-9999.,
-9999.,
32.,
11.,
25.,
18.,
38.,
-9999.,
-9999.,
34.,
32.,
34.,
36.,
40.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
]
)
grid.at_node["topographic__elevation"] = z
outlet_id = 2
grid.set_watershed_boundary_condition_outlet_id(outlet_id, z, nodata_value=-9999.)
fr = FlowAccumulator(grid, flow_director="D8")
fr.run_one_step()
ws_nodes = get_watershed_nodes(grid, outlet_id)
# Given the watershed boundary conditions, the number of watershed nodes
# should be equal to the number of core nodes plus 1 for the outlet node.
np.testing.assert_equal(len(ws_nodes), grid.number_of_core_nodes + 1)
def test_get_watershed_masks_with_area_threshold():
rmg = RasterModelGrid((7, 7), 200)
z = np.array(
[
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
26.,
0.,
26.,
30.,
34.,
-9999.,
-9999.,
28.,
1.,
28.,
5.,
32.,
-9999.,
-9999.,
30.,
3.,
30.,
10.,
34.,
-9999.,
-9999.,
32.,
11.,
32.,
15.,
38.,
-9999.,
-9999.,
34.,
32.,
34.,
36.,
40.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
]
)
rmg.at_node["topographic__elevation"] = z
rmg.set_closed_boundaries_at_grid_edges(True, True, True, False)
# Route flow.
fr = FlowAccumulator(rmg, flow_director="D8")
fr.run_one_step()
# Get the masks of watersheds greater than or equal to 80,000
# square-meters.
critical_area = 80000
mask = get_watershed_masks_with_area_threshold(rmg, critical_area)
# Assert that mask null nodes have a drainage area below critical area.
null_nodes = np.where(mask == -1)[0]
A = rmg.at_node["drainage_area"][null_nodes]
below_critical_area_nodes = A < critical_area
trues = np.ones(len(A), dtype=bool)
np.testing.assert_equal(below_critical_area_nodes, trues)
def test_get_watershed_outlet():
grid = RasterModelGrid((7, 7), 1)
z = np.array(
[
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
26.,
0.,
30.,
32.,
34.,
-9999.,
-9999.,
28.,
1.,
25.,
28.,
32.,
-9999.,
-9999.,
30.,
3.,
3.,
11.,
34.,
-9999.,
-9999.,
32.,
11.,
25.,
18.,
38.,
-9999.,
-9999.,
34.,
32.,
34.,
36.,
40.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
-9999.,
]
)
grid.at_node["topographic__elevation"] = z
imposed_outlet = 2
grid.set_watershed_boundary_condition_outlet_id(
imposed_outlet, z, nodata_value=-9999.
)
fr = FlowAccumulator(grid, flow_director="D8")
fr.run_one_step()
test_node = 32
determined_outlet = get_watershed_outlet(grid, test_node)
np.testing.assert_equal(determined_outlet, imposed_outlet)
# Create a pit.
pit_node = 38
grid.at_node["topographic__elevation"][pit_node] -= 32
fr.run_one_step()
pit_outlet = get_watershed_outlet(grid, test_node)
np.testing.assert_equal(pit_outlet, pit_node)
def test_route_to_multiple_error_raised_watershed_outlet():
mg = RasterModelGrid((10, 10))
z = mg.add_zeros("node", "topographic__elevation")
z += mg.x_of_node + mg.y_of_node
fa = FlowAccumulator(mg, flow_director="MFD")
fa.run_one_step()
with pytest.raises(NotImplementedError):
get_watershed_outlet(mg, 10)
def test_route_to_multiple_error_raised_watershed_mask():
mg = RasterModelGrid((10, 10))
z = mg.add_zeros("node", "topographic__elevation")
z += mg.x_of_node + mg.y_of_node
fa = FlowAccumulator(mg, flow_director="MFD")
fa.run_one_step()
with pytest.raises(NotImplementedError):
get_watershed_mask(mg, 10)
``` |
{
"source": "joeljiezhu/anagram-example",
"score": 4
} |
#### File: anagram-example/python/anagram.py
```python
from sys import argv
from mylib.main import WORDS_DIR, jsonData, getWords, getAnagram
def anagram(str):
"""
The main method to get the anagram
"""
l = len(str)
max = jsonData['MAX_CHAR']
min = jsonData['MIN_CHAR']
if l > max or l < min:
print(f"Error: please provide a word between {min} and {max}")
else:
words = getWords(WORDS_DIR, l)
return getAnagram(str, words)
def main(word):
"""
wrapper method to hold everything together
"""
result = anagram(word)
# anagram() returns None when the word length is outside the allowed
# range, so guard against subscripting a non-tuple result
if result is None:
return
foundWord = result[0]
if foundWord:
print(f"We found an anagram for {word} > {foundWord} after {result[1]}({result[2]}) try")
else:
print(f"Sorry could not find anything, after {result[1]}({result[2]}) try")
if __name__ == '__main__':
script, word = argv
main(word)
```
#### File: python/mylib/main.py
```python
import sys
import json
import time
import random
import math
from functools import reduce
from pathlib import Path
from mylib.algo import getTotalCombinationNum
from mylib.word import scrambleWords
# prepare the configuration data
p = Path(__file__)
# @TODO this is getting really silly need to pass this via a param
WORDS_DIR = p.parent.parent.parent.joinpath('share')
configFile = WORDS_DIR.joinpath('config.json')
jsonObj = open(str(configFile))
jsonData = json.load(jsonObj)
# Get the number of maximum recursion calls
# we only use 90% of what its allow on the safe side
# in fact the number is not accurate, system report 1000 but the
# recursion stop at 997
recursionLimit = sys.getrecursionlimit() * 0.9
# decorator import (note we need to explictly name the import)
# from mydeco import timer_decorator
# Functions
def getWords(dir, name):
"""
import the words file and turn into usable format (array)
"""
filePath = WORDS_DIR.joinpath(jsonData['DOT'].join([str(name), jsonData['FILE_EXT']]))
fileObj = open(filePath)
fileContent = fileObj.read()
return fileContent.strip().split(' ')
def getPossibleWord(str, triedWords, combTotal, recursionLimit, totalTry = 0):
"""
We need to get around that maximum RecursionError by restarting the
inner search whenever it bails out at the recursion limit
"""
result = getPossibleWordInner(str, triedWords, recursionLimit)
totalTry += result[1] # this is the i
word = result[0]
if (word == False): # which means it ended at the recursionLimit
# try again, carrying over the guesses already made
return getPossibleWord(str, triedWords, combTotal, recursionLimit, totalTry)
else:
return (word, totalTry)
def getPossibleWordInner(str, triedWords, maxAllowTry, i = 0):
"""
get a possible word that we haven't tried before
@BUG if this recursion run over 1000 (997 in fact) times,
it will throw RecursionError
because it reaches the system maximum
"""
# if this interaction has reach the max allow number
# we just terminate it and try in the next loop
if (i >= maxAllowTry):
return (False, i)
i += 1
# continue with guessing a word
possibleWord = scrambleWords(str)
# if its already tried then run itself again
if (possibleWord in triedWords):
return getPossibleWordInner(str, triedWords, maxAllowTry, i)
return (possibleWord, i)
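# Editor's sketch (not part of the original module): the recursion-limit
# workaround above can also be written as a plain loop, which avoids
# RecursionError altogether. It assumes the same scrambleWords() helper and
# the same "skip words we have already tried" contract as getPossibleWordInner.
def getPossibleWordIterative(str, triedWords, maxAllowTry):
    """Loop-based equivalent of getPossibleWordInner: returns (word, tries)."""
    i = 0
    while i < maxAllowTry:
        i += 1
        possibleWord = scrambleWords(str)
        if possibleWord not in triedWords:
            return (possibleWord, i)
    # give up after maxAllowTry attempts, mirroring the recursive version
    return (False, i)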
# the decorator has to be applied where the function is declared
# something goes wrong here: using the decorator crashes the function
# Error: TypeError: 'NoneType' object is not callable
# @timer_decorator
def getAnagram(str, words):
"""
find the anagram from the input str
V.2 with small changes to get around the recursion limit
"""
# first we check if this word is in the dict first
# otherwise there is no point of running the follow code
exist = str in words
if (not exist):
return (False, 0, 0) # the problem is here
# filter out the provided word
dict = [w for w in words if w != str]
totalCombinationNum = getTotalCombinationNum(len(str))
guessTotal = 0
tried = 0
possibleWords = []
# V.2 we move the while loop into the getPossibleWord
# because we need to check the maximum allow loop in each call
while tried <= totalCombinationNum:
# print(f"Tried number: {tried}")
# V.2 we add maxTry parameter
result = getPossibleWord(str, possibleWords, totalCombinationNum, recursionLimit)
word = result[0]
guessTotal += result[1]
if (word in dict):
return (word, tried, guessTotal)
# if the word is False that just terminate because it reaches the maxTry
# we don't count that as one try
if (word):
possibleWords.append(word)
tried += 1
return (False, tried, guessTotal) # couldn't find anything that should be impossible
def getCharSeq(word):
"""
input the possible word and sort the characters from A-Z
"""
seq = [ch for ch in word]
seq.sort()
return ''.join(seq)
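# Editor's sketch (illustrative, not the repo's algorithm): getCharSeq gives
# every anagram the same sorted-character signature, so a direct scan of the
# word list finds an anagram deterministically instead of by random scrambling.
# It assumes `words` is a list such as the one returned by getWords().
def findAnagramBySignature(str, words):
    """Return the first word sharing str's character signature, or False."""
    target = getCharSeq(str)
    for w in words:
        if w != str and getCharSeq(w) == target:
            return w
    return False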
def getMinuteSecond(seconds):
minute = math.floor(seconds/60)
secondsLeft = seconds - minute*60
# getting too long and ugly so break it down
msg = f"{minute} minute{'s' if minute > 1 else ''}"
if (secondsLeft > 0):
msg += f" {secondsLeft} second{'s' if secondsLeft > 1 else ''}"
return msg
def countDownMsg(seconds, msg=""):
for c in range(seconds, 0, -1):
print(f"{msg}run again in {getMinuteSecond(c)}", end="\r")
time.sleep(1)
def getDuration(s):
"""
return how many days / hours / minutes / seconds
"""
days = 0
hrs = 0
mins = math.floor(s/60)
secs = s - mins * 60
if (mins > 60): # over an hour
hrs = math.floor(mins/60)
mins = mins - hrs * 60
if (hrs > 24): # over a day
days = math.floor(hrs/24)
hrs = hrs - days * 24
return (days, hrs, mins, secs)
def getFormatDuration(s):
days, hrs, mins, secs = getDuration(s)
msg = []
if (days > 0):
msg.append(f"{days} day{'s' if days > 1 else ''}")
if (hrs > 0):
msg.append(f"{hrs} hour{'s' if hrs > 1 else ''}")
if (mins > 0):
msg.append(f"{mins} minute{'s' if mins > 1 else ''}")
msg.append(f"{secs} second{'s' if secs > 1 else ''}")
return ' '.join(msg)
``` |
{
"source": "joeljjoy1908/Student-Info",
"score": 2
} |
#### File: studinfo/administrator/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
# Create your models here.
class Dept(models.Model):
deptname = models.CharField(max_length=50)
d_description = models.CharField(max_length=50)
def __str__(self):
return self.deptname
class course(models.Model):
coursename = models.CharField(max_length=50)
deptname = models.ForeignKey(Dept, on_delete=models.CASCADE, related_name='deptn')
def __str__(self):
return self.coursename
class Studentdetails(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='userinfo')#rollnumber
gender = models.CharField(max_length=50)
dob = models.DateField()
coursename = models.ForeignKey(course, on_delete=models.CASCADE, related_name='coursen')
phone = models.IntegerField()
address = models.CharField(max_length=50)
startbatch = models.IntegerField()
endbatch = models.IntegerField()
image = models.ImageField(upload_to='studentimages/', blank=True, null=True)
def __str__(self):
return str(self.user)
class subject(models.Model):
deptname = models.ForeignKey(Dept, on_delete=models.CASCADE, related_name='deptnames')
coursename = models.ForeignKey(course, on_delete=models.CASCADE, related_name='coursenames')
subjectname = models.CharField(max_length=50)
def __str__(self):
return self.subjectname
class marks(models.Model):
username = models.ForeignKey(User, on_delete=models.CASCADE, related_name='usernam')
course = models.ForeignKey(course, on_delete=models.CASCADE, related_name='cours')
subject = models.ForeignKey(subject, on_delete=models.CASCADE, related_name='subjec')
marks = models.IntegerField()
#view code
class notice(models.Model):
title = models.CharField(max_length=250)
description = models.TextField(max_length=250)
venue = models.CharField(max_length=250)
fromdate = models.DateField()
todate = models.DateField()
class exam(models.Model):
description = models.TextField(max_length=250)
file = models.FileField(upload_to='documemts/', blank=True, null=True)
class fee(models.Model):
fee_name = models.CharField(max_length=50, blank=True, null=True)
amount = models.IntegerField()
course = models.ForeignKey(course, on_delete=models.CASCADE, related_name='cou')
student = models.ForeignKey(Studentdetails, on_delete=models.CASCADE, related_name='st')
due_date = models.DateField(blank=True, null=True)
pay_status = models.BooleanField(default=False)
# slug = models.SlugField()
def __str__(self):
return self.fee_name
# class FeePaid(models.Model):
# stud_uname = models.ForeignKey(User, on_delete=models.CASCADE, related_name='studuname')
# fee = models.ForeignKey(fee, on_delete=models.CASCADE, related_name='fees')
# course = models.ForeignKey(course, on_delete=models.CASCADE, related_name='co')
# pay_status = models.BooleanField(default=False)
#
``` |
{
"source": "joeljo2104/Fancy-Calculator",
"score": 4
} |
#### File: Fancy-Calculator/src/calculator.py
```python
from tkinter import Button, Label, Entry, Tk, messagebox
class Calculator:
# Constructor or the initializer method which is automatically called when this class is instantiated or when an instance / object is created
def __init__(self, master):
# List to be accessible everywhere.
self.list = []
# Global variable i which tracks the index position of elements within the list.
self.i = -1
# The final result.
self.result = 0
# Building up the UI part.
# Setting the title which appears on the title bar of the window
master.title('Calculator')
# Setting both the width and height to 400 pixels
master.geometry('400x400')
# Setting background colour of the whole window
master.config(bg='#24009c')
# Preventing the windows from being resized.
master.resizable(False, False)
# Title
self.heading_label = Label(
text="Calculator", font='times 45 bold', fg='white', bg='#24009c')
self.heading_label.grid(row=0, column=0, columnspan=4)
# TextField where the operations performed will appear
self.textfield = Entry(font='times 35 bold', background="#021759",
width=16, relief='flat')
self.textfield.grid(row=2, column=0, columnspan=4)
# Buttons being instantiated from the Button class by passing the required parameters and placing them in order using the grid() method
# First row
self.seven = Button(text="7", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.seven_clicked)
self.seven.grid(row=3, column=0)
self.eigth = Button(text="8", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.eight_clicked)
self.eigth.grid(row=3, column=1)
self.nine = Button(text="9", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.nine_clicked)
self.nine.grid(row=3, column=2)
self.addition = Button(text="+", font='times 25 bold', relief='flat',
width=4, fg='#24009c', bg='#10eaf0', command=self.add)
self.addition.grid(row=3, column=3)
# Second row
self.four = Button(text="4", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.four_clicked)
self.four.grid(row=4, column=0)
self.five = Button(text="5", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.five_clicked)
self.five.grid(row=4, column=1)
self.six = Button(text="6", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.six_clicked)
self.six.grid(row=4, column=2)
self.subtraction = Button(text="-", font='times 25 bold', relief='flat',
width=4, fg='#24009c', bg='#10eaf0', command=self.subtract)
self.subtraction.grid(row=4, column=3)
# Third row
self.one = Button(text="1", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.one_clicked)
self.one.grid(row=5, column=0)
self.two = Button(text="2", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.two_clicked)
self.two.grid(row=5, column=1)
self.three = Button(text="3", font='times 25 bold', relief='flat',
width=4, fg='#10eaf0', bg='#0028ff', command=self.three_clicked)
self.three.grid(row=5, column=2)
self.multiplication = Button(text="*", font='times 25 bold', relief='flat',
width=4, fg='#24009c', bg='#10eaf0', command=self.multiply)
self.multiplication.grid(row=5, column=3)
# Fourth row
self.negation = Button(text="+/-", font='times 25 bold', relief='flat',
width=4, fg='#0028ff', bg='#10eaf0', command=self.negate)
self.negation.grid(row=6, column=0)
self.ans = Button(text="=", font='times 25 bold', relief='flat',
width=4, fg='#0028ff', bg='#10eaf0', command=self.equals)
self.ans.grid(row=6, column=1)
self.delete = Button(text="DEL", font='times 25 bold', relief='flat',
width=4, fg='#0028ff', bg='#10eaf0', command=self.clear)
self.delete.grid(row=6, column=2)
self.division = Button(text="/", font='times 25 bold', relief='flat',
width=4, fg='#24009c', bg='#10eaf0', command=self.divide)
self.division.grid(row=6, column=3)
# These functions are invoked when buttons from 1-9 are tapped
def one_clicked(self):
self.textfield.insert(16, "1")
def two_clicked(self):
self.textfield.insert(16, "2")
def three_clicked(self):
self.textfield.insert(16, "3")
def four_clicked(self):
self.textfield.insert(16, "4")
def five_clicked(self):
self.textfield.insert(16, "5")
def six_clicked(self):
self.textfield.insert(16, "6")
def seven_clicked(self):
self.textfield.insert(16, "7")
def eight_clicked(self):
self.textfield.insert(16, "8")
def nine_clicked(self):
self.textfield.insert(16, "9")
# Mathematical operations performed by these functions are invoked by their respective button clicks.
def add(self):
self.i += 1
try:
self.list.append(int(self.textfield.get()))
except:
self.list.append(0)
self.btn_clicked = 'addition'
if self.i == 0:
self.textfield.delete(0, 16)
self.result = self.list[self.i]
else:
self.textfield.delete(0, 16)
if self.list[self.i] != self.result:
self.result += self.list[self.i]
def subtract(self):
self.i += 1
try:
self.list.append(int(self.textfield.get()))
except:
self.list.append(0)
self.btn_clicked = 'subtraction'
print(self.list, ", ", self.result, ", ", self.list[self.i])
if self.i == 0:
self.textfield.delete(0, 16)
self.result = self.list[self.i]
else:
self.textfield.delete(0, 16)
if self.list[self.i] != self.result:
self.result -= self.list[self.i]
def multiply(self):
self.i += 1
try:
self.list.append(int(self.textfield.get()))
except:
self.list.append(0)
self.btn_clicked = 'multiplication'
if self.i == 0:
self.textfield.delete(0, 16)
self.result = self.list[self.i]
else:
self.textfield.delete(0, 16)
if self.list[self.i] != self.result:
self.result *= self.list[self.i]
def divide(self):
self.i += 1
try:
self.list.append(int(self.textfield.get()))
except:
self.list.append(0)
self.btn_clicked = 'division'
if self.i == 0:
self.result = self.list[self.i]
self.textfield.delete(0, 16)
else:
try:
if self.list[self.i] != self.result:
self.result //= self.list[self.i]
except ZeroDivisionError:
messagebox.showwarning(
"warning", "Division by zero is not possible!")
self.textfield.delete(0, 16)
def negate(self):
try:
# read the current value before clearing the entry field
self.element = int(self.textfield.get())
self.textfield.delete(0, 16)
self.textfield.insert(16, self.element*-1)
except:
print('Exception')
def equals(self):
try:
self.list.append(int(self.textfield.get()))
if self.btn_clicked == 'addition':
self.result += int(self.textfield.get())
elif self.btn_clicked == 'subtraction':
self.result -= int(self.textfield.get())
elif self.btn_clicked == 'multiplication':
self.result *= int(self.textfield.get())
else:
self.result //= int(self.textfield.get())
self.textfield.delete(0, 16)
self.textfield.insert(16, self.result)
except:
pass
def clear(self):
print(self.result)
print(self.list)
self.i = -1
self.list = []
self.result = 0
self.textfield.delete(0, 20)
if __name__ == '__main__':
root = Tk()
gui = Calculator(root)
root.mainloop()
``` |
{
"source": "joeljogy/RobSim",
"score": 3
} |
#### File: RobSim/backend_files/create_graphs.py
```python
import json
import numpy as np
from pprint import pprint
import networkx as nx
import matplotlib.pyplot as plt
class DesignGraphs():
"""docstring for DesignGraphs"""
def __init__(self):
None
def node_to_node_graph(self, data_file):
G=nx.Graph()
with open(data_file) as f:
json_data = json.load(f)
#adding nodes
G.add_nodes_from(xrange(json_data['nodes_count']))
storage=[]
for item1 in (json_data['features']):
for m in item1['properties']['edges']:
for item2 in (json_data['features']):
temp=(item1['properties']['network_id'],item2['properties']['network_id'])
if (m in item2['properties']['edges']) and (item1!=item2) and (temp not in storage):
G.add_edge(*temp)
#print m,(temp)
storage.append(temp)
temp_list = list(temp)
temp_list.reverse()
temp_tup = tuple(temp_list)
storage.append(temp_tup)
nx.draw_networkx(G)
A = nx.degree_centrality(G)
print A
if __name__ == "__main__":
# class instance
create_graph = DesignGraphs()
# creating a table with all the nodes and roads networks
create_graph.node_to_node_graph(data_file="node.json")
```
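The `node_to_node_graph` routine above links two intersection nodes whenever they share a road (edge id) and then scores them with degree centrality. A self-contained sketch of that idea on a toy graph (the node ids and road lists here are made up for illustration, not read from `node.json`):
```python
import networkx as nx

# toy "node -> road ids it touches" mapping, standing in for the geojson features
node_edges = {0: ['r1'], 1: ['r1', 'r2'], 2: ['r2', 'r3'], 3: ['r3']}
G = nx.Graph()
G.add_nodes_from(node_edges)
for a, roads_a in node_edges.items():
    for b, roads_b in node_edges.items():
        # connect two distinct nodes when they share at least one road id
        if a != b and set(roads_a) & set(roads_b):
            G.add_edge(a, b)
print(nx.degree_centrality(G))  # {0: 0.33..., 1: 0.66..., 2: 0.66..., 3: 0.33...}
```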
#### File: RobSim/backend_files/geo_dataparser.py
```python
import json
import random
import os
import numpy as np
import networkx as nx
from copy import copy
from pprint import pprint
from urllib import urlretrieve
from zipfile import ZipFile
class GeoDataParser():
"""docstring for GeoDataParser"""
def __init__(self, city= None):
self.city = city
def get_data(self, link, city_name, country_name, category_name):
# download mapzen extract zip file for specific country
urlretrieve("https://s3.amazonaws.com/metro-extracts.mapzen.com/"+city_name+"_"+country_name+".imposm-geojson.zip", "data.zip")
# extract all files from the zip file to folder name data
with ZipFile("data.zip") as z:
z.extractall("data//")
# remove zip file from directory
os.remove("data.zip")
#consider only building geojson file from data folder
fname_path = "data/{}_{}_{}.geojson".format(city_name, country_name, category_name)
return fname_path
def get_roads(self, fname, class_name,category):
number_of_roads=0
json_data = None
osm_ids=[]
country_name=((((fname.partition('_'))[2]).partition('_'))[0]).title()
with open(fname) as f:
json_data = json.load(f)
geojson_data = {"type": "FeatureCollection", "features": []}
random_weight=[0.2,0.4,0.5,0.6,0.8,1.0]
for i, item in enumerate(json_data['features']):
            # using `in` is better than using ==, you might encounter problems with non well-formed strings e.g. place = "Qatar University" while the field is "Qatar University, Doha"
if (item['properties']['class'] == class_name)and (item['properties']['type'] == 'motorway'):
# adding category field to geojson object
item['properties']['category'] = 'Roads'
item['properties']['country'] = country_name
item['properties']['weight'] = random.choice(random_weight)
item['properties']['visibility'] = 1 #Visibility = 0 ----> Disabled and Visibility = 1 ----> Enabled
item['properties']['network_id']=item['properties']['osm_id']
item['properties']['degree'] = 0.0
del item['properties']['osm_id']
geojson_data["features"].append(item)
osm_ids.append(item['properties']['network_id'])
number_of_roads+=1
geojson_data['roads_count']=number_of_roads
return geojson_data
def export_data(self, json_data, output):
# write file into the disk
with open(output, 'w') as outfile:
json.dump(json_data, outfile)
def get_nodes(self,line_data):
number_of_nodes=0
node_ids=[]
node_coordinates=[]
json_data = None
def remove_duplicates(l):
return list(set(l))
with open(line_data) as f:
json_data = json.load(f)
geojson_data2 = {"type": "FeatureCollection", "features": []}
for item1 in (json_data['features']):
for m in item1['geometry']['coordinates']:
a=copy(item1)
edge=[]
count=0
for item2 in (json_data['features']):
if m in item2['geometry']['coordinates']:
node=copy(m)
count+=1
edge.append(item2['properties']['network_id'])
edge.append(item1['properties']['network_id'])
if count>0 and (m not in node_coordinates):
a['geometry']['type']='Point'
a['geometry']['coordinates']=copy(node)
edge2=remove_duplicates(edge)
country_name = a['properties']['country']
del a['properties']
a['properties']={}
a['properties']['name']="Undefined"
a['properties']['edges']=copy(edge2)
a['properties']['weight']=0
a['properties']['visibility'] = 1 #Visibility = 0 ----> Disabled and Visibility = 1 ----> Enabled
a['properties']['country']=copy(country_name)
a['properties']['network_id']=copy(number_of_nodes)*(-1)
a['properties']['category']='Node'
node_ids.append(number_of_nodes)
node_coordinates.append(m)
geojson_data2["features"].append(a)
number_of_nodes+=1
geojson_data2['nodes_count']=number_of_nodes
with open('node.json', 'w') as outfile:
json.dump(geojson_data2, outfile)
def node_to_node_graph(self, data_file):
G=nx.Graph()
with open(data_file) as f:
json_data = json.load(f)
number_of_nodes = json_data['nodes_count']
#adding nodes
G.add_nodes_from(xrange(number_of_nodes))
storage=[]
for item1 in (json_data['features']):
for m in item1['properties']['edges']:
for item2 in (json_data['features']):
temp=(item1['properties']['network_id'],item2['properties']['network_id'])
if (m in item2['properties']['edges']) and (item1!=item2) and (temp not in storage):
G.add_edge(item1['properties']['network_id'],item2['properties']['network_id'])
#print m,(temp)
storage.append(temp)
temp_list = list(temp)
temp_list.reverse()
temp_tup = tuple(temp_list)
storage.append(temp_tup)
nx.draw_networkx(G)
A = nx.degree_centrality(G)
with open(data_file) as f:
json_data = json.load(f)
geojson_data2 = {"type": "FeatureCollection", "features": []}
for i, item in enumerate(json_data['features']):
network_id = item['properties']['network_id']
print A[network_id]
item['properties']['degree'] = A[network_id]
geojson_data2["features"].append(item)
geojson_data2['nodes_count']=number_of_nodes
with open('node.json', 'w') as outfile:
json.dump(geojson_data2, outfile)
def modify_metrostations(self, fname):
number_of_stations=0
json_data = None
metro_ids=[]
country_name= 'Qatar'
with open(fname) as f:
json_data = json.load(f)
geojson_data4 = {"type": "FeatureCollection", "features": []}
for i, item in enumerate(json_data['features']):
item['properties']['category'] = 'Metro'
item['properties']['country'] = country_name
item['properties']['weight']=0
item['properties']['visibility'] = 1 #Visibility = 0 ----> Disabled and Visibility = 1 ----> Enabled
item['properties']['network_id']=item['properties']['id']
item['properties']['degree'] = 0
geojson_data4["features"].append(item)
metro_ids.append(item['properties']['network_id'])
number_of_stations+=1
geojson_data4['stations_count']=number_of_stations
with open('metro_stations.json', 'w') as outfile:
json.dump(geojson_data4, outfile)
def modify_busstops(self, fname):
number_of_stops=0
json_data = None
stop_ids=[]
country_name= 'Qatar'
with open(fname) as f:
json_data = json.load(f)
geojson_data5 = {"type": "FeatureCollection", "features": []}
for i, item in enumerate(json_data['features']):
item['properties']['category'] = 'Bus'
item['properties']['name'] = item['properties']['stop_name']
item['properties']['country'] = country_name
item['properties']['weight']=0
item['properties']['visibility'] = 1 #Visibility = 0 ----> Disabled and Visibility = 1 ----> Enabled
item['properties']['network_id']=item['properties']['stop_id']
item['properties']['degree'] = 0
geojson_data5["features"].append(item)
stop_ids.append(item['properties']['network_id'])
number_of_stops+=1
geojson_data5['stops_count']=number_of_stops
with open('bus_stops.json', 'w') as outfile:
json.dump(geojson_data5, outfile)
def combine_data(self,line_data,node_data,metro_data,bus_data):
geojson_data3 = {"type": "FeatureCollection", "features": []}
with open(line_data) as l:
json_data = json.load(l)
for i, item in enumerate(json_data['features']):
geojson_data3["features"].append(item)
geojson_data3['roads_count']=json_data['roads_count']
with open(node_data) as n:
json_data = json.load(n)
for i, item in enumerate(json_data['features']):
geojson_data3["features"].append(item)
geojson_data3['nodes_count']=json_data['nodes_count']
with open(metro_data) as n:
json_data = json.load(n)
for i, item in enumerate(json_data['features']):
geojson_data3["features"].append(item)
geojson_data3['stations_count']=json_data['stations_count']
with open(bus_data) as n:
json_data = json.load(n)
for i, item in enumerate(json_data['features']):
geojson_data3["features"].append(item)
geojson_data3['stops_count']=json_data['stops_count']
with open('complete_data.json', 'w') as outfile:
json.dump(geojson_data3, outfile)
#os.remove('line.json')
#os.remove('node.json')
if __name__ == "__main__":
# class instance
geo_data_parser = GeoDataParser()
# downloading and parsing data
fname_path = geo_data_parser.get_data(link= None, city_name= "doha", country_name= "qatar", category_name= "roads")
# searching for a place "place" category "category"
geojson_data = geo_data_parser.get_roads(fname_path, class_name= "highway",category="LineString")
# export file in geojson format
geo_data_parser.export_data(geojson_data, "line.json")
# search for all the nodes in the lines file
geo_data_parser.get_nodes(line_data="line.json")
# add degree centrality for all nodes
geo_data_parser.node_to_node_graph(data_file="node.json")
# add id and country name to the metro_stations file
geo_data_parser.modify_metrostations(fname="metro_stations.json")
# add id and country name to the bus_stops file
geo_data_parser.modify_busstops(fname="bus_stops.json")
# combine the nodes and lines files into a single json file
geo_data_parser.combine_data(line_data="line.json",node_data="node.json",metro_data="metro_stations.json",bus_data="bus_stops.json")
```
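get_nodes above derives node Points by looking for coordinates that appear in more than one road LineString. A toy sketch of just that membership test (the coordinates are made up):
```python
# Minimal sketch (made-up coordinates): finding a coordinate shared by two
# LineStrings, the membership test get_nodes applies before emitting a node.
road_a = [[51.49, 25.26], [51.50, 25.27], [51.51, 25.28]]
road_b = [[51.51, 25.28], [51.52, 25.29]]

shared = [pt for pt in road_a if pt in road_b]
print(shared)  # [[51.51, 25.28]] -> would become a node joining both roads
```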
#### File: RobSim/backend_files/geo_datasearch.py
```python
import json
from pprint import pprint
from urllib import urlretrieve
from zipfile import ZipFile
import os
import random
class GeoDataParser():
"""docstring for GeoDataParser"""
def __init__(self, city= None):
self.city = city
def get_data(self, link, city_name, country_name, category_name):
# download mapzen extract zip file for specific country
urlretrieve("https://s3.amazonaws.com/metro-extracts.mapzen.com/"+city_name+"_"+country_name+".imposm-geojson.zip", "data.zip")
# extract all files from the zip file to folder name data
with ZipFile("data.zip") as z:
z.extractall("data//")
# remove zip file from directory
os.remove("data.zip")
#consider only category names - building/amenities/roads geojson file from data folder
fname_path = "data/{}_{}_{}.geojson".format(city_name, country_name, category_name)
return fname_path
def search_place(self, fname, place,category):
json_data = None
with open(fname) as f:
json_data = json.load(f)
geojson_data = {"type": "FeatureCollection", "features": []}
values=[0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
for i, item in enumerate(json_data['features']):
            # using `in` is better than using ==, you might encounter problems with non well-formed strings e.g. place = "Qatar University" while the field is "Qatar University, Doha"
if item['properties']['name'] is not None:
if type(item['properties']['name']) is not "unicode":
if place.lower() in item['properties']['name'].lower():
# adding category field & weight of segment to geojson object
item['properties']['category'] = item['geometry']['type']
item['properties']['weight']=random.choice(values)
geojson_data["features"].append(item)
pprint(item)
return geojson_data
def export_data(self, json_data, output):
# write file into the disk
with open(output, 'w') as outfile:
json.dump(json_data, outfile)
if __name__ == "__main__":
# class instance
geo_data_parser = GeoDataParser()
# downloading and parsing data
fname_path = geo_data_parser.get_data(link= None, city_name= "doha", country_name= "qatar", category_name= "buildings")
# searching for a place "place" category "category"
geojson_data = geo_data_parser.search_place(fname_path, place= "Q Mall",category="Polygon")
# export file in geojson format
geo_data_parser.export_data(geojson_data, "filtered_data.json")
# search for all the nodes in the filtered data and store in a filename nodes
geo_data_parser.get_nodes(fil_data="filtered_data.json",nodes_outfile="nodes.json")
``` |
{
"source": "joeljohnston/mediagen",
"score": 3
} |
#### File: mediagen/mediagen/creatempg.py
```python
import os
import sys
import numpy
from PIL import Image
import random
import string
import time
import datetime
import cv2
from util import util
class creatempg:
"""Creates a mpg of random size (number of frames) and random modified date as fed by the parent mediagen package"""
def __init__(self):
print("Create a JPG Image")
def mpgnamegen(self):
#Create a random name of the movie file
moviename = ''.join([random.choice(string.ascii_letters) for n in range(16)])
return moviename
def mpggen(self,filestruct,frames,picx=300,picy=300):
#instantiate a name
mpgname = self.mpgnamegen()
#print("mpgname: ",mpgname)
#print("mpgpath: ",filestruct[0][0] )
#print("mpgdate: ",filestruct[1][0] )
#print("mpgpath: ",filestruct[2][0] )
fullpath = ("%s/%s.%s") % (filestruct[0][0],mpgname,filestruct[2][0])
print("mpg movie fullpath: ", fullpath)
#create movie passing randomframes and target path
self.makeVideo( frames, fullpath)
#Set randomly assigned datetime to created file
if os.path.isfile(fullpath):
utime = time.mktime(filestruct[1][0].timetuple())
os.utime(fullpath, (utime,utime))
    def makeVideo(self, inPattern, outFile):
        pathlist = inPattern.split(",")
        frame = cv2.imread(pathlist[0])
#cv2.imshow('video',frame)
#grab movie dimensions from first frame (all frames should be consistent)
if os.path.isfile(pathlist[0]):
height, width, channels = frame.shape
else:
height=300
width=300
channels=1
out = cv2.VideoWriter(outFile,cv2.VideoWriter_fourcc(*'mp4v'), 10, (width,height))
for path in inPattern.split(","):
print("frame: ", path)
if os.path.isfile( path ):
frame = cv2.imread(path)
out.write(frame)
#cv2.imshow('video',frame)
if (cv2.waitKey(1) & 0xFF) == ord('q'):
break
else:
#print('%d frames written in %.2f secs( %.2f fps )' % ( i, time.time()- t, float( i )/ ( time.time()- t ) ))
print("movie made")
out.release()
for dpath in inPattern.split(","):
if os.path.isfile( dpath ):
print("dpath: ",dpath)
os.remove(dpath)
#cv2.destroyAllWindows()
print("The output video is {}".format(outFile))
```
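makeVideo above stitches the comma-separated frame paths into an mp4 with cv2.VideoWriter. A self-contained sketch of that writer pattern on synthetic frames (the size, fps and codec here are assumptions, not values from the repo):
```python
# Minimal sketch (assumed size/fps/codec): writing synthetic frames with
# cv2.VideoWriter, the same pattern makeVideo uses for frame files on disk.
import cv2
import numpy as np

width, height, fps = 320, 240, 10  # hypothetical dimensions
out = cv2.VideoWriter('sketch.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (width, height))

for i in range(30):
    # flat grey frame whose brightness changes per frame
    frame = np.full((height, width, 3), (i * 8) % 256, dtype=np.uint8)
    out.write(frame)

out.release()
```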
#### File: mediagen/mediagen/dirstruct.py
```python
import os
import sys
import random
import string
from glob import glob
class dirstruct:
""" Builds the Directory structure randomly in the target directory """
def __init__(self,basedir,_dirdepth,_maxbasesubs):
#Make sure basedir exists
if not os.path.exists(basedir):
print("Your Base Directory %s doesn't seem to exist, shall I create it? " % (str(basedir)))
basedir_create = input("(yes/no): ")
if basedir_create == 'yes':
os.makedirs(basedir)
else:
print("Sorry can't help you without a Base Directory")
sys.exit()
print(type(basedir))
print("Base Directory %s exists" % (str(basedir)))
        randmaxrangesubs = random.randint(1, int(_maxbasesubs))  # lower bound 1 so randint(1, n) below is always valid
for i in range(random.randint(1,randmaxrangesubs)):
self.gensubdir(str(basedir),_dirdepth)
self.list_files(str(basedir))
def dirname(self):
#Generate random name for directory
randomdirname= ''.join([random.choice(string.ascii_letters) for n in range(16)])
return randomdirname
def createdirs(self,targetdir):
os.makedirs(targetdir)
def list_files(self, startpath):
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
#print("level: ", level)
indent = ' ' * 4 * (level)
#print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
def gensubdir2(self,_basedir):
for root, dirs, files in os.walk(_basedir):
print("dirs: ", dirs)
def gensubdir(self,_basedir, _dirdepth):
i = 0
j = 0
sub_path = _basedir
for folder, subs, files in os.walk(_basedir):
#print("number of subs: ", len(subs))
_dirarray = []
#randomdir = random.randint(1,len(subs))
while i < random.randint(0,int(_dirdepth)):
_dirarray.append(self.dirname())
i = i + 1
while j < random.randint(0,len(_dirarray)):
sub_path = (sub_path + '/' + _dirarray[j])
j = j + 1
print("sub_path: ", sub_path)
if not os.path.exists(sub_path):
self.createdirs(sub_path)
```
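gensubdir builds each nested sub_path out of random 16-letter names from dirname(). The naming rule on its own, joined into a path (the base directory and depth are placeholders):
```python
# Minimal sketch (placeholder base dir and depth): the 16-letter random name
# used by dirname(), joined into a nested path the way gensubdir builds sub_path.
import os
import random
import string

def random_name(length=16):
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))

parts = [random_name() for _ in range(3)]  # hypothetical depth of 3
sub_path = os.path.join('/tmp/base', *parts)
print(sub_path)
```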
#### File: mediagen/mediagen/__main__.py
```python
import os
import sys
import argparse
import random
import datetime
#import local classes
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from dirstruct import dirstruct
from filestruct import filestruct
from createjpg import createjpg
from creatempg import creatempg
from createtxt import createtxt
def _launch():
#Gather arguments from cli
parser = argparse.ArgumentParser(description='Generate Media')
parser.add_argument('--basedir', metavar='basedir', type=str, nargs='+', help='Directory to start generating data in')
parser.add_argument('--mediasize', metavar='mediasize', type=int, nargs='+', help='Total Sum of media to generate in GB')
parser.add_argument('--filesizemin', metavar='filesizemin', type=int, nargs='+', help='Minimum file size to generate in GB')
parser.add_argument('--filesizemax', metavar='filesizemax', type=int, nargs='+', help='Maximum file size to generate in GB')
parser.add_argument('--excludetypes', metavar='excludetypes', type=str, nargs='+', help='Filetypes you dont want to generate')
parser.add_argument('--daterangestart', metavar='daterangestart', type=str, nargs='+', help='Start Date of file creation daterange')
parser.add_argument('--daterangeend', metavar='daterangeend', type=str, nargs='+', help='End Date of file creation daterange')
parser.add_argument('--dirdepth', metavar='dirdepth', type=str, nargs='+', help='Number of directories down to create')
parser.add_argument('--maxbasesubs', metavar='maxbasesubs', type=int, nargs='+', help='Maximum number of directories created in the top of the tree')
parser.add_argument('--maxnumfiles', metavar='maxnumfiles', type=int, nargs='+', help='Maximum number of files created in the top of the tree')
args = parser.parse_args()
filestructure = []
#invoke dirstruct class to create the random base directory structure
a = dirstruct(str(args.basedir[0]),args.dirdepth[0],args.maxbasesubs[0])
#invoke the random file generation class to framework jobs
b = filestruct()
#set vars for building a random filestructure
i = 0
j = 0
k = 0
now = datetime.datetime.now()
#loop through the maxnumfiles argument to create a structure that represents a random sampling of filetypes across the directory structure
while i in range(args.maxnumfiles[0]):
fstructtype = b.randomtype()
fstructdate = b.randomdate(args.daterangestart[0], args.daterangeend[0], '%Y-%m-%d', random.random())
fstructpath = b.structcrawl(args.basedir[0])
fileinfo = [[fstructpath],[fstructdate],[fstructtype]]
filestructure.append(fileinfo)
i = i + 1
for j in filestructure:
print(j[2][0])
#if the selected filetype is jpg
if j[2][0] == 'jpg':
c = createjpg()
c.jpggen(j,0,0)
#if the selected filetype is mp4
if j[2][0] == 'mp4':
            #we need to create the movie frames before handing off a list to the movie create function
framestruct = []
numframes = random.randint(500, 1800)
#today = now.strftime("%Y-%m-%d %H:%M")
today = j[1][0]
frames = ""
print("numframes: ",numframes)
print("k: ",k)
while k in range(numframes):
framename = j[0][0].split("/")
print("today: ", today)
print("k: ", k)
print("framename: ", framename[3])
frameinfo = [['/tmp', [framename[3] + str(k)]],[today],['jpg']]
framestruct.append(frameinfo)
c = createjpg()
print("framestruct: ", len(framestruct) )
k = k + 1
k = 0
for l in framestruct:
frames = c.jpggen(l,768,1080) + "," + frames
print("frames: ", frames)
m = creatempg()
m.mpggen(j,frames)
#if the selected filetype is txt
if j[2][0] == 'txt':
t = createtxt()
t.txtgen(j)
if __name__ == '__main__':
_launch()
``` |
{
"source": "joeljohnston/mediastruct",
"score": 3
} |
#### File: mediastruct/mediastruct/crawl.py
```python
import os
import re
import logging
import xxhash
import shutil
import json
import glob
import uuid
import time
import datetime
from mediastruct.utils import *
from os import walk, remove, stat
from os.path import join as joinpath
#setup logging from the parent class
log = logging.getLogger(__name__)
class crawl:
"""Iterate a dir tree and build a sum index - We first load our json data file and inspect the value of du. This is
compared to a fresh 'quick' check of the directory size using utils.getFolderSize. If they are different we are
going to re-index this directory and re-write all of our file hashes to the json file. This saves time on directory structures
such as the archive, that rarely change"""
def __init__(self,force,rootdir,datadir):
dirname = re.split(r"\/",rootdir)
dirname_len = len(dirname) -1
print('dirname_len: ', dirname_len)
log.info("Crawl - Crawling %s" % (rootdir))
if os.path.isdir(rootdir):
if force == True:
log.info('Crawl - Force Attribute set to True - indexing %s' % (rootdir))
index = crawl.index_sum(self,rootdir,datadir)
else:
#if our data file exists for this directory load it and compare
if os.path.isfile('%s/%s_index.json' % (datadir,dirname[dirname_len])):
print('dirname: ',dirname[dirname_len])
with open('%s/%s_index.json' % (datadir,dirname[dirname_len]), 'r') as f:
array = json.load(f)
#here we are comparing
if array['du']:
currentdu = utils.getFolderSize(self,rootdir)
if currentdu != array['du'] or array['du'] == 0:
index = crawl.index_sum(self,rootdir,datadir)
else:
log.info("Crawl - The Index matches the Directory")
#otherwise start the index process
else:
index = crawl.index_sum(self,rootdir,datadir)
def index_sum(self,rootdir,datadir):
"""Index hash sum of all files in a directory tree and write to Json file"""
#isolate the name of the directory from our argument
dirname = re.split(r"\/",rootdir)
dirname_len = len(dirname) -1
sum_dict = {}
#walk the structure of the target dir tree
for path, dirs, files in walk(rootdir):
for filename in files:
index_line = {}
fileid = str(uuid.uuid1())
filepath = joinpath(path,filename)
filesize = stat(filepath).st_size
this_year = str(datetime.datetime.fromtimestamp(os.path.getmtime(filepath))).split('-')[0]
#this can be changed out with any hash library you prefer
log.info("Crawl - Hashing File: %s" % (filepath))
try:
filehash = xxhash.xxh64(open(filepath,'rb').read()).hexdigest()
if filehash != '':
index_line.update([('filehash',filehash),('path',filepath),('filesize',filesize),('year',this_year)])
sum_dict[fileid] = index_line
except:
print("broken file: ", filepath)
log.info("Crawl - broken file: %s" % (filepath))
time.sleep(120)
#we're creating a key-based dictionary here
sum_dict['du'] = utils.getFolderSize(self,rootdir)
indexfilepath = ('%s/%s_index.json' % (datadir, dirname[dirname_len]))
indexfile = open(indexfilepath,"w")
jsonoutput = json.dumps(sum_dict)
indexfile.write(jsonoutput)
indexfile.close()
#return the key-based dictionary with updated hash values
log.info("Crawl - Completed crawl of %s)" % (rootdir))
return sum_dict
```
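index_sum above hashes every file it walks with xxhash.xxh64 and stores the digest under a fresh uuid. The hashing step in isolation (the path is a placeholder):
```python
# Minimal sketch (placeholder path): hashing one file with xxhash.xxh64,
# the same digest index_sum records for every file it walks.
import xxhash

path = 'example.bin'  # hypothetical file
with open(path, 'rb') as f:
    digest = xxhash.xxh64(f.read()).hexdigest()
print(digest)
```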
#### File: mediastruct/mediastruct/ingest.py
```python
import os
import sys
import time
import shutil
import logging
from glob import glob
log = logging.getLogger(__name__)
log.info('Ingest - Launching the Ingest Class')
class ingest(object):
'''the ingest class manages contents entering the workflow by organizing files by their last modified date
into the working directory / media directory'''
#class init
def __init__(self,_sourcedir,_destdir):
#setup logging for this child class
log = logging.getLogger(__name__)
ingest.mvrnm(self,_sourcedir,_destdir)
#Move and Rename as Necessary
def mvrnm(self,sourcedir,destdir):
'''this function ensures that no data is lost via file collisions as files are moved into the working dir
by renaming them with a .<unixdatetimestamp. addition to the existing filename'''
log.info("Ingest - Directory root: %s" % (sourcedir))
#ensure the source directory exists
if os.path.isdir(sourcedir):
#change parser to the sourcedir
#os.chdir(sourcedir)
#loop through contents of the ingest directory
for folder, subs, files in os.walk(sourcedir):
for filename in files:
#split the filename up
ext = os.path.splitext(filename)[1][1:]
newfile = os.path.splitext(filename)[0]
#rename the file with a unique timestamp based name
millis = int(round(time.time() * 1000))
newfilename = "%s.%s.%s" % (newfile, millis, ext)
log.info("Ingest - oldfilename: %s" % (filename))
log.info("Ingest - newfilename: %s" % (newfilename))
#new file path
filepath = "%s/%s" % (folder,filename)
ftime = time.gmtime(os.path.getmtime(filepath))
#create date based year and month directories as needed
ctime_dir = "%s/%s" % (str(ftime.tm_year), str(ftime.tm_mon))
dest_dir="%s/%s" % (destdir, ctime_dir)
dest="%s/%s/%s" % (destdir, ctime_dir, filename)
newdest= "%s/%s" % (dest_dir, newfilename)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
if not os.path.exists(dest):
log.info('Ingest - Moving %s from %s to %s' % (ext,filename,dest))
shutil.move(filepath, dest)
else:
log.info("Ingest - Duplicate Name found - new path: %s" % (newdest) )
shutil.move(filepath, newdest)
else:
log.error("Ingest - Source Directory {} doesn't exist".format(sourcedir))
```
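mvrnm avoids collisions by splicing a millisecond timestamp between the filename stem and the extension. A small sketch of just that renaming rule (the filename is a placeholder):
```python
# Minimal sketch (placeholder filename): the collision-free name mvrnm builds
# by inserting a millisecond timestamp before the extension.
import os
import time

filename = 'IMG_0001.jpg'  # hypothetical input name
ext = os.path.splitext(filename)[1][1:]
stem = os.path.splitext(filename)[0]
millis = int(round(time.time() * 1000))
newfilename = "%s.%s.%s" % (stem, millis, ext)
print(newfilename)  # e.g. IMG_0001.1700000000000.jpg
```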
#### File: mediastruct/mediastruct/__main__.py
```python
import os
import sys
import configparser
import glob
import io
import logging
import logging.config
import argparse
#add path of package
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
##############################################################
# Import local classes
##############################################################
import ingest
import crawl
import dedupe
import archive
import utils
import validate
#############################################################
# Config files - create with defaults if they don't exist
#############################################################
configfile_name = "conf/config.ini"
#create a template if the config.ini doesn't exist
if not os.path.isfile(configfile_name):
cfgfile = open(configfile_name, 'w')
appConfig = configparser.ConfigParser()
appConfig.add_section('ingestdirs')
appConfig.set('ingestdirs','ingestdir','/data/ingest')
appConfig.add_section('workingdirs')
appConfig.set('workingdirs','workingdir','/data/media')
appConfig.add_section('archivedir')
appConfig.set('archivedir','archivedir','/archive')
appConfig.add_section('archivemedia')
appConfig.set('archivemedia','mediasize','24')
appConfig.set('archivemedia','burnedtag','wr')
appConfig.add_section('duplicates')
appConfig.set('duplicates','duplicatedir','/data/duplicates')
appConfig.add_section('validated')
appConfig.set('validated','validateddir','/validated')
appConfig.add_section('datadir')
appConfig.set('datadir','jsondatadir','data')
appConfig.set('datadir','logdir','logs')
appConfig.write(cfgfile)
cfgfile.close()
else:
config = configparser.ConfigParser()
config.read('conf/config.ini')
ingestdir = config['ingestdirs']['ingestdir']
workingdir = config['workingdirs']['workingdir']
archivedir = config['archivedir']['archivedir']
mediasize = config['archivemedia']['mediasize']
burnedtag = config['archivemedia']['burnedtag']
duplicatedir = config['duplicates']['duplicatedir']
validateddir = config['validated']['validateddir']
jsondatadir = config['datadir']['jsondatadir']
logdir = config['datadir']['logdir']
#############################################################
# Setup Paths
#############################################################
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
this_path = os.path.dirname(os.path.realpath(__file__))
one_up = os.path.dirname(os.path.realpath(__file__)) + '/../'
app_path = os.path.join(this_path, one_up)
config_path = app_path + 'conf/'
##############################################################
# Logging
##############################################################
log_path = logdir + '/mediastruct.log'
if not os.path.isfile(log_path):
    logfile = open(log_path, 'w')
    logfile.close()
logging.basicConfig(filename=log_path, level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p %Z -')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
class mediastruct_init(object):
def __init__(self):
log = logging.getLogger(__name__)
log.info("########################## Starting Medistruct ##########################")
#arparse init
parser = argparse.ArgumentParser(description='Manage media file structure for archiving.',usage='''mediastruct <command> [<args>]
Commands:
ingest - Moves files from the ingest directory set in conf/config.ini to the working directory set in conf/config.ini in a date structure
crawl - Iterates through all configured directories (except duplicates) and creates a hash index json file in data/
dedupe - Combines all configured directory's json datasets and moves duplicates in the working directory or ingest into the duplicates directory
archive - Uses the mediasize variable set in conf/config.ini to create sized volumes in the archive directory and moves files accordingly
validate - Does the reverse of the above actions by rehashing and comparing each marked duplicate file to all files in all structures, moves matches to the validated directory
daily - Combines the above functions into a re-usable automated workflow for use with scheduled jobs
''')
parser.add_argument('command', help='Subcommand to run')
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
parser.print_help()
exit(1)
#Assess Command Argument
log.info("Command: %s" % (args.command))
getattr(self, args.command)()
def crawl(self):
print("Crawling")
#the crawl function performs a hash index of all files in the target directories
parser = argparse.ArgumentParser(description='Crawl the dirs and create a hash index')
parser.add_argument('-f','--force',action='store_true',default=False,help='forces indexing of all directories')
parser.add_argument('-p','--path',help='pass a directory to crawl')
args = parser.parse_args(sys.argv[2:])
#Crawl a provided directory
if args.path:
crawl.crawl(args.force,args.path,jsondatadir)
else:
ingestsum = crawl.crawl(args.force,ingestdir,jsondatadir)
workingdirsum = crawl.crawl(args.force,workingdir,jsondatadir)
archivedirsum = crawl.crawl(args.force,archivedir,jsondatadir)
def ingest(self):
print("Ingesting Files")
#the ingest function sorts and moves files by date into the working/media directory
a = ingest.ingest(ingestdir,workingdir)
def dedupe(self):
print("Dedupping")
#the dedupe function combines all hash indexes and analyzes the dataset for duplicates
data_files = glob.glob(jsondatadir + '/*.json')
#run the dedupe function
dedupe.dedupe(data_files,duplicatedir,archivedir)
def archive(self):
print("Archiving")
#the archive function pulls from the working/media directory and pools into sized volumes
archive.archive(archivedir,jsondatadir, workingdir,mediasize)
def validate(self):
print("Validating - This can take awhile")
validate.validate(duplicatedir,workingdir,archivedir,validateddir)
def test(self):
print("Running Full Test Sequence")
#the ingest function sorts and moves files by date into the working/media directory
ingest.ingest(ingestdir,workingdir)
#the crawl function performs a hash index of all files in the target directories
workingdirsum = crawl.crawl(True,workingdir,jsondatadir)
archivedirsum = crawl.crawl(False,archivedir,jsondatadir)
#the dedupe function combines all hash indexes and analyzes the dataset for duplicates
data_files = glob.glob(jsondatadir + '/*.json')
#run the dedupe function
dedupe.dedupe(data_files,duplicatedir)
        #after the dedupe function has moved duplicates out, reindex
workingdirsum = crawl.crawl(True,workingdir,jsondatadir)
#the archive function pulls from the working/media directory and pools into sized volumes
archive.archive(archivedir,jsondatadir, workingdir,mediasize)
#validate that all files in duplicates exist elsewhere before moving to validated
validate.validate(duplicatedir,workingdir,archivedir,validateddir)
print("Daily Job Completed Successfully")
def daily(self):
print("Running Daily Job")
#the ingest function sorts and moves files by date into the working/media directory
ingest.ingest(ingestdir,workingdir)
#the crawl function performs a hash index of all files in the target directories
workingdirsum = crawl.crawl(True,workingdir,jsondatadir)
archivedirsum = crawl.crawl(False,archivedir,jsondatadir)
#the dedupe function combines all hash indexes and analyzes the dataset for duplicates
data_files = glob.glob(jsondatadir + '/*.json')
#run the dedupe function
dedupe.dedupe(data_files,duplicatedir)
        #after the dedupe function has moved duplicates out, reindex
#workingdirsum = crawl.crawl(True,workingdir,jsondatadir)
#the archive function pulls from the working/media directory and pools into sized volumes
#archive.archive(archivedir,jsondatadir, workingdir,mediasize)
#validate that all files in duplicates exist elsewhere before moving to validated
#validate.validate(duplicatedir,workingdir,archivedir,validateddir)
#launch on init
if __name__ == '__main__':
mediastruct_init()
```
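mediastruct_init dispatches subcommands by looking the command name up as a method with getattr. A stripped-down sketch of that dispatch pattern (the Dispatcher class and its hello command are hypothetical):
```python
# Minimal sketch (hypothetical class and command): argparse subcommand
# dispatch via getattr, the same pattern mediastruct_init uses above.
import argparse
import sys

class Dispatcher:
    def __init__(self):
        parser = argparse.ArgumentParser(usage='tool <command>')
        parser.add_argument('command', help='Subcommand to run')
        args = parser.parse_args(sys.argv[1:2])
        if not hasattr(self, args.command):
            print('Unrecognized command')
            parser.print_help()
            sys.exit(1)
        getattr(self, args.command)()

    def hello(self):
        print('hello subcommand')

if __name__ == '__main__':
    Dispatcher()
```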
#### File: mediastruct/mediastruct/sort.py
```python
import os
import sys
import time
import shutil
from glob import glob
class sort:
def __init__(self):
print("yeah")
``` |
{
"source": "joeljosephjin/gvgai-rl",
"score": 2
} |
#### File: stable_baselines3/common/off_policy_algorithm.py
```python
import time
import os
import pickle
import warnings
from typing import Union, Type, Optional, Dict, Any, Callable
import gym
import torch as th
import numpy as np
from stable_baselines3.common import logger
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.utils import safe_mean
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.common.type_aliases import GymEnv, RolloutReturn
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.buffers import ReplayBuffer
class OffPolicyAlgorithm(BaseAlgorithm):
"""
The base for Off-Policy algorithms (ex: SAC/TD3)
:param policy: Policy object
:param env: The environment to learn from
(if registered in Gym, can be str. Can be None for loading trained models)
:param policy_base: The base policy used by this method
:param learning_rate: (float or callable) learning rate for the optimizer,
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: (int) size of the replay buffer
:param learning_starts: (int) how many steps of the model to collect transitions for before learning starts
:param batch_size: (int) Minibatch size for each gradient update
:param policy_kwargs: Additional arguments to be passed to the policy on creation
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param verbose: The verbosity level: 0 none, 1 training information, 2 debug
:param device: Device on which the code should run.
By default, it will try to use a Cuda compatible device and fallback to cpu
if it is not possible.
:param support_multi_env: Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: Seed for the pseudo random generators
:param use_sde: Whether to use State Dependent Exploration (SDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: (bool) Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param sde_support: (bool) Whether the model support gSDE or not
"""
def __init__(self,
policy: Type[BasePolicy],
env: Union[GymEnv, str],
policy_base: Type[BasePolicy],
learning_rate: Union[float, Callable],
buffer_size: int = int(1e6),
learning_starts: int = 100,
batch_size: int = 256,
policy_kwargs: Dict[str, Any] = None,
tensorboard_log: Optional[str] = None,
verbose: int = 0,
device: Union[th.device, str] = 'auto',
support_multi_env: bool = False,
create_eval_env: bool = False,
monitor_wrapper: bool = True,
seed: Optional[int] = None,
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
sde_support: bool = True):
super(OffPolicyAlgorithm, self).__init__(policy=policy, env=env, policy_base=policy_base,
learning_rate=learning_rate, policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log, verbose=verbose,
device=device, support_multi_env=support_multi_env,
create_eval_env=create_eval_env, monitor_wrapper=monitor_wrapper,
seed=seed, use_sde=use_sde, sde_sample_freq=sde_sample_freq)
self.buffer_size = buffer_size
self.batch_size = batch_size
self.learning_starts = learning_starts
self.actor = None # type: Optional[th.nn.Module]
self.replay_buffer = None # type: Optional[ReplayBuffer]
# Update policy keyword arguments
if sde_support:
self.policy_kwargs['use_sde'] = self.use_sde
self.policy_kwargs['device'] = self.device
# For gSDE only
self.use_sde_at_warmup = use_sde_at_warmup
def _setup_model(self):
self._setup_lr_schedule()
self.set_random_seed(self.seed)
self.replay_buffer = ReplayBuffer(self.buffer_size, self.observation_space,
self.action_space, self.device)
self.policy = self.policy_class(self.observation_space, self.action_space,
self.lr_schedule, **self.policy_kwargs)
self.policy = self.policy.to(self.device)
def save_replay_buffer(self, path: str):
"""
Save the replay buffer as a pickle file.
:param path: (str) Path to a log folder
"""
assert self.replay_buffer is not None, "The replay buffer is not defined"
with open(os.path.join(path, 'replay_buffer.pkl'), 'wb') as file_handler:
pickle.dump(self.replay_buffer, file_handler)
def load_replay_buffer(self, path: str):
"""
Load a replay buffer from a pickle file.
:param path: (str) Path to the pickled replay buffer.
"""
with open(path, 'rb') as file_handler:
self.replay_buffer = pickle.load(file_handler)
assert isinstance(self.replay_buffer, ReplayBuffer), 'The replay buffer must inherit from ReplayBuffer class'
def collect_rollouts(self, # noqa: C901
env: VecEnv,
# Type hint as string to avoid circular import
callback: 'BaseCallback',
n_episodes: int = 1,
n_steps: int = -1,
action_noise: Optional[ActionNoise] = None,
learning_starts: int = 0,
replay_buffer: Optional[ReplayBuffer] = None,
log_interval: Optional[int] = None) -> RolloutReturn:
"""
Collect experiences and store them into a ReplayBuffer.
:param env: (VecEnv) The training environment
:param callback: (BaseCallback) Callback that will be called at each step
(and at the beginning and end of the rollout)
:param n_episodes: (int) Number of episodes to use to collect rollout data
You can also specify a ``n_steps`` instead
:param n_steps: (int) Number of steps to use to collect rollout data
You can also specify a ``n_episodes`` instead.
:param action_noise: (Optional[ActionNoise]) Action noise that will be used for exploration
Required for deterministic policy (e.g. TD3). This can also be used
in addition to the stochastic policy for SAC.
:param learning_starts: (int) Number of steps before learning for the warm-up phase.
:param replay_buffer: (ReplayBuffer)
:param log_interval: (int) Log data every ``log_interval`` episodes
:return: (RolloutReturn)
"""
episode_rewards, total_timesteps = [], []
total_steps, total_episodes = 0, 0
assert isinstance(env, VecEnv), "You must pass a VecEnv"
assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment"
if n_episodes > 0 and n_steps > 0:
            # Note we are referring to the constructor arguments
# that are named `train_freq` and `n_episodes_rollout`
# but correspond to `n_steps` and `n_episodes` here
warnings.warn("You passed a positive value for `train_freq` and `n_episodes_rollout`."
"Please make sure this is intended. "
"The agent will collect data by stepping in the environment "
"until both conditions are true: "
"`number of steps in the env` >= `train_freq` and "
"`number of episodes` > `n_episodes_rollout`")
if self.use_sde:
self.actor.reset_noise()
callback.on_rollout_start()
continue_training = True
while total_steps < n_steps or total_episodes < n_episodes:
done = False
episode_reward, episode_timesteps = 0.0, 0
while not done:
if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.actor.reset_noise()
# Select action randomly or according to policy
if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
# Warmup phase
unscaled_action = np.array([self.action_space.sample()])
else:
# Note: we assume that the policy uses tanh to scale the action
# We use non-deterministic action in the case of SAC, for TD3, it does not matter
unscaled_action, _ = self.predict(self._last_obs, deterministic=False)
# Rescale the action from [low, high] to [-1, 1]
if isinstance(self.action_space, gym.spaces.Box):
scaled_action = self.policy.scale_action(unscaled_action)
# Add noise to the action (improve exploration)
if action_noise is not None:
# NOTE: in the original implementation of TD3, the noise was applied to the unscaled action
# Update(October 2019): Not anymore
scaled_action = np.clip(scaled_action + action_noise(), -1, 1)
# We store the scaled action in the buffer
buffer_action = scaled_action
action = self.policy.unscale_action(scaled_action)
else:
# Discrete case, no need to normalize or clip
buffer_action = unscaled_action
action = buffer_action
# Rescale and perform action
new_obs, reward, done, infos = env.step(action)
# Only stop training if return value is False, not when it is None.
if callback.on_step() is False:
return RolloutReturn(0.0, total_steps, total_episodes, continue_training=False)
episode_reward += reward
# Retrieve reward and episode length if using Monitor wrapper
self._update_info_buffer(infos, done)
# Store data in replay buffer
if replay_buffer is not None:
# Store only the unnormalized version
if self._vec_normalize_env is not None:
new_obs_ = self._vec_normalize_env.get_original_obs()
reward_ = self._vec_normalize_env.get_original_reward()
else:
# Avoid changing the original ones
self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward
replay_buffer.add(self._last_original_obs, new_obs_, buffer_action, reward_, done)
self._last_obs = new_obs
# Save the unnormalized observation
if self._vec_normalize_env is not None:
self._last_original_obs = new_obs_
self.num_timesteps += 1
episode_timesteps += 1
total_steps += 1
if 0 < n_steps <= total_steps:
break
if done:
total_episodes += 1
self._episode_num += 1
episode_rewards.append(episode_reward)
total_timesteps.append(episode_timesteps)
if action_noise is not None:
action_noise.reset()
# Log training infos
if log_interval is not None and self._episode_num % log_interval == 0:
fps = int(self.num_timesteps / (time.time() - self.start_time))
logger.record("time/episodes", self._episode_num, exclude="tensorboard")
if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
logger.record('rollout/ep_rew_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buffer]))
logger.record('rollout/ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buffer]))
logger.record("time/fps", fps)
logger.record('time/time_elapsed', int(time.time() - self.start_time), exclude="tensorboard")
logger.record("time/total timesteps", self.num_timesteps, exclude="tensorboard")
if self.use_sde:
logger.record("train/std", (self.actor.get_std()).mean().item())
if len(self.ep_success_buffer) > 0:
logger.record('rollout/success rate', safe_mean(self.ep_success_buffer))
# Pass the number of timesteps for tensorboard
logger.dump(step=self.num_timesteps)
mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0
callback.on_rollout_end()
return RolloutReturn(mean_reward, total_steps, total_episodes, continue_training)
``` |
{
"source": "joeljosephjin/SERL-baselines",
"score": 2
} |
#### File: SERL-baselines/SERL/PEARL.py
```python
class PEARL:
def __init__(self):
self.nothing = 0
def printer(self):
print("it works! ", self.nothing)
``` |
{
"source": "JoelJosephPaul/vanet-algorithms",
"score": 3
} |
#### File: vanet-algorithms/datagen/transmit.py
```python
l = { 9:{ 3:{0:None,1:None,2:None},8:{6:{4:None,5:None},7:None} } }
realdata1 = [10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 5, 5, 5, 5, 5, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 8, 8, 8, 8, 8, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10]
realdata2 = [10]*100
lst = [
{0:{2:{4:None,5:None},3:{6:{1:None,8:None},7:None},9:None}},
{9:{1:{5:{6:None,7:None}},0:{2:None,4:None},3:{8:None}}},
{5:{2:{1:{9:None},4:{3:{0:None,7:None},6:None}},8:None}},
{4:{0:{7:None},5:{9:{1:None,8:None},2:{3:None,6:None}}}},
{3:{4:{5:{7:None},0:None},8:{9:{1:None,2:None},6:None}}},
{6:{2:{8:None,9:None},3:{5:None},1:{7:None,0:None,4:None}}},
{7:{8:{2:None},5:{3:{9:None,6:None},4:None},1:None,0:None}},
{2:{6:{8:None,5:None,1:None,4:None},7:{0:None,9:None,3:None}}},
{1:{6:{0:None,5:None},7:{9:{2:None,8:None,3:None},4:None}}},
{8:{1:{6:None,4:None,0:None},3:{9:None,2:None},7:{5:None}}},
]
malicious=[1,6,9]
realvalue=10
maxval=10
minval=0
avgval = (maxval+minval)/2
def malval(num):
if num>avgval:
return minval
else:
return maxval
def agg(d):
if type(d)==type({}):
n=0.0
s=0.0
for i in d:
            if i not in malicious:  # honest node: average its subtree
rs,rn = agg(d[i])
s+=(rs+realvalue)/(rn+1)#realvalue is the real transmitted data
else:
s+=malval(realvalue)
n+=1
return (s,n)
elif d==None:
return (0,0)
def op(level,d):
for i in d:
print(" "*level*5+str(i))
if d[i]!=None:
op(level+1,d[i])
aglst = []
for i in lst:
op(1,i)
r1,r2 = agg(i)
aglst.append(r1/r2)
print(r1/r2)
times = [i/10 for i in range(1,11)]
for i,j in zip(times,aglst):
print(i,j,sep='\t')
``` |
{
"source": "joeljunior05/microspheres-sbrt",
"score": 4
} |
#### File: microspheres-sbrt/utils/imgproc.py
```python
import cv2 as cv
import numpy as np
def crop_img(img, stride=2, size=[128, 128]):
max_rows, max_cols = img.shape[:2]
ret = []
for c in range(0, max_cols, stride):
if (c + size[1]) >= max_cols:
break
for r in range(0, max_rows, stride):
if (r + size[0]) >= max_rows:
break
ret.append(img[r:r+size[0], c:c+size[1]])
return ret
def int_to_float(int_img):
float_img = int_img
if float_img.dtype == np.uint8:
float_img = float_img.astype(np.float32)
float_img /= 255
return float_img
def to_gray(img):
if len(img.shape) != 3:
raise ValueError("Image must have 3 dimension, but it has {0}".format(len(img.shape)))
gray_img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray_img = int_to_float(gray_img)
return gray_img
def read_img(filename_img, filename_mask=None):
"""
Function used to abstract the reading process of an image,
it returns [img, mask] when filename_mask is passed.
Otherwise, it returns [img]
"""
img = cv.imread(filename_img)
if filename_mask is not None:
return [img, cv.imread(filename_mask)]
return [img]
def copy_border(image, padding, value=[255, 255, 255]):
print('copy_border')
return cv.copyMakeBorder(image, padding, padding, padding, padding, cv.BORDER_CONSTANT, value=value)
def remove_border(image, padding):
print('remove_border')
width = image.shape[0]
height = image.shape[1]
new_width = width - padding
new_height = height - padding
assert (new_width > 0), "new_width should be bigger than zero!"
assert (new_height > 0), "new_height should be bigger than zero!"
assert (new_width > padding), "new_width should be bigger than padding!"
assert (new_height > padding), "new_width should be bigger than padding!"
return image[padding:new_width, padding:new_height]
def write_img(filename_img, img):
"""
Function used to abstract the writing process of an image
"""
    if np.issubdtype(img.dtype, np.floating):
img *= 255
cv.imwrite(filename_img, img)
```
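crop_img slides a fixed-size window over the image with the given stride and keeps every patch that fits entirely inside it. The same loop on a synthetic array, to make the patch count concrete (the input shape is arbitrary):
```python
# Minimal sketch (arbitrary input shape): the sliding-window rule crop_img
# applies, collecting every full 128x128 patch at the given stride.
import numpy as np

img = np.zeros((300, 400), dtype=np.uint8)  # hypothetical grayscale image
stride, size = 2, (128, 128)

patches = []
for c in range(0, img.shape[1], stride):
    if c + size[1] >= img.shape[1]:
        break
    for r in range(0, img.shape[0], stride):
        if r + size[0] >= img.shape[0]:
            break
        patches.append(img[r:r + size[0], c:c + size[1]])

print(len(patches), patches[0].shape)  # 11696 patches, each (128, 128)
```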
#### File: microspheres-sbrt/utils/inout.py
```python
from os import listdir
from os.path import join, isfile
class InOutLoop:
"""
This class implements a loop that will iterate on every file in an 'input_folder':
- For every file in 'input_folder' it calls a function that was registered by 'on_input' method;
- The result of that function is passed to 'on_run' method;
- In the end, the function that was registered by 'on_output' method is called with
the result of 'on_run's function.
"""
def __init__(self, input_folder='inputs', output_folder='outputs', extensions=[], debug=False):
self.input_folder = input_folder
self.output_folder = output_folder
self.extensions = extensions
self.debug = debug
def log(self, msg):
if self.debug:
print(msg)
def has_extension(self, ext, filename):
namelen = len(filename)
extlen = len(ext)
return filename.find(ext, namelen - extlen, namelen) >= 0
def check_extensions(self, filename):
return len(self.extensions) == 0 or any(self.has_extension(ext, filename) for ext in self.extensions)
def on_input(self, func):
self.f_input = func
def on_output(self, func):
self.f_output = func
def on_run(self, func):
self.f_run = func
def run(self):
for file in listdir(self.input_folder):
input_path = join(self.input_folder, file)
output_path = join(self.output_folder, file)
if isfile(input_path) and self.check_extensions(file):
self.log('READING: {} <-'.format(file))
result_in = self.f_input(input_path)
self.log('PROCESSING: {} <-'.format(file))
result_run = self.f_run(result_in)
self.log('OUTPUT: {} <-'.format(file))
self.f_output(output_path, result_run)
``` |
{
"source": "joelkaret/Sudoku_Solver",
"score": 2
} |
#### File: joelkaret/Sudoku_Solver/SudokuSolverGui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1122, 902)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.solve_button = QtWidgets.QPushButton(self.centralwidget)
self.solve_button.setGeometry(QtCore.QRect(820, 340, 211, 201))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(23)
self.solve_button.setFont(font)
self.solve_button.setCheckable(False)
self.solve_button.setAutoDefault(False)
self.solve_button.setObjectName("solve_button")
self.all_solutions_button = QtWidgets.QPushButton(self.centralwidget)
self.all_solutions_button.setGeometry(QtCore.QRect(820, 560, 211, 201))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(23)
self.all_solutions_button.setFont(font)
self.all_solutions_button.setCheckable(False)
self.all_solutions_button.setAutoDefault(False)
self.all_solutions_button.setObjectName("all_solutions_button")
self.edit_board_button = QtWidgets.QPushButton(self.centralwidget)
self.edit_board_button.setGeometry(QtCore.QRect(820, 120, 211, 201))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(23)
self.edit_board_button.setFont(font)
self.edit_board_button.setCheckable(False)
self.edit_board_button.setAutoDefault(False)
self.edit_board_button.setObjectName("edit_board_button")
self.pushButton_1 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_1.setGeometry(QtCore.QRect(40, 40, 71, 71))
self.pushButton_1.setText("")
self.pushButton_1.setCheckable(False)
self.pushButton_1.setAutoDefault(False)
self.pushButton_1.setObjectName("pushButton_1")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(120, 40, 71, 71))
self.pushButton_2.setText("")
self.pushButton_2.setCheckable(False)
self.pushButton_2.setAutoDefault(False)
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(200, 40, 71, 71))
self.pushButton_3.setText("")
self.pushButton_3.setCheckable(False)
self.pushButton_3.setAutoDefault(False)
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(290, 40, 71, 71))
self.pushButton_4.setText("")
self.pushButton_4.setCheckable(False)
self.pushButton_4.setAutoDefault(False)
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setGeometry(QtCore.QRect(370, 40, 71, 71))
self.pushButton_5.setText("")
self.pushButton_5.setCheckable(False)
self.pushButton_5.setAutoDefault(False)
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_6 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_6.setGeometry(QtCore.QRect(450, 40, 71, 71))
self.pushButton_6.setText("")
self.pushButton_6.setCheckable(False)
self.pushButton_6.setAutoDefault(False)
self.pushButton_6.setObjectName("pushButton_6")
self.pushButton_7 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_7.setGeometry(QtCore.QRect(540, 40, 71, 71))
self.pushButton_7.setText("")
self.pushButton_7.setCheckable(False)
self.pushButton_7.setAutoDefault(False)
self.pushButton_7.setObjectName("pushButton_7")
self.pushButton_8 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_8.setGeometry(QtCore.QRect(620, 40, 71, 71))
self.pushButton_8.setText("")
self.pushButton_8.setCheckable(False)
self.pushButton_8.setAutoDefault(False)
self.pushButton_8.setObjectName("pushButton_8")
self.pushButton_9 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_9.setGeometry(QtCore.QRect(700, 40, 71, 71))
self.pushButton_9.setText("")
self.pushButton_9.setCheckable(False)
self.pushButton_9.setAutoDefault(False)
self.pushButton_9.setObjectName("pushButton_9")
self.pushButton_10 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_10.setGeometry(QtCore.QRect(40, 120, 71, 71))
self.pushButton_10.setText("")
self.pushButton_10.setCheckable(False)
self.pushButton_10.setAutoDefault(False)
self.pushButton_10.setObjectName("pushButton_10")
self.pushButton_11 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_11.setGeometry(QtCore.QRect(120, 120, 71, 71))
self.pushButton_11.setText("")
self.pushButton_11.setCheckable(False)
self.pushButton_11.setAutoDefault(False)
self.pushButton_11.setObjectName("pushButton_11")
self.pushButton_12 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_12.setGeometry(QtCore.QRect(200, 120, 71, 71))
self.pushButton_12.setText("")
self.pushButton_12.setCheckable(False)
self.pushButton_12.setAutoDefault(False)
self.pushButton_12.setObjectName("pushButton_12")
self.pushButton_13 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_13.setGeometry(QtCore.QRect(290, 120, 71, 71))
self.pushButton_13.setText("")
self.pushButton_13.setCheckable(False)
self.pushButton_13.setAutoDefault(False)
self.pushButton_13.setObjectName("pushButton_13")
self.pushButton_14 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_14.setGeometry(QtCore.QRect(370, 120, 71, 71))
self.pushButton_14.setText("")
self.pushButton_14.setCheckable(False)
self.pushButton_14.setAutoDefault(False)
self.pushButton_14.setObjectName("pushButton_14")
self.pushButton_15 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_15.setGeometry(QtCore.QRect(450, 120, 71, 71))
self.pushButton_15.setText("")
self.pushButton_15.setCheckable(False)
self.pushButton_15.setAutoDefault(False)
self.pushButton_15.setObjectName("pushButton_15")
self.pushButton_16 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_16.setGeometry(QtCore.QRect(540, 120, 71, 71))
self.pushButton_16.setText("")
self.pushButton_16.setCheckable(False)
self.pushButton_16.setAutoDefault(False)
self.pushButton_16.setObjectName("pushButton_16")
self.pushButton_17 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_17.setGeometry(QtCore.QRect(620, 120, 71, 71))
self.pushButton_17.setText("")
self.pushButton_17.setCheckable(False)
self.pushButton_17.setAutoDefault(False)
self.pushButton_17.setObjectName("pushButton_17")
self.pushButton_18 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_18.setGeometry(QtCore.QRect(700, 120, 71, 71))
self.pushButton_18.setText("")
self.pushButton_18.setCheckable(False)
self.pushButton_18.setAutoDefault(False)
self.pushButton_18.setObjectName("pushButton_18")
self.pushButton_19 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_19.setGeometry(QtCore.QRect(40, 200, 71, 71))
self.pushButton_19.setText("")
self.pushButton_19.setCheckable(False)
self.pushButton_19.setAutoDefault(False)
self.pushButton_19.setObjectName("pushButton_19")
self.pushButton_20 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_20.setGeometry(QtCore.QRect(120, 200, 71, 71))
self.pushButton_20.setText("")
self.pushButton_20.setCheckable(False)
self.pushButton_20.setAutoDefault(False)
self.pushButton_20.setObjectName("pushButton_20")
self.pushButton_21 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_21.setGeometry(QtCore.QRect(200, 200, 71, 71))
self.pushButton_21.setText("")
self.pushButton_21.setCheckable(False)
self.pushButton_21.setAutoDefault(False)
self.pushButton_21.setObjectName("pushButton_21")
self.pushButton_22 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_22.setGeometry(QtCore.QRect(290, 200, 71, 71))
self.pushButton_22.setText("")
self.pushButton_22.setCheckable(False)
self.pushButton_22.setAutoDefault(False)
self.pushButton_22.setObjectName("pushButton_22")
self.pushButton_23 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_23.setGeometry(QtCore.QRect(370, 200, 71, 71))
self.pushButton_23.setText("")
self.pushButton_23.setCheckable(False)
self.pushButton_23.setAutoDefault(False)
self.pushButton_23.setObjectName("pushButton_23")
self.pushButton_24 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_24.setGeometry(QtCore.QRect(450, 200, 71, 71))
self.pushButton_24.setText("")
self.pushButton_24.setCheckable(False)
self.pushButton_24.setAutoDefault(False)
self.pushButton_24.setObjectName("pushButton_24")
self.pushButton_25 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_25.setGeometry(QtCore.QRect(540, 200, 71, 71))
self.pushButton_25.setText("")
self.pushButton_25.setCheckable(False)
self.pushButton_25.setAutoDefault(False)
self.pushButton_25.setObjectName("pushButton_25")
self.pushButton_26 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_26.setGeometry(QtCore.QRect(620, 200, 71, 71))
self.pushButton_26.setText("")
self.pushButton_26.setCheckable(False)
self.pushButton_26.setAutoDefault(False)
self.pushButton_26.setObjectName("pushButton_26")
self.pushButton_27 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_27.setGeometry(QtCore.QRect(700, 200, 71, 71))
self.pushButton_27.setText("")
self.pushButton_27.setCheckable(False)
self.pushButton_27.setAutoDefault(False)
self.pushButton_27.setObjectName("pushButton_27")
self.pushButton_28 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_28.setGeometry(QtCore.QRect(40, 290, 71, 71))
self.pushButton_28.setText("")
self.pushButton_28.setCheckable(False)
self.pushButton_28.setAutoDefault(False)
self.pushButton_28.setObjectName("pushButton_28")
self.pushButton_29 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_29.setGeometry(QtCore.QRect(120, 290, 71, 71))
self.pushButton_29.setText("")
self.pushButton_29.setCheckable(False)
self.pushButton_29.setAutoDefault(False)
self.pushButton_29.setObjectName("pushButton_29")
self.pushButton_30 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_30.setGeometry(QtCore.QRect(200, 290, 71, 71))
self.pushButton_30.setText("")
self.pushButton_30.setCheckable(False)
self.pushButton_30.setAutoDefault(False)
self.pushButton_30.setObjectName("pushButton_30")
self.pushButton_31 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_31.setGeometry(QtCore.QRect(290, 290, 71, 71))
self.pushButton_31.setText("")
self.pushButton_31.setCheckable(False)
self.pushButton_31.setAutoDefault(False)
self.pushButton_31.setObjectName("pushButton_31")
self.pushButton_32 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_32.setGeometry(QtCore.QRect(370, 290, 71, 71))
self.pushButton_32.setText("")
self.pushButton_32.setCheckable(False)
self.pushButton_32.setAutoDefault(False)
self.pushButton_32.setObjectName("pushButton_32")
self.pushButton_33 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_33.setGeometry(QtCore.QRect(450, 290, 71, 71))
self.pushButton_33.setText("")
self.pushButton_33.setCheckable(False)
self.pushButton_33.setAutoDefault(False)
self.pushButton_33.setObjectName("pushButton_33")
self.pushButton_34 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_34.setGeometry(QtCore.QRect(540, 290, 71, 71))
self.pushButton_34.setText("")
self.pushButton_34.setCheckable(False)
self.pushButton_34.setAutoDefault(False)
self.pushButton_34.setObjectName("pushButton_34")
self.pushButton_35 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_35.setGeometry(QtCore.QRect(620, 290, 71, 71))
self.pushButton_35.setText("")
self.pushButton_35.setCheckable(False)
self.pushButton_35.setAutoDefault(False)
self.pushButton_35.setObjectName("pushButton_35")
self.pushButton_36 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_36.setGeometry(QtCore.QRect(700, 290, 71, 71))
self.pushButton_36.setText("")
self.pushButton_36.setCheckable(False)
self.pushButton_36.setAutoDefault(False)
self.pushButton_36.setObjectName("pushButton_36")
self.pushButton_37 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_37.setGeometry(QtCore.QRect(700, 370, 71, 71))
self.pushButton_37.setText("")
self.pushButton_37.setCheckable(False)
self.pushButton_37.setAutoDefault(False)
self.pushButton_37.setObjectName("pushButton_37")
self.pushButton_38 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_38.setGeometry(QtCore.QRect(620, 370, 71, 71))
self.pushButton_38.setText("")
self.pushButton_38.setCheckable(False)
self.pushButton_38.setAutoDefault(False)
self.pushButton_38.setObjectName("pushButton_38")
self.pushButton_39 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_39.setGeometry(QtCore.QRect(540, 370, 71, 71))
self.pushButton_39.setText("")
self.pushButton_39.setCheckable(False)
self.pushButton_39.setAutoDefault(False)
self.pushButton_39.setObjectName("pushButton_39")
self.pushButton_40 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_40.setGeometry(QtCore.QRect(450, 370, 71, 71))
self.pushButton_40.setText("")
self.pushButton_40.setCheckable(False)
self.pushButton_40.setAutoDefault(False)
self.pushButton_40.setObjectName("pushButton_40")
self.pushButton_41 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_41.setGeometry(QtCore.QRect(370, 370, 71, 71))
self.pushButton_41.setText("")
self.pushButton_41.setCheckable(False)
self.pushButton_41.setAutoDefault(False)
self.pushButton_41.setObjectName("pushButton_41")
self.pushButton_42 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_42.setGeometry(QtCore.QRect(290, 370, 71, 71))
self.pushButton_42.setText("")
self.pushButton_42.setCheckable(False)
self.pushButton_42.setAutoDefault(False)
self.pushButton_42.setObjectName("pushButton_42")
self.pushButton_43 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_43.setGeometry(QtCore.QRect(200, 370, 71, 71))
self.pushButton_43.setText("")
self.pushButton_43.setCheckable(False)
self.pushButton_43.setAutoDefault(False)
self.pushButton_43.setObjectName("pushButton_43")
self.pushButton_44 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_44.setGeometry(QtCore.QRect(120, 370, 71, 71))
self.pushButton_44.setText("")
self.pushButton_44.setCheckable(False)
self.pushButton_44.setAutoDefault(False)
self.pushButton_44.setObjectName("pushButton_44")
self.pushButton_45 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_45.setGeometry(QtCore.QRect(40, 370, 71, 71))
self.pushButton_45.setText("")
self.pushButton_45.setCheckable(False)
self.pushButton_45.setAutoDefault(False)
self.pushButton_45.setObjectName("pushButton_45")
self.pushButton_46 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_46.setGeometry(QtCore.QRect(40, 450, 71, 71))
self.pushButton_46.setText("")
self.pushButton_46.setCheckable(False)
self.pushButton_46.setAutoDefault(False)
self.pushButton_46.setObjectName("pushButton_46")
self.pushButton_47 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_47.setGeometry(QtCore.QRect(120, 450, 71, 71))
self.pushButton_47.setText("")
self.pushButton_47.setCheckable(False)
self.pushButton_47.setAutoDefault(False)
self.pushButton_47.setObjectName("pushButton_47")
self.pushButton_48 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_48.setGeometry(QtCore.QRect(200, 450, 71, 71))
self.pushButton_48.setText("")
self.pushButton_48.setCheckable(False)
self.pushButton_48.setAutoDefault(False)
self.pushButton_48.setObjectName("pushButton_48")
self.pushButton_49 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_49.setGeometry(QtCore.QRect(290, 450, 71, 71))
self.pushButton_49.setText("")
self.pushButton_49.setCheckable(False)
self.pushButton_49.setAutoDefault(False)
self.pushButton_49.setObjectName("pushButton_49")
self.pushButton_50 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_50.setGeometry(QtCore.QRect(370, 450, 71, 71))
self.pushButton_50.setText("")
self.pushButton_50.setCheckable(False)
self.pushButton_50.setAutoDefault(False)
self.pushButton_50.setObjectName("pushButton_50")
self.pushButton_51 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_51.setGeometry(QtCore.QRect(450, 450, 71, 71))
self.pushButton_51.setText("")
self.pushButton_51.setCheckable(False)
self.pushButton_51.setAutoDefault(False)
self.pushButton_51.setObjectName("pushButton_51")
self.pushButton_52 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_52.setGeometry(QtCore.QRect(540, 450, 71, 71))
self.pushButton_52.setText("")
self.pushButton_52.setCheckable(False)
self.pushButton_52.setAutoDefault(False)
self.pushButton_52.setObjectName("pushButton_52")
self.pushButton_53 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_53.setGeometry(QtCore.QRect(620, 450, 71, 71))
self.pushButton_53.setText("")
self.pushButton_53.setCheckable(False)
self.pushButton_53.setAutoDefault(False)
self.pushButton_53.setObjectName("pushButton_53")
self.pushButton_54 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_54.setGeometry(QtCore.QRect(700, 450, 71, 71))
self.pushButton_54.setText("")
self.pushButton_54.setCheckable(False)
self.pushButton_54.setAutoDefault(False)
self.pushButton_54.setObjectName("pushButton_54")
self.pushButton_55 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_55.setGeometry(QtCore.QRect(40, 540, 71, 71))
self.pushButton_55.setText("")
self.pushButton_55.setCheckable(False)
self.pushButton_55.setAutoDefault(False)
self.pushButton_55.setObjectName("pushButton_55")
self.pushButton_56 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_56.setGeometry(QtCore.QRect(120, 540, 71, 71))
self.pushButton_56.setText("")
self.pushButton_56.setCheckable(False)
self.pushButton_56.setAutoDefault(False)
self.pushButton_56.setObjectName("pushButton_56")
self.pushButton_57 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_57.setGeometry(QtCore.QRect(200, 540, 71, 71))
self.pushButton_57.setText("")
self.pushButton_57.setCheckable(False)
self.pushButton_57.setAutoDefault(False)
self.pushButton_57.setObjectName("pushButton_57")
self.pushButton_58 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_58.setGeometry(QtCore.QRect(290, 540, 71, 71))
self.pushButton_58.setText("")
self.pushButton_58.setCheckable(False)
self.pushButton_58.setAutoDefault(False)
self.pushButton_58.setObjectName("pushButton_58")
self.pushButton_59 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_59.setGeometry(QtCore.QRect(370, 540, 71, 71))
self.pushButton_59.setText("")
self.pushButton_59.setCheckable(False)
self.pushButton_59.setAutoDefault(False)
self.pushButton_59.setObjectName("pushButton_59")
self.pushButton_60 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_60.setGeometry(QtCore.QRect(450, 540, 71, 71))
self.pushButton_60.setText("")
self.pushButton_60.setCheckable(False)
self.pushButton_60.setAutoDefault(False)
self.pushButton_60.setObjectName("pushButton_60")
self.pushButton_61 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_61.setGeometry(QtCore.QRect(540, 540, 71, 71))
self.pushButton_61.setText("")
self.pushButton_61.setCheckable(False)
self.pushButton_61.setAutoDefault(False)
self.pushButton_61.setObjectName("pushButton_61")
self.pushButton_62 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_62.setGeometry(QtCore.QRect(620, 540, 71, 71))
self.pushButton_62.setText("")
self.pushButton_62.setCheckable(False)
self.pushButton_62.setAutoDefault(False)
self.pushButton_62.setObjectName("pushButton_62")
self.pushButton_63 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_63.setGeometry(QtCore.QRect(700, 540, 71, 71))
self.pushButton_63.setText("")
self.pushButton_63.setCheckable(False)
self.pushButton_63.setAutoDefault(False)
self.pushButton_63.setObjectName("pushButton_63")
self.pushButton_64 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_64.setGeometry(QtCore.QRect(40, 620, 71, 71))
self.pushButton_64.setText("")
self.pushButton_64.setCheckable(False)
self.pushButton_64.setAutoDefault(False)
self.pushButton_64.setObjectName("pushButton_64")
self.pushButton_65 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_65.setGeometry(QtCore.QRect(120, 620, 71, 71))
self.pushButton_65.setText("")
self.pushButton_65.setCheckable(False)
self.pushButton_65.setAutoDefault(False)
self.pushButton_65.setObjectName("pushButton_65")
self.pushButton_66 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_66.setGeometry(QtCore.QRect(200, 620, 71, 71))
self.pushButton_66.setText("")
self.pushButton_66.setCheckable(False)
self.pushButton_66.setAutoDefault(False)
self.pushButton_66.setObjectName("pushButton_66")
self.pushButton_67 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_67.setGeometry(QtCore.QRect(290, 620, 71, 71))
self.pushButton_67.setText("")
self.pushButton_67.setCheckable(False)
self.pushButton_67.setAutoDefault(False)
self.pushButton_67.setObjectName("pushButton_67")
self.pushButton_68 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_68.setGeometry(QtCore.QRect(370, 620, 71, 71))
self.pushButton_68.setText("")
self.pushButton_68.setCheckable(False)
self.pushButton_68.setAutoDefault(False)
self.pushButton_68.setObjectName("pushButton_68")
self.pushButton_69 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_69.setGeometry(QtCore.QRect(450, 620, 71, 71))
self.pushButton_69.setText("")
self.pushButton_69.setCheckable(False)
self.pushButton_69.setAutoDefault(False)
self.pushButton_69.setObjectName("pushButton_69")
self.pushButton_70 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_70.setGeometry(QtCore.QRect(540, 620, 71, 71))
self.pushButton_70.setText("")
self.pushButton_70.setCheckable(False)
self.pushButton_70.setAutoDefault(False)
self.pushButton_70.setObjectName("pushButton_70")
self.pushButton_71 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_71.setGeometry(QtCore.QRect(620, 620, 71, 71))
self.pushButton_71.setText("")
self.pushButton_71.setCheckable(False)
self.pushButton_71.setAutoDefault(False)
self.pushButton_71.setObjectName("pushButton_71")
self.pushButton_72 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_72.setGeometry(QtCore.QRect(700, 620, 71, 71))
self.pushButton_72.setText("")
self.pushButton_72.setCheckable(False)
self.pushButton_72.setAutoDefault(False)
self.pushButton_72.setObjectName("pushButton_72")
self.pushButton_73 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_73.setGeometry(QtCore.QRect(40, 700, 71, 71))
self.pushButton_73.setText("")
self.pushButton_73.setCheckable(False)
self.pushButton_73.setAutoDefault(False)
self.pushButton_73.setObjectName("pushButton_73")
self.pushButton_74 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_74.setGeometry(QtCore.QRect(120, 700, 71, 71))
self.pushButton_74.setText("")
self.pushButton_74.setCheckable(False)
self.pushButton_74.setAutoDefault(False)
self.pushButton_74.setObjectName("pushButton_74")
self.pushButton_75 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_75.setGeometry(QtCore.QRect(200, 700, 71, 71))
self.pushButton_75.setText("")
self.pushButton_75.setCheckable(False)
self.pushButton_75.setAutoDefault(False)
self.pushButton_75.setObjectName("pushButton_75")
self.pushButton_76 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_76.setGeometry(QtCore.QRect(290, 700, 71, 71))
self.pushButton_76.setText("")
self.pushButton_76.setCheckable(False)
self.pushButton_76.setAutoDefault(False)
self.pushButton_76.setObjectName("pushButton_76")
self.pushButton_77 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_77.setGeometry(QtCore.QRect(370, 700, 71, 71))
self.pushButton_77.setText("")
self.pushButton_77.setCheckable(False)
self.pushButton_77.setAutoDefault(False)
self.pushButton_77.setObjectName("pushButton_77")
self.pushButton_78 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_78.setGeometry(QtCore.QRect(450, 700, 71, 71))
self.pushButton_78.setText("")
self.pushButton_78.setCheckable(False)
self.pushButton_78.setAutoDefault(False)
self.pushButton_78.setObjectName("pushButton_78")
self.pushButton_79 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_79.setGeometry(QtCore.QRect(540, 700, 71, 71))
self.pushButton_79.setText("")
self.pushButton_79.setCheckable(False)
self.pushButton_79.setAutoDefault(False)
self.pushButton_79.setObjectName("pushButton_79")
self.pushButton_80 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_80.setGeometry(QtCore.QRect(620, 700, 71, 71))
self.pushButton_80.setText("")
self.pushButton_80.setCheckable(False)
self.pushButton_80.setAutoDefault(False)
self.pushButton_80.setObjectName("pushButton_80")
self.pushButton_81 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_81.setGeometry(QtCore.QRect(700, 700, 71, 71))
self.pushButton_81.setText("")
self.pushButton_81.setCheckable(False)
self.pushButton_81.setAutoDefault(False)
self.pushButton_81.setObjectName("pushButton_81")
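# The pushButton_* widgets above form the 9x9 Sudoku grid (x and y positions step
# by roughly 80 px per cell); num1-num9 and numDel below are the on-screen digit
# keypad, each bound to a matching keyboard shortcut in retranslateUi().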
self.num1 = QtWidgets.QPushButton(self.centralwidget)
self.num1.setGeometry(QtCore.QRect(50, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num1.setFont(font)
self.num1.setCheckable(False)
self.num1.setAutoDefault(False)
self.num1.setObjectName("num1")
self.num2 = QtWidgets.QPushButton(self.centralwidget)
self.num2.setGeometry(QtCore.QRect(120, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num2.setFont(font)
self.num2.setCheckable(False)
self.num2.setAutoDefault(False)
self.num2.setObjectName("num2")
self.num3 = QtWidgets.QPushButton(self.centralwidget)
self.num3.setGeometry(QtCore.QRect(190, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num3.setFont(font)
self.num3.setCheckable(False)
self.num3.setAutoDefault(False)
self.num3.setObjectName("num3")
self.num4 = QtWidgets.QPushButton(self.centralwidget)
self.num4.setGeometry(QtCore.QRect(260, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num4.setFont(font)
self.num4.setCheckable(False)
self.num4.setAutoDefault(False)
self.num4.setObjectName("num4")
self.num5 = QtWidgets.QPushButton(self.centralwidget)
self.num5.setGeometry(QtCore.QRect(330, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num5.setFont(font)
self.num5.setCheckable(False)
self.num5.setAutoDefault(False)
self.num5.setObjectName("num5")
self.num6 = QtWidgets.QPushButton(self.centralwidget)
self.num6.setGeometry(QtCore.QRect(400, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num6.setFont(font)
self.num6.setCheckable(False)
self.num6.setAutoDefault(False)
self.num6.setObjectName("num6")
self.num7 = QtWidgets.QPushButton(self.centralwidget)
self.num7.setGeometry(QtCore.QRect(470, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num7.setFont(font)
self.num7.setCheckable(False)
self.num7.setAutoDefault(False)
self.num7.setObjectName("num7")
self.num8 = QtWidgets.QPushButton(self.centralwidget)
self.num8.setGeometry(QtCore.QRect(540, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num8.setFont(font)
self.num8.setCheckable(False)
self.num8.setAutoDefault(False)
self.num8.setObjectName("num8")
self.num9 = QtWidgets.QPushButton(self.centralwidget)
self.num9.setGeometry(QtCore.QRect(610, 790, 61, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.num9.setFont(font)
self.num9.setCheckable(False)
self.num9.setAutoDefault(False)
self.num9.setObjectName("num9")
self.numDel = QtWidgets.QPushButton(self.centralwidget)
self.numDel.setGeometry(QtCore.QRect(680, 790, 81, 61))
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
font.setBold(False)
font.setUnderline(False)
font.setWeight(50)
self.numDel.setFont(font)
self.numDel.setCheckable(False)
self.numDel.setAutoDefault(False)
self.numDel.setObjectName("numDel")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1122, 21))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.solve_button.setText(_translate("MainWindow", "Solve"))
self.all_solutions_button.setText(_translate("MainWindow", "All Solutions"))
self.edit_board_button.setText(_translate("MainWindow", "Edit Board"))
self.num1.setText(_translate("MainWindow", "1"))
self.num1.setShortcut(_translate("MainWindow", "1"))
self.num2.setText(_translate("MainWindow", "2"))
self.num2.setShortcut(_translate("MainWindow", "2"))
self.num3.setText(_translate("MainWindow", "3"))
self.num3.setShortcut(_translate("MainWindow", "3"))
self.num4.setText(_translate("MainWindow", "4"))
self.num4.setShortcut(_translate("MainWindow", "4"))
self.num5.setText(_translate("MainWindow", "5"))
self.num5.setShortcut(_translate("MainWindow", "5"))
self.num6.setText(_translate("MainWindow", "6"))
self.num6.setShortcut(_translate("MainWindow", "6"))
self.num7.setText(_translate("MainWindow", "7"))
self.num7.setShortcut(_translate("MainWindow", "7"))
self.num8.setText(_translate("MainWindow", "8"))
self.num8.setShortcut(_translate("MainWindow", "8"))
self.num9.setText(_translate("MainWindow", "9"))
self.num9.setShortcut(_translate("MainWindow", "9"))
self.numDel.setText(_translate("MainWindow", "DEL"))
self.numDel.setShortcut(_translate("MainWindow", "Backspace"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
#### File: joelkaret/Sudoku_Solver/sudokuSolver.py
```python
import copy
import time
class Main:
def __init__(self, board) -> None:
self.board = board
self.current = copy.deepcopy(board)
self.column = 0
self.row = 0
self.solutions = 1
self.stop = True  # Set to False if you do not want it to pause after 10 solutions.
self.start = time.time()
self.since_last = time.time()
self.average = []
def check(self):
normal = self.current
transposed = self.transpose_board()
three_by_three = self.three_by_three()
if self.mini_check(normal) and self.mini_check(transposed) and self.mini_check(three_by_three):
if self.check_original() and self.current[self.row][self.column] != 0:
return True
return False
def mini_check(self, array):
for i in array:
for j in range(len(i)):
for k in range(j+1,9):
if i[j] == i[k] and i[j] != 0:
return False
return True
def check_original(self):
for i in range(len(self.board)):
for j in range(len(self.board[i])):
if self.board[i][j] != self.current[i][j] and self.board[i][j] != 0:
return False
return True
def no_zeros(self):
for i in range(len(self.current)-1,-1,-1):
for j in range(len(self.current[i])-1,-1,-1):
if self.current[i][j] == 0:
return False
return True
def display_board(self, board):
counter = 0
for i in board:
if counter % 3 == 0 and counter != 0:
print('-'*21)
counter2 = -2
for j in i[:-1]:
print(j, end='|')
if counter2 % 3 == 0:
print(' |', end = '')
counter2 += 1
print(i[-1])
counter += 1
def transpose_board(self):
transposed = []
for i in range(len(self.current)):
temp_transposed_row = []
for j in range(len(self.board[i])):
temp_transposed_row.append(self.current[j][i])
transposed.append(temp_transposed_row)
return transposed
def three_by_three(self):
transposed = []
for i in range(0,9,3):
for j in range(0,9,3):
temp_transposed_box = []
for k in range(0,3):
for l in range(0,3):
temp_transposed_box.append(self.current[k+i][l+j])
transposed.append(temp_transposed_box)
return transposed
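# solve() below drives a brute-force backtracking search: check() validates rows,
# columns (via transpose_board) and 3x3 boxes (via three_by_three) and confirms the
# original clues are untouched; next_cage()/next_number() advance through cells and
# candidate digits, while previous_cage()/previous_number() backtrack once a cell
# has exhausted digits 1-9.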
def solve(self):
if self.check():
if self.no_zeros():
self.complete()
else:
self.next_cage()
elif self.board[self.row][self.column] != 0:
self.next_cage()
else:
self.next_number()
def next_cage(self):
if self.column == 8:
self.column = 0
if self.row == 8:
self.previous_cage()
else:
self.row += 1
else:
self.column += 1
def next_number(self):
if self.current[self.row][self.column] == 9:
self.previous_number()
else:
self.current[self.row][self.column] += 1
def previous_cage(self):
if self.column == 0:
self.column = 8
if self.row == 0:
self.failed()
else:
self.row -= 1
else:
self.column -= 1
if self.board[self.row][self.column] != 0:
self.previous_cage()
def previous_number(self):
self.current[self.row][self.column] = 0
self.previous_cage()
self.next_number()
def complete(self):
if self.solutions == 1:
print('\nOriginal board: \n')
self.display_board(self.board)
print('\n','-'*50, sep = '')
print('\nCompleted Solution 1')
print(f'Found In: {time.time() - self.start} seconds\n')
self.average.append(time.time()-self.start)
self.since_last = time.time()
self.display_board(self.current)
self.solutions += 1
self.next_number()
elif self.solutions < 11:
print('\n','-'*50, sep = '')
print(f'\nCompleted Solution {self.solutions}')
print(f'Found In: {time.time() - self.start} seconds')
print(f'Since last solution: {time.time() - self.since_last} seconds')
self.average.append(time.time()-self.since_last)
print(f'Average time per solution = {sum(self.average)/len(self.average)} seconds\n')
self.since_last = time.time()
self.display_board(self.current)
self.solutions += 1
self.next_number()
elif self.stop:
print('\n','-'*50, sep = '')
print("\nThere are more than 10 solutions.")
a = 'ok'
start = time.time()
while a != 'y' and a != 'n':
a = input("Would you like to stop outputing?(Y/N):\n").lower()
if a == 'y':
exit()
else:
end = time.time()
user_wasted = end - start
print(f'User Paused for {user_wasted} seconds')
self.start += user_wasted
self.since_last += user_wasted
self.stop = False
else:
print('\n','-'*50, sep = '')
print(f"\nCompleted Solution {self.solutions}")
print(f'Found In: {time.time() - self.start} seconds')
print(f'Since last solution: {time.time() - self.since_last} seconds')
self.average.append(time.time()-self.since_last)
print(f'Average time per solution = {sum(self.average)/len(self.average)} seconds\n')
self.since_last = time.time()
self.display_board(self.current)
self.solutions += 1
self.next_number()
def failed(self):
if self.solutions == 1:
print("Original board: ")
self.display_board(self.board)
print("There are no solutions to this sudoku.\n")
exit()
print("No more solutions.")
print(f'Total Time Elapsed: {time.time() - self.start} seconds')
print(f'Since last solution: {time.time() - self.since_last} seconds\n')
exit()
def run(self):
while True:
self.solve()
# def testing1234(self):
# print(self.board[0][0]) #prints 0
# self.current[0][0] = 5
# print(self.board[0][0]) #prints 5
### 1-9, completed
# board = [ [1,2,3,4,5,6,7,8,9],
# [4,5,6,7,8,9,1,2,3],
# [7,8,9,1,2,3,4,5,6],
# [2,3,4,5,6,7,8,9,1],
# [5,6,7,8,9,1,2,3,4],
# [8,9,1,2,3,4,5,6,7],
# [3,4,5,6,7,8,9,1,2],
# [6,7,8,9,1,2,3,4,5],
# [9,1,2,3,4,5,6,7,8]]
### Empty
board = [ [0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ,
[0,0,0,0,0,0,0,0,0] ]
###Random The Times
# board = [ [0,0,0,0,0,0,0,0,1] ,
# [0,0,6,5,0,2,8,0,0] ,
# [0,1,0,8,4,0,7,0,0] ,
# [0,3,1,0,0,0,5,8,0] ,
# [0,0,7,0,0,0,0,0,2] ,
# [0,5,0,0,0,0,4,9,0] ,
# [0,2,3,1,0,5,0,0,0] ,
# [0,0,0,7,0,9,0,0,0] ,
# [1,0,0,0,6,0,0,0,3] ]
### Extra Hard 1 Solution
# board = [ [0,2,0,0,0,0,0,0,0] ,
# [0,0,0,6,0,0,0,0,3] ,
# [0,7,4,0,8,0,0,0,0] ,
# [0,0,0,0,0,3,0,0,2] ,
# [0,8,0,0,4,0,0,9,0] ,
# [6,0,0,5,0,0,0,0,0] ,
# [0,0,0,0,9,0,7,8,0] ,
# [5,0,0,0,0,1,0,0,0] ,
# [0,0,0,0,0,0,0,4,0] ]
### 17 Clues
# board = [ [0,0,0,0,5,0,3,0,6] ,
# [1,0,0,6,0,0,0,0,0] ,
# [0,0,0,0,0,0,7,0,0] ,
# [2,0,0,0,0,0,5,4,0] ,
# [0,0,0,0,0,3,0,0,0] ,
# [0,0,0,0,0,6,0,0,0] ,
# [0,0,0,2,4,0,0,1,0] ,
# [0,3,0,0,0,0,0,8,0] ,
# [0,0,7,0,0,0,0,0,0] ]
ok = Main(board)
#print(ok.check())
# ok.testing1234()
ok.run()
# #ok.three_by_three()
# #ok.transpose_board()
# #ok.display_board(board)
``` |
{
"source": "JoelKatz/NuDB",
"score": 3
} |
#### File: NuDB/bench/plot_bench.py
```python
import argparse
import itertools
import collections
import pandas as pd
import matplotlib.pyplot as plt
import re
from bokeh.layouts import gridplot
from bokeh.palettes import Spectral11
from bokeh.plotting import figure, show, output_file
# Given a file at the start of a test result (on the header line)
# Return a data frame for the test result and leave the file one past
# the blank line at the end of the result
def to_data_frame(header, it):
column_labels = re.split(' +', header.strip())
columns = [[] for i in range(len(column_labels))]
for l in it:
l = l.strip()
if not l:
break
fields = l.split()
if len(fields) != len(columns):
raise Exception('Bad file format, line: {}'.format(l))
for c, f in zip(columns, fields):
c.append(float(f))
d = {k: v for k, v in zip(column_labels, columns)}
return pd.DataFrame(d, columns=column_labels[1:], index=columns[0])
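# Hypothetical results-file snippet in the layout the parsers here expect (trial
# labels, column names and numbers below are made up for illustration):
#
#   insert (synchronous)
#      Items     nudb   rocksdb
#      100000   52000     41000
#      200000   50000     39000
#
# A trial label precedes an indented header line; a blank line ends each table and
# anything in parentheses is stripped from the label.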
def to_data_frames(f):
trial = ''
result = {}
for l in f:
if l and l[0] == '#': continue
if l and l[0] == ' ' and l.strip():
if trial:
# Remove anything in parens
trial = re.sub(r'\([^)]*\)', '', trial)
result[trial] = to_data_frame(l, f)
trial = ''
continue
if trial: trial += ' ' # Handle multi-line labels
trial += l.strip()
return result
def bokeh_plot(title, df):
numlines = len(df.columns)
palette = Spectral11[0:numlines]
p = figure(
width=500,
height=400,
title=title,
x_axis_label='DB Items',
y_axis_label='Ops/Sec.')
for col_idx in range(numlines):
p.line(
x=df.index.values,
y=df.iloc[:, col_idx],
legend=df.columns[col_idx],
line_color=palette[col_idx],
line_width=5)
return p
def run_main(result_filename, plot_output):
with open(result_filename) as f:
dfd = to_data_frames(f)
plots = []
for k, v in dfd.items():
plots.append(bokeh_plot(k, v))
output_file(plot_output, title="NuDB Benchmark")
show(gridplot(*plots, ncols=2, plot_width=500, plot_height=400))
return dfd # for testing
def parse_args():
parser = argparse.ArgumentParser(
description=('Plot the benchmark results'))
parser.add_argument(
'--input',
'-i',
help=('input'), )
parser.add_argument(
'--output',
'-o',
help=('output'), )
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
result_filename = args.input
plot_output = args.output
if not result_filename:
print('No result file specified. Exiting')
elif not plot_output:
print('No output file specified. Exiting')
else:
run_main(result_filename, plot_output)
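# Example invocation (file names are hypothetical):
#   python plot_bench.py --input bench_results.txt --output bench_plots.html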
``` |
{
"source": "joelkay/youtube-archiver",
"score": 3
} |
#### File: src/youtube_archiver/cli.py
```python
import argparse
import logging
import pathlib
from sys import stderr
from .downloader import download
from .server import server
def server_cli() -> int:
"""
CLI entrypoint to start the API server.
:return: 0 on success
"""
parser = argparse.ArgumentParser(description="Backend API server for YouTube Archive")
parser.add_argument("--port", default=8081, help="TCP port to bind to")
parser.add_argument("--download-dir", required=True, type=pathlib.Path, help="Path to the download directory")
parser.add_argument(
"--downloads-prefix", default="/downloads", help="Path/string to prepend to generated download links"
)
parser.add_argument("--ffmpeg-dir", type=pathlib.Path, help="Directory containing FFMPEG")
parser.add_argument(
"--logging", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO", help="Logging level"
)
args = parser.parse_args()
log_level = getattr(logging, args.logging)
logging.basicConfig(level=log_level, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ytdl_logger = logging.getLogger("ytdl")
# Things that youtube-dl considers warnings can be squelched
ytdl_logger.setLevel(level=max(logging.ERROR, log_level))
server(args.download_dir, args.downloads_prefix, args.port, args.ffmpeg_dir)
return 0
def download_cli() -> int:
"""
Quasi-debugging CLI entrypoint that uses youtube-dl to download a video/audio clip.
:return: 0 on success
"""
parser = argparse.ArgumentParser(description="Backend API server for YouTube Archive")
parser.add_argument("url", help="URL to process")
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
default=pathlib.Path.cwd(),
type=pathlib.Path,
help="Directory to save the resulting files",
)
parser.add_argument("--named-subdir", action="store_true", help="Create a subdirectory based off the URL's title")
parser.add_argument("--skip-video", action="store_true", help="Do not save video files")
parser.add_argument("--extract-audio", action="store_true", help="Save audio as a MP3")
parser.add_argument("--audio-vbr", default=5, type=int, help="MP3 VBR quality")
parser.add_argument("--ffmpeg-dir", type=pathlib.Path, help="Directory containing FFMPEG")
args = parser.parse_args()
if args.skip_video and not args.extract_audio:
print("You must extract at least video or audio", file=stderr)
return 1
download_results = download(
args.output_dir,
args.named_subdir,
args.url,
not args.skip_video,
args.extract_audio,
args.audio_vbr,
ffmpeg_dir=args.ffmpeg_dir,
)
print(f'Successfully processed "{download_results.pretty_name}"')
print(f"\t Info File: {download_results.info_file}")
if not args.skip_video:
print(f"\tVideo File: {download_results.video_file}")
if args.extract_audio:
print(f"\tAudio File: {download_results.audio_file}")
return 0
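# Example invocations, assuming these functions are wired up as console-script
# entry points (the command names below are hypothetical; the flags come from the
# argument parsers above):
#   yt-archiver-server --download-dir ./downloads --port 8081
#   yt-archiver-download "https://www.youtube.com/watch?v=VIDEO_ID" -o ./downloads --extract-audio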
``` |
{
"source": "joel-klemens/FitPoints",
"score": 3
} |
#### File: FitPoints/model/Account.py
```python
from model import Activities
from model.Activities import Activity
from model.Activities import Run
from model.Activities import Gym
import json
from model.Database import Database
#*******************************************************************************************************
#class for account information
class Account_info():
#total number of users
user_count = 0
def __init__(self, user_name, password, name_first, name_last, age, weight, height, num_activity):
self.user_name = user_name
self.password = password
self.name_first = name_first
self.name_last = name_last
self.age = age
self.height = height
self.weight = weight
self.num_activity = num_activity
self.activity_list = []
# add one to this counter every time we add a new account
Account_info.user_count += 1
#***********************************************************************
#Functions for account information
def get_point_total(self):
total_points = 0
for x in range(len(self.activity_list)):
total_points += int(self.activity_list[x].points)
return total_points
def get_time_total(self):
total_time = 0
for x in range(len(self.activity_list)):
total_time += int(self.activity_list[x].duration)
return total_time
def get_average_points(self):
total_points = 0
for x in range(len(self.activity_list)):
total_points += int(self.activity_list[x].points)
if (len(self.activity_list) > 0):
return int(total_points/len(self.activity_list))
return 0
#***********************************************************************
# accessors
@property
def user_name(self):
return self.__user_name
@property
def password(self):
return self.__password
@property
def name_first(self):
return self.__name_first
@property
def name_last(self):
return self.__name_last
@property
def age(self):
return self.__age
@property
def height(self):
return self.__height
@property
def weight(self):
return self.__weight
@property
def num_activity(self):
return self.__num_activity
#***********************************************************************
#mutators
@user_name.setter
def user_name(self, new_user_name):
self.__user_name = new_user_name
@password.setter
def password(self, new_password):
self.__password = new_password
@name_first.setter
def name_first(self, new_name_first):
self.__name_first = new_name_first
@name_last.setter
def name_last(self, new_name_last):
self.__name_last = new_name_last
@age.setter
def age(self,new_age ):
self.__age = new_age
@height.setter
def height(self, new_height):
self.__height = new_height
@weight.setter
def weight(self, new_weight):
self.__weight = new_weight
@num_activity.setter
def num_activity(self, new_num_activity):
self.__num_activity = new_num_activity
#*******************************************************************************************************
#list functions
#initializing the list of accounts
def init_account_list(account_list,database_obj):
#access the information on the stored user list from the database
Database.init_account_list_db(database_obj, account_list)
#*******************************************************
def update_account_list(account_list, database_obj):
#write the information in the account_list in the proper format to the main db_file
#making an array to hold the information about the person
Database.update_db(database_obj, account_list)
#*******************************************************
def check_user_pass(account_list, user_name, password):
#check if the user name and password are correct
# static test login added here so a known account always works (development bypass)
account_list.append(Account_info("J01", "1234", "Joel", "Klemens", "22", "70", "170", 3))
for x in range(len(account_list)):
if(user_name == account_list[x].user_name):
if(password == account_list[x].password):
return True
return False
#*******************************************************
def search_user_name(account_list, user_name):
#search through the account list to make sure no two user names are the same
for x in range(len(account_list)):
if(user_name == account_list[x].user_name):
return True
return False
#*******************************************************
def get_account_info(account_list, user_name):
for x in range(len(account_list)):
if(user_name == account_list[x].user_name):
return account_list[x]
#*******************************************************
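# open_user_logs/update_user_logs below read and write user_logs/<user_name>.txt,
# a JSON document shaped like (field values here are illustrative):
#   {"user_data": [{"activity_type": "run", "HR": "150", "points": "25",
#                   "duration": "30", "date": "01/01/21", "notes": "easy run",
#                   "distance": "5"}]}
# "gym" entries carry a "location" field instead of "distance"; plain activities
# carry neither.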
def open_user_logs(account_list, user_name):
#get the account info object
user = get_account_info(account_list, user_name)
if(int(user.num_activity) >= 0):
#getting the account information from the database file
with open("user_logs/"+user_name+".txt") as json_file:
data = json.load(json_file)
for p in data['user_data']:
temp_type = p['activity_type']
temp_HR = p['HR']
temp_points = p['points']
temp_duration = p['duration']
temp_date = p['date']
temp_notes = p['notes']
if(temp_type == "run"):
temp_distance = p['distance']
if(temp_type == "gym"):
temp_location = p['location']
#add these to the activity list
if(temp_type == "run"):
user.activity_list.append(Run(temp_type, temp_HR, temp_points, temp_duration, temp_date, temp_notes, temp_distance))
elif(temp_type == "gym"):
user.activity_list.append(Gym(temp_type, temp_HR, temp_points, temp_duration, temp_date, temp_notes, temp_location))
else:
user.activity_list.append(Activity(temp_type, temp_HR, temp_points, temp_duration, temp_date, temp_notes))
#close the file
json_file.close()
#*******************************************************
def update_user_logs(account_list, user_name):
#hold the user data
user_data = {}
user_data['user_data'] = []
#get the account info object
user = get_account_info(account_list, user_name)
for x in range(len(user.activity_list)):
if(user.activity_list[x].activity_type == "run"):
user_data['user_data'].append({
'activity_type':user.activity_list[x].activity_type,
'HR':user.activity_list[x].HR,
'points':str(user.activity_list[x].points),
'duration':user.activity_list[x].duration,
'date':user.activity_list[x].date,
'notes':user.activity_list[x].notes,
'distance':user.activity_list[x].distance
})
elif(user.activity_list[x].activity_type == "gym"):
user_data['user_data'].append({
'activity_type':user.activity_list[x].activity_type,
'HR':user.activity_list[x].HR,
'points':str(user.activity_list[x].points),
'duration':user.activity_list[x].duration,
'date':user.activity_list[x].date,
'notes':user.activity_list[x].notes,
'location':user.activity_list[x].location
})
else:
user_data['user_data'].append({
'activity_type':user.activity_list[x].activity_type,
'HR':user.activity_list[x].HR,
'points':str(user.activity_list[x].points),
'duration':user.activity_list[x].duration,
'date':user.activity_list[x].date,
'notes':user.activity_list[x].notes
})
with open('user_logs/'+user_name+'.txt', 'w+') as outfile:
json.dump(user_data, outfile)
#close the file
outfile.close()
print("json data: ")
print(json.dumps(user_data))
```
#### File: FitPoints/model/Activities.py
```python
class Activity():
def __init__(self, activity_type, HR, points, duration, date, notes):
self.activity_type = activity_type
self.HR = HR
self.points = points
self.duration = duration
self.date = date
self.notes = notes
#***********************************************************************
# accessors
@property
def activity_type(self):
return self.__activity_type
@property
def HR(self):
return self.__HR
@property
def points(self):
return self.__points
@property
def duration(self):
return self.__duration
@property
def date(self):
return self.__date
@property
def notes(self):
return self.__notes
#Mutators
@activity_type.setter
def activity_type(self, new_activity_type):
self.__activity_type = new_activity_type
@HR.setter
def HR(self, new_HR):
self.__HR = new_HR
@points.setter
def points(self, new_points):
self.__points = new_points
@duration.setter
def duration(self, new_duration):
self.__duration = new_duration
@date.setter
def date(self, new_date):
self.__date = new_date
@notes.setter
def notes(self, new_notes):
self.__notes = new_notes
#extens activity
class Run(Activity):
def __init__(self, activity_type, HR, points, duration, date, notes, distance):
super().__init__(activity_type, HR, points, duration, date, notes)
self.distance = distance
@property
def distance(self):
return self.__distance
@distance.setter
def distance(self, new_distance):
self.__distance = new_distance
#extends activity
class Gym(Activity):
def __init__(self, activity_type, HR, points, duration, date, notes, location):
super().__init__(activity_type, HR, points, duration, date, notes)
self.location = location
@property
def location(self):
return self.__location
@location.setter
def location(self, new_location):
self.__location = new_location
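# Usage sketch (argument values are illustrative):
#   r = Run("run", HR="150", points=25, duration="30", date="01/01/21", notes="easy", distance="5.2")
#   g = Gym("gym", HR="130", points=15, duration="45", date="02/01/21", notes="", location="campus gym")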
``` |
{
"source": "JoelKronander/TensorFlask",
"score": 4
} |
#### File: TensorFlask/model/cnn_model.py
```python
import tensorflow as tf
import tensorlayer as tl
def cnn_model_graph(input_node):
"""Defines a CNN model for classifying MNIST digits
Arguments:
input_node : Tensorflow placeholder with shape [batch_size, 28, 28 ,1]
Returns:
TensorLayer layer representing the tf graph
"""
network = tl.layers.InputLayer(input_node, name='input_layer')
network = tl.layers.Conv2dLayer(network,
act = tf.nn.relu,
shape = [5, 5, 1, 32], # 32 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
name ='cnn_layer1') # output: (?, 28, 28, 32)
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool = tf.nn.max_pool,
name ='pool_layer1',) # output: (?, 14, 14, 32)
network = tl.layers.Conv2dLayer(network,
act = tf.nn.relu,
shape = [5, 5, 32, 64], # 64 features for each 5x5 patch
strides=[1, 1, 1, 1],
padding='SAME',
name ='cnn_layer2') # output: (?, 14, 14, 64)
network = tl.layers.PoolLayer(network,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
pool = tf.nn.max_pool,
name ='pool_layer2',) # output: (?, 7, 7, 64)
network = tl.layers.FlattenLayer(network, name='flatten_layer') # output: (?, 3136)
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop1') # output: (?, 3136)
network = tl.layers.DenseLayer(network, n_units=256,
act = tf.nn.relu, name='relu1') # output: (?, 256)
network = tl.layers.DropoutLayer(network, keep=0.5, name='drop2') # output: (?, 256)
network = tl.layers.DenseLayer(network, n_units=10,
act = tf.identity,
name='output_layer') # output: (?, 10)
return network
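# Usage sketch (TensorFlow 1.x style; the placeholder and variable names are illustrative):
#   x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
#   network = cnn_model_graph(x)
#   logits = network.outputs              # Tensor of shape (?, 10)
#   probabilities = tf.nn.softmax(logits)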
``` |
{
"source": "joelkyu/SantaComingToTown",
"score": 3
} |
#### File: SantaComingToTown/app/app.py
```python
import os
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "auth.json"
from flask import Flask, render_template, request, url_for, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from config import app
from models import db, Person, Characteristic
@app.route('/')
def index():
db.create_all()
return jsonify([p.deserialize() for p in Person.query.all()])  # create tables if needed and list every stored Person
@app.route('/add', methods=['POST'])
def add_person():
db.create_all()
lp = float(request.headers['Lower-Price'])
hp = float(request.headers['Upper-Price'])
if hp < lp:
lp, hp = hp, lp # switch variables if user inputs wrong price bracket.
p = Person(twitter=request.headers['Twitter-Handle'], lower_price=lp, upper_price=hp)
db.session.add(p)
db.session.commit()
return jsonify([p.deserialize() for p in Person.query.all()])
@app.route('/person/<int:person_id>', methods=['GET'])
def get_person(person_id):
return jsonify(Person.query.get(person_id).deserialize())
@app.route('/compare/<int:person_id>', methods=['GET'])
def get_comparison(person_id):
description = request.headers['name']
price = request.headers['price']
compatible = {}
for person in Person.query.all():
person.load_user_sentiment()
c = person.compatibility(description, price)
if c > 0:
compatible[person.twitter] = c
return jsonify(compatible)
@app.route('/delete/<int:person_id>', methods=['POST'])
def delete_target(person_id):
db.session.delete(Person.query.get(person_id))
db.session.commit()
return jsonify([p.deserialize() for p in Person.query.all()])
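# Example request against /add (header values are illustrative):
#   curl -X POST http://localhost:8080/add \
#        -H "Twitter-Handle: someuser" \
#        -H "Lower-Price: 10" -H "Upper-Price: 50"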
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8080)
``` |
{
"source": "joellabes/dbt",
"score": 2
} |
#### File: dbt/contracts/relation.py
```python
from collections.abc import Mapping
from dataclasses import dataclass, fields
from typing import (
Optional, TypeVar, Generic, Dict,
)
from typing_extensions import Protocol
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum
from dbt import deprecations
from dbt.contracts.util import Replaceable
from dbt.exceptions import CompilationException
from dbt.utils import deep_merge
class RelationType(StrEnum):
Table = 'table'
View = 'view'
CTE = 'cte'
MaterializedView = 'materializedview'
External = 'external'
class ComponentName(StrEnum):
Database = 'database'
Schema = 'schema'
Identifier = 'identifier'
class HasQuoting(Protocol):
quoting: Dict[str, bool]
class FakeAPIObject(JsonSchemaMixin, Replaceable, Mapping):
# override the mapping truthiness, len is always >1
def __bool__(self):
return True
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def __iter__(self):
deprecations.warn('not-a-dictionary', obj=self)
for _, name in self._get_fields():
yield name
def __len__(self):
deprecations.warn('not-a-dictionary', obj=self)
return len(fields(self.__class__))
def incorporate(self, **kwargs):
value = self.to_dict()
value = deep_merge(value, kwargs)
return self.from_dict(value)
T = TypeVar('T')
@dataclass
class _ComponentObject(FakeAPIObject, Generic[T]):
database: T
schema: T
identifier: T
def get_part(self, key: ComponentName) -> T:
if key == ComponentName.Database:
return self.database
elif key == ComponentName.Schema:
return self.schema
elif key == ComponentName.Identifier:
return self.identifier
else:
raise ValueError(
'Got a key of {}, expected one of {}'
.format(key, list(ComponentName))
)
def replace_dict(self, dct: Dict[ComponentName, T]):
kwargs: Dict[str, T] = {}
for k, v in dct.items():
kwargs[str(k)] = v
return self.replace(**kwargs)
@dataclass
class Policy(_ComponentObject[bool]):
database: bool = True
schema: bool = True
identifier: bool = True
@dataclass
class Path(_ComponentObject[Optional[str]]):
database: Optional[str]
schema: Optional[str]
identifier: Optional[str]
def __post_init__(self):
# handle pesky jinja2.Undefined sneaking in here and messing up rendering
if not isinstance(self.database, (type(None), str)):
raise CompilationException(
'Got an invalid path database: {}'.format(self.database)
)
if not isinstance(self.schema, (type(None), str)):
raise CompilationException(
'Got an invalid path schema: {}'.format(self.schema)
)
if not isinstance(self.identifier, (type(None), str)):
raise CompilationException(
'Got an invalid path identifier: {}'.format(self.identifier)
)
def get_lowered_part(self, key: ComponentName) -> Optional[str]:
part = self.get_part(key)
if part is not None:
part = part.lower()
return part
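# Illustrative use of the value objects above (names are made up):
#   path = Path(database='analytics', schema='dbt_jdoe', identifier='my_model')
#   policy = Policy()                             # all three flags default to True
#   path.get_lowered_part(ComponentName.Schema)   # -> 'dbt_jdoe'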
```
#### File: dbt/contracts/util.py
```python
import dataclasses
import os
from datetime import datetime
from typing import (
List, Tuple, ClassVar, Type, TypeVar, Dict, Any, Optional
)
from dbt.clients.system import write_json, read_json
from dbt.exceptions import (
IncompatibleSchemaException,
InternalException,
RuntimeException,
)
from dbt.version import __version__
from dbt.tracking import get_invocation_id
from hologram import JsonSchemaMixin
MacroKey = Tuple[str, str]
SourceKey = Tuple[str, str]
def list_str() -> List[str]:
"""Mypy gets upset about stuff like:
from dataclasses import dataclass, field
from typing import Optional, List
@dataclass
class Foo:
x: Optional[List[str]] = field(default_factory=list)
Because `list` could be any kind of list, I guess
"""
return []
class Replaceable:
def replace(self, **kwargs):
return dataclasses.replace(self, **kwargs)
class Mergeable(Replaceable):
def merged(self, *args):
"""Perform a shallow merge, where the last non-None write wins. This is
intended to merge dataclasses that are a collection of optional values.
"""
replacements = {}
cls = type(self)
for arg in args:
for field in dataclasses.fields(cls):
value = getattr(arg, field.name)
if value is not None:
replacements[field.name] = value
return self.replace(**replacements)
class Writable:
def write(self, path: str, omit_none: bool = False):
write_json(path, self.to_dict(omit_none=omit_none)) # type: ignore
class AdditionalPropertiesMixin:
"""Make this class an extensible property.
The underlying class definition must include a type definition for a field
named '_extra' that is of type `Dict[str, Any]`.
"""
ADDITIONAL_PROPERTIES = True
@classmethod
def from_dict(cls, data, validate=True):
self = super().from_dict(data=data, validate=validate)
keys = self.to_dict(validate=False, omit_none=False)
for key, value in data.items():
if key not in keys:
self.extra[key] = value
return self
def to_dict(self, omit_none=True, validate=False):
data = super().to_dict(omit_none=omit_none, validate=validate)
data.update(self.extra)
return data
def replace(self, **kwargs):
dct = self.to_dict(omit_none=False, validate=False)
dct.update(kwargs)
return self.from_dict(dct)
@property
def extra(self):
return self._extra
class Readable:
@classmethod
def read(cls, path: str):
try:
data = read_json(path)
except (EnvironmentError, ValueError) as exc:
raise RuntimeException(
f'Could not read {cls.__name__} at "{path}" as JSON: {exc}'
) from exc
return cls.from_dict(data) # type: ignore
BASE_SCHEMAS_URL = 'https://schemas.getdbt.com/dbt/{name}/v{version}.json'
@dataclasses.dataclass
class SchemaVersion:
name: str
version: int
def __str__(self) -> str:
return BASE_SCHEMAS_URL.format(
name=self.name,
version=self.version,
)
SCHEMA_VERSION_KEY = 'dbt_schema_version'
METADATA_ENV_PREFIX = 'DBT_ENV_CUSTOM_ENV_'
def get_metadata_env() -> Dict[str, str]:
return {
k[len(METADATA_ENV_PREFIX):]: v for k, v in os.environ.items()
if k.startswith(METADATA_ENV_PREFIX)
}
@dataclasses.dataclass
class BaseArtifactMetadata(JsonSchemaMixin):
dbt_schema_version: str
dbt_version: str = __version__
generated_at: datetime = dataclasses.field(
default_factory=datetime.utcnow
)
invocation_id: Optional[str] = dataclasses.field(
default_factory=get_invocation_id
)
env: Dict[str, str] = dataclasses.field(default_factory=get_metadata_env)
def schema_version(name: str, version: int):
def inner(cls: Type[VersionedSchema]):
cls.dbt_schema_version = SchemaVersion(
name=name,
version=version,
)
return cls
return inner
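# Illustrative use of the decorator above (artifact name and version are made up):
#   @schema_version('my-artifact', 1)
#   @dataclasses.dataclass
#   class MyArtifact(VersionedSchema):
#       ...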
@dataclasses.dataclass
class VersionedSchema(JsonSchemaMixin):
dbt_schema_version: ClassVar[SchemaVersion]
@classmethod
def json_schema(cls, embeddable: bool = False) -> Dict[str, Any]:
result = super().json_schema(embeddable=embeddable)
if not embeddable:
result['$id'] = str(cls.dbt_schema_version)
return result
T = TypeVar('T', bound='ArtifactMixin')
# metadata should really be a Generic[T_M] where T_M is a TypeVar bound to
# BaseArtifactMetadata. Unfortunately this isn't possible due to a mypy issue:
# https://github.com/python/mypy/issues/7520
@dataclasses.dataclass(init=False)
class ArtifactMixin(VersionedSchema, Writable, Readable):
metadata: BaseArtifactMetadata
@classmethod
def from_dict(
cls: Type[T], data: Dict[str, Any], validate: bool = True
) -> T:
if cls.dbt_schema_version is None:
raise InternalException(
'Cannot call from_dict with no schema version!'
)
if validate:
expected = str(cls.dbt_schema_version)
found = data.get('metadata', {}).get(SCHEMA_VERSION_KEY)
if found != expected:
raise IncompatibleSchemaException(expected, found)
return super().from_dict(data=data, validate=validate)
```
#### File: dbt/parser/docs.py
```python
from typing import Iterable
import re
from dbt.clients.jinja import get_rendered
from dbt.contracts.graph.parsed import ParsedDocumentation
from dbt.node_types import NodeType
from dbt.parser.base import Parser
from dbt.parser.search import (
BlockContents, FileBlock, FilesystemSearcher, BlockSearcher
)
SHOULD_PARSE_RE = re.compile(r'{[{%]')
class DocumentationParser(Parser[ParsedDocumentation]):
def get_paths(self):
return FilesystemSearcher(
project=self.project,
relative_dirs=self.project.docs_paths,
extension='.md',
)
@property
def resource_type(self) -> NodeType:
return NodeType.Documentation
@classmethod
def get_compiled_path(cls, block: FileBlock):
return block.path.relative_path
def generate_unique_id(self, resource_name: str) -> str:
# because docs are in their own graph namespace, node type doesn't
# need to be part of the unique ID.
return '{}.{}'.format(self.project.project_name, resource_name)
def parse_block(
self, block: BlockContents
) -> Iterable[ParsedDocumentation]:
unique_id = self.generate_unique_id(block.name)
contents = get_rendered(block.contents, {}).strip()
doc = ParsedDocumentation(
root_path=self.project.project_root,
path=block.file.path.relative_path,
original_file_path=block.path.original_file_path,
package_name=self.project.project_name,
unique_id=unique_id,
name=block.name,
block_contents=contents,
)
return [doc]
def parse_file(self, file_block: FileBlock):
searcher: Iterable[BlockContents] = BlockSearcher(
source=[file_block],
allowed_blocks={'docs'},
source_tag_factory=BlockContents,
)
for block in searcher:
for parsed in self.parse_block(block):
self.results.add_doc(file_block.file, parsed)
# mark the file as seen, even if there are no macros in it
self.results.get_file(file_block.file)
```
#### File: core/dbt/ui.py
```python
import dbt.flags as flags
import textwrap
from typing import Dict
import colorama
COLORS: Dict[str, str] = {
'red': colorama.Fore.RED,
'green': colorama.Fore.GREEN,
'yellow': colorama.Fore.YELLOW,
'reset_all': colorama.Style.RESET_ALL
}
COLOR_FG_RED = COLORS['red']
COLOR_FG_GREEN = COLORS['green']
COLOR_FG_YELLOW = COLORS['yellow']
COLOR_RESET_ALL = COLORS['reset_all']
PRINTER_WIDTH = 80
def use_colors(use_colors_val=True):
flags.USE_COLORS = use_colors_val
def printer_width(printer_width):
global PRINTER_WIDTH
PRINTER_WIDTH = printer_width
def color(text: str, color_code: str):
if flags.USE_COLORS:
return "{}{}{}".format(color_code, text, COLOR_RESET_ALL)
else:
return text
def green(text: str):
return color(text, COLOR_FG_GREEN)
def yellow(text: str):
return color(text, COLOR_FG_YELLOW)
def red(text: str):
return color(text, COLOR_FG_RED)
def line_wrap_message(
msg: str, subtract: int = 0, dedent: bool = True, prefix: str = ''
) -> str:
'''
Line wrap the given message to PRINTER_WIDTH - {subtract}. Convert double
newlines to newlines and avoid calling textwrap.fill() on them (like
markdown)
'''
width = PRINTER_WIDTH - subtract
if dedent:
msg = textwrap.dedent(msg)
if prefix:
msg = f'{prefix}{msg}'
# If the input had an explicit double newline, we want to preserve that
# (we'll turn it into a single line soon). Support windows, too.
splitter = '\r\n\r\n' if '\r\n\r\n' in msg else '\n\n'
chunks = msg.split(splitter)
return '\n'.join(textwrap.fill(chunk, width=width) for chunk in chunks)
def warning_tag(msg: str) -> str:
return f'[{yellow("WARNING")}]: {msg}'
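# Illustrative composition of the helpers above (message text is made up):
#   msg = line_wrap_message("Something in the project configuration looks off.")
#   print(warning_tag(msg))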
```
#### File: integration/004_simple_snapshot_test/test_snapshot_check_cols.py
```python
from test.integration.base import DBTIntegrationTest, use_profile
import dbt.exceptions
class TestSimpleSnapshotFiles(DBTIntegrationTest):
NUM_SNAPSHOT_MODELS = 1
@property
def schema(self):
return "simple_snapshot_004"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
"snapshot-paths": ['check-snapshots'],
"test-paths": ['check-snapshots-expected'],
"source-paths": [],
}
def test_snapshot_check_cols_cycle(self):
results = self.run_dbt(["snapshot", '--vars', 'version: 1'])
self.assertEqual(len(results), 1)
results = self.run_dbt(["snapshot", '--vars', 'version: 2'])
self.assertEqual(len(results), 1)
results = self.run_dbt(["snapshot", '--vars', 'version: 3'])
self.assertEqual(len(results), 1)
def assert_expected(self):
self.run_dbt(['test', '--data', '--vars', 'version: 3'])
@use_profile('snowflake')
def test__snowflake__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
@use_profile('postgres')
def test__postgres__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
@use_profile('bigquery')
def test__bigquery__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
@use_profile('redshift')
def test__redshift__simple_snapshot(self):
self.test_snapshot_check_cols_cycle()
self.assert_expected()
```
#### File: integration/006_simple_dependency_test/test_simple_dependency_with_configs.py
```python
from test.integration.base import DBTIntegrationTest, use_profile
class BaseTestSimpleDependencyWithConfigs(DBTIntegrationTest):
def setUp(self):
DBTIntegrationTest.setUp(self)
self.run_sql_file("seed.sql")
@property
def schema(self):
return "simple_dependency_006"
@property
def models(self):
return "models"
class TestSimpleDependencyWithConfigs(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
return {
'config-version': 2,
'vars': {
'dbt_integration_project': {
'bool_config': True
},
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.run_dbt(["deps"])
results = self.run_dbt(["run"])
self.assertEqual(len(results), 5)
self.assertTablesEqual('seed_config_expected_1', "config")
self.assertTablesEqual("seed", "table_model")
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
class TestSimpleDependencyWithOverriddenConfigs(BaseTestSimpleDependencyWithConfigs):
@property
def packages_config(self):
return {
"packages": [
{
'git': 'https://github.com/fishtown-analytics/dbt-integration-project',
'revision': 'with-configs-0.17.0',
},
]
}
@property
def project_config(self):
return {
'config-version': 2,
"vars": {
# project-level configs
"dbt_integration_project": {
"config_1": "abc",
"config_2": "def",
"bool_config": True
},
},
}
@use_profile('postgres')
def test_postgres_simple_dependency(self):
self.run_dbt(["deps"])
results = self.run_dbt(["run"])
self.assertEqual(len(results), 5)
self.assertTablesEqual('seed_config_expected_2', "config")
self.assertTablesEqual("seed", "table_model")
self.assertTablesEqual("seed", "view_model")
self.assertTablesEqual("seed", "incremental")
```
#### File: integration/022_bigquery_test/test_bigquery_query_results.py
```python
from test.integration.base import DBTIntegrationTest, use_profile
class TestBaseBigQueryResults(DBTIntegrationTest):
@property
def schema(self):
return "bigquery_test_022"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': ['macros'],
}
@use_profile('bigquery')
def test__bigquery_type_inference(self):
result = self.run_dbt(['run-operation', 'test_int_inference'])
self.assertTrue(result.success)
```
#### File: integration/039_config_test/test_configs.py
```python
import os
import shutil
from test.integration.base import DBTIntegrationTest, use_profile
from dbt.exceptions import CompilationException
class TestConfigs(DBTIntegrationTest):
@property
def schema(self):
return "config_039"
def unique_schema(self):
return super().unique_schema().upper()
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'models': {
'test': {
'tagged': {
# the model configs will override this
'materialized': 'invalid',
# the model configs will append to these
'tags': ['tag_one'],
}
},
},
'seeds': {
'quote_columns': False,
},
}
@property
def models(self):
return "models"
@use_profile('postgres')
def test_postgres_config_layering(self):
self.assertEqual(len(self.run_dbt(['seed'])), 1)
# test the project-level tag, and both config() call tags
self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_one'])), 1)
self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_two'])), 1)
self.assertEqual(len(self.run_dbt(['run', '--model', 'tag:tag_three'])), 1)
self.assertTablesEqual('seed', 'model')
# make sure we overwrote the materialization properly
models = self.get_models_in_schema()
self.assertEqual(models['model'], 'table')
class TestTargetConfigs(DBTIntegrationTest):
@property
def schema(self):
return "config_039"
def unique_schema(self):
return super().unique_schema().upper()
@property
def models(self):
return "models"
def setUp(self):
super().setUp()
self.init_targets = [d for d in os.listdir('.') if os.path.isdir(d) and d.startswith('target_')]
def tearDown(self):
super().tearDown()
for d in self.new_dirs():
shutil.rmtree(d)
def new_dirs(self):
for d in os.listdir('.'):
if os.path.isdir(d) and d not in self.init_targets and d.startswith('target_'):
yield d
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'target-path': "target_{{ modules.datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') }}",
'seeds': {
'quote_columns': False,
},
}
@use_profile('postgres')
def test_postgres_alternative_target_paths(self):
self.run_dbt(['seed'])
dirs = list(self.new_dirs())
self.assertEqual(len(dirs), 1)
self.assertTrue(os.path.exists(os.path.join(dirs[0], 'manifest.json')))
class TestDisabledConfigs(DBTIntegrationTest):
@property
def schema(self):
return "config_039"
def postgres_profile(self):
return {
'config': {
'send_anonymous_usage_stats': False
},
'test': {
'outputs': {
'default2': {
'type': 'postgres',
# make sure you can do this and get an int out
'threads': "{{ (1 + 3) | as_number }}",
'host': self.database_host,
'port': "{{ (5400 + 32) | as_number }}",
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.unique_schema()
},
'disabled': {
'type': 'postgres',
# make sure you can do this and get an int out
'threads': "{{ (1 + 3) | as_number }}",
'host': self.database_host,
'port': "{{ (5400 + 32) | as_number }}",
'user': 'root',
'pass': 'password',
'dbname': 'dbt',
'schema': self.unique_schema()
},
},
'target': 'default2'
}
}
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'models': {
'test': {
'enabled': "{{ (target.name == 'default2' | as_bool) }}",
},
},
# set the `var` result in schema.yml to be 'seed', so that the
            # `source` call can succeed.
'vars': {
'test': {
'seed_name': 'seed',
}
},
'seeds': {
'quote_columns': False,
'test': {
'seed': {
'enabled': "{{ (target.name == 'default2') | as_bool }}",
},
},
},
}
@property
def models(self):
return "models"
@use_profile('postgres')
def test_postgres_disable_seed_partial_parse(self):
self.run_dbt(['--partial-parse', 'seed', '--target', 'disabled'])
self.run_dbt(['--partial-parse', 'seed', '--target', 'disabled'])
@use_profile('postgres')
def test_postgres_conditional_model(self):
# no seeds/models - enabled should eval to False because of the target
results = self.run_dbt(['seed', '--target', 'disabled'], strict=False)
self.assertEqual(len(results), 0)
results = self.run_dbt(['run', '--target', 'disabled'], strict=False)
self.assertEqual(len(results), 0)
# has seeds/models - enabled should eval to True because of the target
results = self.run_dbt(['seed'])
self.assertEqual(len(results), 1)
results = self.run_dbt(['run'])
self.assertEqual(len(results), 2)
class TestUnusedModelConfigs(DBTIntegrationTest):
@property
def schema(self):
return "config_039"
@property
def project_config(self):
return {
'config-version': 2,
'data-paths': ['data'],
'models': {
'test': {
'enabled': True,
}
},
'seeds': {
'quote_columns': False,
},
'sources': {
'test': {
'enabled': True,
}
}
}
@property
def models(self):
return "empty-models"
@use_profile('postgres')
def test_postgres_warn_unused_configuration_paths(self):
with self.assertRaises(CompilationException) as exc:
self.run_dbt(['seed'])
self.assertIn('Configuration paths exist', str(exc.exception))
self.assertIn('- sources.test', str(exc.exception))
self.assertIn('- models.test', str(exc.exception))
self.run_dbt(['seed'], strict=False)
```
#### File: integration/100_rpc_test/test_execute_fetch_and_serialize.py
```python
from test.integration.base import DBTIntegrationTest, use_profile
import pickle
import os
class TestRpcExecuteReturnsResults(DBTIntegrationTest):
@property
def schema(self):
return "rpc_test_100"
@property
def models(self):
return "models"
@property
def project_config(self):
return {
'config-version': 2,
'macro-paths': ['macros'],
}
def do_test_pickle(self, agate_table):
table = {
'column_names': list(agate_table.column_names),
'rows': [list(row) for row in agate_table]
}
pickle.dumps(table)
def do_test_file(self, filename):
file_path = os.path.join("sql", filename)
with open(file_path) as fh:
query = fh.read()
with self.adapter.connection_named('master'):
status, table = self.adapter.execute(query, auto_begin=False, fetch=True)
self.assertTrue(len(table.columns) > 0, "agate table had no columns")
self.assertTrue(len(table.rows) > 0, "agate table had no rows")
self.do_test_pickle(table)
@use_profile('bigquery')
def test__bigquery_fetch_and_serialize(self):
self.do_test_file('bigquery.sql')
@use_profile('snowflake')
def test__snowflake_fetch_and_serialize(self):
self.do_test_file('snowflake.sql')
@use_profile('redshift')
def test__redshift_fetch_and_serialize(self):
self.do_test_file('redshift.sql')
```
#### File: test/unit/test_graph_selection.py
```python
import unittest
from unittest import mock
import pytest
import string
import dbt.exceptions
import dbt.graph.selector as graph_selector
import dbt.graph.cli as graph_cli
from dbt.node_types import NodeType
import networkx as nx
def _get_graph():
integer_graph = nx.balanced_tree(2, 2, nx.DiGraph())
package_mapping = {
i: 'm.' + ('X' if i % 2 == 0 else 'Y') + '.' + letter
for (i, letter) in enumerate(string.ascii_lowercase)
}
# Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
return graph_selector.Graph(nx.relabel_nodes(integer_graph, package_mapping))
def _get_manifest(graph):
nodes = {}
for unique_id in graph:
fqn = unique_id.split('.')
node = mock.MagicMock(
unique_id=unique_id,
fqn=fqn,
package_name=fqn[0],
tags=[],
resource_type=NodeType.Model,
empty=False,
config=mock.MagicMock(enabled=True),
)
nodes[unique_id] = node
nodes['m.X.a'].tags = ['abc']
nodes['m.Y.b'].tags = ['abc', 'bcef']
nodes['m.X.c'].tags = ['abc', 'bcef']
nodes['m.Y.d'].tags = []
nodes['m.X.e'].tags = ['efg', 'bcef']
nodes['m.Y.f'].tags = ['efg', 'bcef']
nodes['m.X.g'].tags = ['efg']
return mock.MagicMock(nodes=nodes)
@pytest.fixture
def graph():
    # _get_graph() already returns a graph_selector.Graph, so avoid wrapping it twice
    return _get_graph()
@pytest.fixture
def manifest(graph):
return _get_manifest(graph)
def id_macro(arg):
if isinstance(arg, str):
return arg
try:
return '_'.join(arg)
except TypeError:
return arg
run_specs = [
# include by fqn
(['X.a'], [], {'m.X.a'}),
# include by tag
(['tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
# exclude by tag
(['*'], ['tag:abc'], {'m.Y.d', 'm.X.e', 'm.Y.f', 'm.X.g'}),
# tag + fqn
(['tag:abc', 'a'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'd'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.d'}),
# multiple node selection across packages
(['X.a', 'b'], [], {'m.X.a', 'm.Y.b'}),
(['X.a+'], ['b'], {'m.X.a','m.X.c', 'm.Y.d','m.X.e','m.Y.f','m.X.g'}),
# children
(['X.c+'], [], {'m.X.c', 'm.Y.f', 'm.X.g'}),
(['X.a+1'], [], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['X.a+'], ['tag:efg'], {'m.X.a','m.Y.b','m.X.c', 'm.Y.d'}),
# parents
(['+Y.f'], [], {'m.X.c', 'm.Y.f', 'm.X.a'}),
(['1+Y.f'], [], {'m.X.c', 'm.Y.f'}),
# childrens parents
(['@X.c'], [], {'m.X.a', 'm.X.c', 'm.Y.f', 'm.X.g'}),
# multiple selection/exclusion
(['tag:abc', 'tag:bcef'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:abc', 'tag:bcef'], ['tag:efg'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:abc', 'tag:bcef'], ['tag:efg', 'a'], {'m.Y.b', 'm.X.c'}),
# intersections
(['a,a'], [], {'m.X.a'}),
(['+c,c+'], [], {'m.X.c'}),
(['a,b'], [], set()),
(['tag:abc,tag:bcef'], [], {'m.Y.b', 'm.X.c'}),
(['*,tag:abc,a'], [], {'m.X.a'}),
(['a,tag:abc,*'], [], {'m.X.a'}),
(['tag:abc,tag:bcef'], ['c'], {'m.Y.b'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@b'], {'m.Y.f'}),
(['tag:bcef,tag:efg'], ['tag:bcef,@a'], set()),
(['*,@a,+b'], ['*,tag:abc,tag:bcef'], {'m.X.a'}),
(['tag:bcef,tag:efg', '*,tag:abc'], [], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e'], {'m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['e', 'f'], {'m.X.a', 'm.Y.b', 'm.X.c'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef'], {'m.X.a', 'm.X.e', 'm.Y.f'}),
(['tag:bcef,tag:efg', '*,tag:abc'], ['tag:abc,tag:bcef', 'tag:abc,a'], {'m.X.e', 'm.Y.f'})
]
@pytest.mark.parametrize('include,exclude,expected', run_specs, ids=id_macro)
def test_run_specs(include, exclude, expected):
graph = _get_graph()
manifest = _get_manifest(graph)
selector = graph_selector.NodeSelector(graph, manifest)
spec = graph_cli.parse_difference(include, exclude)
selected = selector.select_nodes(spec)
assert selected == expected
param_specs = [
('a', False, None, False, None, 'fqn', 'a', False),
('+a', True, None, False, None, 'fqn', 'a', False),
('256+a', True, 256, False, None, 'fqn', 'a', False),
('a+', False, None, True, None, 'fqn', 'a', False),
('a+256', False, None, True, 256, 'fqn', 'a', False),
('+a+', True, None, True, None, 'fqn', 'a', False),
('16+a+32', True, 16, True, 32, 'fqn', 'a', False),
('@a', False, None, False, None, 'fqn', 'a', True),
('a.b', False, None, False, None, 'fqn', 'a.b', False),
('+a.b', True, None, False, None, 'fqn', 'a.b', False),
('256+a.b', True, 256, False, None, 'fqn', 'a.b', False),
('a.b+', False, None, True, None, 'fqn', 'a.b', False),
('a.b+256', False, None, True, 256, 'fqn', 'a.b', False),
('+a.b+', True, None, True, None, 'fqn', 'a.b', False),
('16+a.b+32', True, 16, True, 32, 'fqn', 'a.b', False),
('@a.b', False, None, False, None, 'fqn', 'a.b', True),
('a.b.*', False, None, False, None, 'fqn', 'a.b.*', False),
('+a.b.*', True, None, False, None, 'fqn', 'a.b.*', False),
('256+a.b.*', True, 256, False, None, 'fqn', 'a.b.*', False),
('a.b.*+', False, None, True, None, 'fqn', 'a.b.*', False),
('a.b.*+256', False, None, True, 256, 'fqn', 'a.b.*', False),
('+a.b.*+', True, None, True, None, 'fqn', 'a.b.*', False),
('16+a.b.*+32', True, 16, True, 32, 'fqn', 'a.b.*', False),
('@a.b.*', False, None, False, None, 'fqn', 'a.b.*', True),
('tag:a', False, None, False, None, 'tag', 'a', False),
('+tag:a', True, None, False, None, 'tag', 'a', False),
('256+tag:a', True, 256, False, None, 'tag', 'a', False),
('tag:a+', False, None, True, None, 'tag', 'a', False),
('tag:a+256', False, None, True, 256, 'tag', 'a', False),
('+tag:a+', True, None, True, None, 'tag', 'a', False),
('16+tag:a+32', True, 16, True, 32, 'tag', 'a', False),
('@tag:a', False, None, False, None, 'tag', 'a', True),
('source:a', False, None, False, None, 'source', 'a', False),
('source:a+', False, None, True, None, 'source', 'a', False),
('source:a+1', False, None, True, 1, 'source', 'a', False),
('source:a+32', False, None, True, 32, 'source', 'a', False),
('@source:a', False, None, False, None, 'source', 'a', True),
]
@pytest.mark.parametrize(
'spec,parents,parents_depth,children,children_depth,filter_type,filter_value,childrens_parents',
param_specs,
ids=id_macro
)
def test_parse_specs(spec, parents, parents_depth, children, children_depth, filter_type, filter_value, childrens_parents):
parsed = graph_selector.SelectionCriteria.from_single_spec(spec)
assert parsed.parents == parents
assert parsed.parents_depth == parents_depth
assert parsed.children == children
assert parsed.children_depth == children_depth
assert parsed.method == filter_type
assert parsed.value == filter_value
assert parsed.childrens_parents == childrens_parents
invalid_specs = [
'@a+',
'@a.b+',
'@a.b*+',
'@tag:a+',
'@source:a+',
]
@pytest.mark.parametrize('invalid', invalid_specs, ids=lambda k: str(k))
def test_invalid_specs(invalid):
with pytest.raises(dbt.exceptions.RuntimeException):
graph_selector.SelectionCriteria.from_single_spec(invalid)
```
#### File: test/unit/test_jinja.py
```python
from contextlib import contextmanager
import pytest
import unittest
import yaml
from dbt.clients.jinja import get_rendered
from dbt.clients.jinja import get_template
from dbt.clients.jinja import extract_toplevel_blocks
from dbt.exceptions import CompilationException, JinjaRenderingException
@contextmanager
def returns(value):
yield value
@contextmanager
def raises(value):
with pytest.raises(value) as exc:
yield exc
def expected_id(arg):
if isinstance(arg, list):
return '_'.join(arg)
jinja_tests = [
# strings
(
'''foo: bar''',
returns('bar'),
returns('bar'),
),
(
'''foo: "bar"''',
returns('bar'),
returns('bar'),
),
(
'''foo: "'bar'"''',
returns("'bar'"),
returns("'bar'"),
),
(
"""foo: '"bar"'""",
returns('"bar"'),
returns('"bar"'),
),
(
'''foo: "{{ 'bar' | as_text }}"''',
returns('bar'),
returns('bar'),
),
(
'''foo: "{{ 'bar' | as_bool }}"''',
returns('bar'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 'bar' | as_number }}"''',
returns('bar'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 'bar' | as_native }}"''',
returns('bar'),
returns('bar'),
),
# ints
(
'''foo: 1''',
returns('1'),
returns('1'),
),
(
'''foo: "1"''',
returns('1'),
returns('1'),
),
(
'''foo: "'1'"''',
returns("'1'"),
returns("'1'"),
),
(
"""foo: '"1"'""",
returns('"1"'),
returns('"1"'),
),
(
'''foo: "{{ 1 }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ '1' }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "'{{ 1 }}'"''',
returns("'1'"),
returns("'1'"),
),
(
'''foo: "'{{ '1' }}'"''',
returns("'1'"),
returns("'1'"),
),
(
'''foo: "{{ 1 | as_text }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ 1 | as_bool }}"''',
returns('1'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 1 | as_number }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ 1 | as_native }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ '1' | as_text }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ '1' | as_bool }}"''',
returns('1'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ '1' | as_number }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ '1' | as_native }}"''',
returns('1'),
returns(1),
),
# booleans.
# Note the discrepancy with true vs True: `true` is recognized by jinja but
# not literal_eval, but `True` is recognized by ast.literal_eval.
# For extra fun, yaml recognizes both.
# unquoted true
(
'''foo: "{{ True }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_text }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_bool }}"''',
returns('True'),
returns(True),
),
(
'''foo: "{{ True | as_number }}"''',
returns('True'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ True | as_native }}"''',
returns('True'),
returns(True),
),
# unquoted true
(
'''foo: "{{ true }}"''',
returns("True"),
returns("True"),
),
(
'''foo: "{{ true | as_text }}"''',
returns("True"),
returns("True"),
),
(
'''foo: "{{ true | as_bool }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ true | as_number }}"''',
returns("True"),
raises(JinjaRenderingException),
),
(
'''foo: "{{ true | as_native }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ 'true' | as_text }}"''',
returns("true"),
returns("true"),
),
# quoted 'true'
(
'''foo: "'{{ true }}'"''',
returns("'True'"),
returns("'True'"),
), # jinja true -> python True -> str(True) -> "True" -> quoted
(
'''foo: "'{{ true | as_text }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_bool }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_number }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_native }}'"''',
returns("'True'"),
returns("'True'"),
),
# unquoted True
(
'''foo: "{{ True }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_text }}"''',
returns("True"),
returns("True"),
), # True -> string 'True' -> text -> str('True') -> 'True'
(
'''foo: "{{ True | as_bool }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ True | as_number }}"''',
returns("True"),
raises(JinjaRenderingException),
),
(
'''foo: "{{ True | as_native }}"''',
returns("True"),
returns(True),
),
# quoted 'True' within rendering
(
'''foo: "{{ 'True' | as_text }}"''',
returns("True"),
returns("True"),
),
# 'True' -> string 'True' -> text -> str('True') -> 'True'
(
'''foo: "{{ 'True' | as_bool }}"''',
returns('True'),
returns(True),
),
# quoted 'True' outside rendering
(
'''foo: "'{{ True }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ True | as_bool }}'"''',
returns("'True'"),
returns("'True'"),
),
# yaml turns 'yes' into a boolean true
(
'''foo: yes''',
returns('True'),
returns('True'),
),
(
'''foo: "yes"''',
returns('yes'),
returns('yes'),
),
# concatenation
(
'''foo: "{{ (a_int + 100) | as_native }}"''',
returns('200'),
returns(200),
),
(
'''foo: "{{ (a_str ~ 100) | as_native }}"''',
returns('100100'),
returns(100100),
),
(
'''foo: "{{( a_int ~ 100) | as_native }}"''',
returns('100100'),
returns(100100),
),
# multiple nodes -> always str
(
'''foo: "{{ a_str | as_native }}{{ a_str | as_native }}"''',
returns('100100'),
returns('100100'),
),
(
'''foo: "{{ a_int | as_native }}{{ a_int | as_native }}"''',
returns('100100'),
returns('100100'),
),
(
'''foo: "'{{ a_int | as_native }}{{ a_int | as_native }}'"''',
returns("'100100'"),
returns("'100100'"),
),
(
'''foo:''',
returns('None'),
returns('None'),
),
(
'''foo: null''',
returns('None'),
returns('None'),
),
(
'''foo: ""''',
returns(''),
returns(''),
),
(
'''foo: "{{ '' | as_native }}"''',
returns(''),
returns(''),
),
# very annoying, but jinja 'none' is yaml 'null'.
(
'''foo: "{{ none | as_native }}"''',
returns('None'),
returns(None),
),
# make sure we don't include comments in the output (see #2707)
(
'''foo: "{# #}hello"''',
returns('hello'),
returns('hello'),
),
(
'''foo: "{% if false %}{% endif %}hello"''',
returns('hello'),
returns('hello'),
),
]
@pytest.mark.parametrize(
'value,text_expectation,native_expectation',
jinja_tests,
ids=expected_id
)
def test_jinja_rendering(value, text_expectation, native_expectation):
foo_value = yaml.safe_load(value)['foo']
ctx = {
'a_str': '100',
'a_int': 100,
'b_str': 'hello'
}
with text_expectation as text_result:
assert text_result == get_rendered(foo_value, ctx, native=False)
with native_expectation as native_result:
assert native_result == get_rendered(foo_value, ctx, native=True)
class TestJinja(unittest.TestCase):
def test_do(self):
s = '{% set my_dict = {} %}\n{% do my_dict.update(a=1) %}'
template = get_template(s, {})
mod = template.make_module()
self.assertEqual(mod.my_dict, {'a': 1})
def test_regular_render(self):
s = '{{ "some_value" | as_native }}'
value = get_rendered(s, {}, native=False)
assert value == 'some_value'
s = '{{ 1991 | as_native }}'
value = get_rendered(s, {}, native=False)
assert value == '1991'
s = '{{ "some_value" | as_text }}'
value = get_rendered(s, {}, native=False)
assert value == 'some_value'
s = '{{ 1991 | as_text }}'
value = get_rendered(s, {}, native=False)
assert value == '1991'
def test_native_render(self):
s = '{{ "some_value" | as_native }}'
value = get_rendered(s, {}, native=True)
assert value == 'some_value'
s = '{{ 1991 | as_native }}'
value = get_rendered(s, {}, native=True)
assert value == 1991
s = '{{ "some_value" | as_text }}'
value = get_rendered(s, {}, native=True)
assert value == 'some_value'
s = '{{ 1991 | as_text }}'
value = get_rendered(s, {}, native=True)
assert value == '1991'
class TestBlockLexer(unittest.TestCase):
def test_basic(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_multiple(self):
body_one = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
body_two = (
'{{ config(bar=1)}}\r\nselect * from {% if foo %} thing '
'{% else %} other_thing {% endif %}'
)
block_data = (
' {% mytype foo %}' + body_one + '{% endmytype %}' +
'\r\n{% othertype bar %}' + body_two + '{% endothertype %}'
)
blocks = extract_toplevel_blocks(block_data, allowed_blocks={'mytype', 'othertype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
def test_comments(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
comment = '{# my comment #}'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_evil_comments(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
comment = '{# external comment {% othertype bar %} select * from thing.other_thing{% endothertype %} #}'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_nested_comments(self):
body = '{# my comment #} {{ config(foo="bar") }}\r\nselect * from {# my other comment embedding {% endmytype %} #} this.that\r\n'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{% endmytype -%}'
comment = '{# external comment {% othertype bar %} select * from thing.other_thing{% endothertype %} #}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_complex_file(self):
blocks = extract_toplevel_blocks(complex_snapshot_file, allowed_blocks={'mytype', 'myothertype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].full_block, '{% mytype foo %} some stuff {% endmytype %}')
self.assertEqual(blocks[0].contents, ' some stuff ')
self.assertEqual(blocks[1].block_type_name, 'mytype')
self.assertEqual(blocks[1].block_name, 'bar')
self.assertEqual(blocks[1].full_block, bar_block)
self.assertEqual(blocks[1].contents, bar_block[16:-15].rstrip())
self.assertEqual(blocks[2].block_type_name, 'myothertype')
self.assertEqual(blocks[2].block_name, 'x')
self.assertEqual(blocks[2].full_block, x_block.strip())
self.assertEqual(blocks[2].contents, x_block[len('\n{% myothertype x %}'):-len('{% endmyothertype %}\n')])
def test_peaceful_macro_coexistence(self):
body = '{# my macro #} {% macro foo(a, b) %} do a thing {%- endmacro %} {# my model #} {% a b %} test {% enda %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro', 'a'}, collect_raw_data=True)
self.assertEqual(len(blocks), 4)
self.assertEqual(blocks[0].full_block, '{# my macro #} ')
self.assertEqual(blocks[1].block_type_name, 'macro')
self.assertEqual(blocks[1].block_name, 'foo')
self.assertEqual(blocks[1].contents, ' do a thing')
self.assertEqual(blocks[2].full_block, ' {# my model #} ')
self.assertEqual(blocks[3].block_type_name, 'a')
self.assertEqual(blocks[3].block_name, 'b')
self.assertEqual(blocks[3].contents, ' test ')
def test_macro_with_trailing_data(self):
body = '{# my macro #} {% macro foo(a, b) %} do a thing {%- endmacro %} {# my model #} {% a b %} test {% enda %} raw data so cool'
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro', 'a'}, collect_raw_data=True)
self.assertEqual(len(blocks), 5)
self.assertEqual(blocks[0].full_block, '{# my macro #} ')
self.assertEqual(blocks[1].block_type_name, 'macro')
self.assertEqual(blocks[1].block_name, 'foo')
self.assertEqual(blocks[1].contents, ' do a thing')
self.assertEqual(blocks[2].full_block, ' {# my model #} ')
self.assertEqual(blocks[3].block_type_name, 'a')
self.assertEqual(blocks[3].block_name, 'b')
self.assertEqual(blocks[3].contents, ' test ')
self.assertEqual(blocks[4].full_block, ' raw data so cool')
def test_macro_with_crazy_args(self):
body = '''{% macro foo(a, b=asdf("cool this is 'embedded'" * 3) + external_var, c)%}cool{# block comment with {% endmacro %} in it #} stuff here {% endmacro %}'''
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'macro')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, 'cool{# block comment with {% endmacro %} in it #} stuff here ')
def test_materialization_parse(self):
body = '{% materialization xxx, default %} ... {% endmaterialization %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'materialization'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'materialization')
self.assertEqual(blocks[0].block_name, 'xxx')
self.assertEqual(blocks[0].full_block, body)
body = '{% materialization xxx, adapter="other" %} ... {% endmaterialization %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'materialization'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'materialization')
self.assertEqual(blocks[0].block_name, 'xxx')
self.assertEqual(blocks[0].full_block, body)
def test_nested_not_ok(self):
# we don't allow nesting same blocks
body = '{% myblock a %} {% myblock b %} {% endmyblock %} {% endmyblock %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock'})
def test_incomplete_block_failure(self):
fullbody = '{% myblock foo %} {% endmyblock %}'
for length in range(len('{% myblock foo %}'), len(fullbody)-1):
body = fullbody[:length]
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock'})
def test_wrong_end_failure(self):
body = '{% myblock foo %} {% endotherblock %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'})
def test_comment_no_end_failure(self):
body = '{# '
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_comment_only(self):
body = '{# myblock #}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
blocks = extract_toplevel_blocks(body, collect_raw_data=False)
self.assertEqual(len(blocks), 0)
def test_comment_block_self_closing(self):
# test the case where a comment start looks a lot like it closes itself
# (but it doesn't in jinja!)
body = '{#} {% myblock foo %} {#}'
blocks = extract_toplevel_blocks(body, collect_raw_data=False)
self.assertEqual(len(blocks), 0)
def test_embedded_self_closing_comment_block(self):
body = '{% myblock foo %} {#}{% endmyblock %} {#}{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
self.assertEqual(blocks[0].contents, ' {#}{% endmyblock %} {#}')
def test_set_statement(self):
body = '{% set x = 1 %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_set_block(self):
body = '{% set x %}1{% endset %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_crazy_set_statement(self):
body = '{% set x = (thing("{% myblock foo %}")) %}{% otherblock bar %}x{% endotherblock %}{% set y = otherthing("{% myblock foo %}") %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'otherblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% otherblock bar %}x{% endotherblock %}')
self.assertEqual(blocks[0].block_type_name, 'otherblock')
def test_do_statement(self):
body = '{% do thing.update() %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_deceptive_do_statement(self):
body = '{% do thing %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_do_block(self):
body = '{% do %}thing.update(){% enddo %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'do', 'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].contents, 'thing.update()')
self.assertEqual(blocks[0].block_type_name, 'do')
self.assertEqual(blocks[1].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_crazy_do_statement(self):
body = '{% do (thing("{% myblock foo %}")) %}{% otherblock bar %}x{% endotherblock %}{% do otherthing("{% myblock foo %}") %}{% myblock x %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].full_block, '{% otherblock bar %}x{% endotherblock %}')
self.assertEqual(blocks[0].block_type_name, 'otherblock')
self.assertEqual(blocks[1].full_block, '{% myblock x %}hi{% endmyblock %}')
self.assertEqual(blocks[1].block_type_name, 'myblock')
def test_awful_jinja(self):
blocks = extract_toplevel_blocks(
if_you_do_this_you_are_awful,
allowed_blocks={'snapshot', 'materialization'},
collect_raw_data=False
)
self.assertEqual(len(blocks), 2)
self.assertEqual(len([b for b in blocks if b.block_type_name == '__dbt__data']), 0)
self.assertEqual(blocks[0].block_type_name, 'snapshot')
self.assertEqual(blocks[0].contents, '\n '.join([
'''{% set x = ("{% endsnapshot %}" + (40 * '%})')) %}''',
'{# {% endsnapshot %} #}',
'{% embedded %}',
' some block data right here',
'{% endembedded %}'
]))
self.assertEqual(blocks[1].block_type_name, 'materialization')
self.assertEqual(blocks[1].contents, '\nhi\n')
def test_quoted_endblock_within_block(self):
body = '{% myblock something -%} {% set x = ("{% endmyblock %}") %} {% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'myblock')
self.assertEqual(blocks[0].contents, '{% set x = ("{% endmyblock %}") %} ')
def test_docs_block(self):
body = '{% docs __my_doc__ %} asdf {# nope {% enddocs %}} #} {% enddocs %} {% docs __my_other_doc__ %} asdf "{% enddocs %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'docs'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].block_type_name, 'docs')
self.assertEqual(blocks[0].contents, ' asdf {# nope {% enddocs %}} #} ')
self.assertEqual(blocks[0].block_name, '__my_doc__')
self.assertEqual(blocks[1].block_type_name, 'docs')
self.assertEqual(blocks[1].contents, ' asdf "')
self.assertEqual(blocks[1].block_name, '__my_other_doc__')
def test_docs_block_expr(self):
body = '{% docs more_doc %} asdf {{ "{% enddocs %}" ~ "}}" }}{% enddocs %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'docs'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'docs')
self.assertEqual(blocks[0].contents, ' asdf {{ "{% enddocs %}" ~ "}}" }}')
self.assertEqual(blocks[0].block_name, 'more_doc')
def test_unclosed_model_quotes(self):
# test case for https://github.com/fishtown-analytics/dbt/issues/1533
body = '{% model my_model -%} select * from "something"."something_else{% endmodel %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'model'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'model')
self.assertEqual(blocks[0].contents, 'select * from "something"."something_else')
self.assertEqual(blocks[0].block_name, 'my_model')
def test_if(self):
# if you conditionally define your macros/models, don't
body = '{% if true %}{% macro my_macro() %} adsf {% endmacro %}{% endif %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_if_innocuous(self):
body = '{% if true %}{% something %}asdfasd{% endsomething %}{% endif %}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
def test_for(self):
# no for-loops over macros.
body = '{% for x in range(10) %}{% macro my_macro() %} adsf {% endmacro %}{% endfor %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_for_innocuous(self):
# no for-loops over macros.
body = '{% for x in range(10) %}{% something my_something %} adsf {% endsomething %}{% endfor %}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
def test_endif(self):
body = '{% snapshot foo %}select * from thing{% endsnapshot%}{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endif but never saw a preceeding if (@ 1:53)', str(err.exception))
def test_if_endfor(self):
body = '{% if x %}...{% endfor %}{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 1:13)', str(err.exception))
def test_if_endfor_newlines(self):
body = '{% if x %}\n ...\n {% endfor %}\n{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 3:4)', str(err.exception))
bar_block = '''{% mytype bar %}
{# a comment
that inside it has
{% mytype baz %}
{% endmyothertype %}
{% endmytype %}
{% endmytype %}
{#
{% endmytype %}#}
some other stuff
{%- endmytype%}'''
x_block = '''
{% myothertype x %}
before
{##}
and after
{% endmyothertype %}
'''
complex_snapshot_file = '''
{#some stuff {% mytype foo %} #}
{% mytype foo %} some stuff {% endmytype %}
'''+bar_block+x_block
if_you_do_this_you_are_awful = '''
{#} here is a comment with a block inside {% block x %} asdf {% endblock %} {#}
{% do
set('foo="bar"')
%}
{% set x = ("100" + "hello'" + '%}') %}
{% snapshot something -%}
{% set x = ("{% endsnapshot %}" + (40 * '%})')) %}
{# {% endsnapshot %} #}
{% embedded %}
some block data right here
{% endembedded %}
{%- endsnapshot %}
{% raw %}
{% set x = SYNTAX ERROR}
{% endraw %}
{% materialization whatever, adapter='thing' %}
hi
{% endmaterialization %}
'''
``` |
{
"source": "JoelLefkowitz/arcade",
"score": 3
} |
#### File: arcade/src/rest.py
```python
import json
from urllib.error import HTTPError
from browser import ajax
def get_top_scores(callback=None):
ajax.get(
"api/scores", oncomplete=lambda x: oncomplete(x, callback)
)
def post_score(name, score, callback=None):
ajax.post(
"api/scores",
oncomplete=lambda x: oncomplete(x, callback),
data={"name": name, "value": score},
)
def oncomplete(x, callback):
if x.status >= 400:
raise HTTPError
if callback:
return callback(json.loads(x.text))
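# Illustrative usage sketch (not in the original source): these helpers are
# meant to run inside Brython, where the browser.ajax module is available.
# Page code would pass a callback that receives the decoded JSON payload.
def log_scores(scores):
    print(scores)
# get_top_scores(log_scores)
# post_score("player one", 42, callback=log_scores)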
```
#### File: scores/scores/views.py
```python
from aiohttp.web import HTTPUnprocessableEntity, View, json_response
from .db import get_top_scores, insert_score
from .models import Score
class ScoresView(View):
async def get(self):
leaders = await get_top_scores()
return json_response([score.data for score in leaders])
async def post(self):
        # Read the submitted form data from the request rather than recursing into this handler
        data = await self.request.post()
if "name" not in data or "value" not in data:
raise HTTPUnprocessableEntity
score = Score(data["name"], data["value"])
await insert_score(score)
return json_response(score.data)
``` |
{
"source": "JoelLefkowitz/conductor",
"score": 2
} |
#### File: execs/workspaces/update.py
```python
import json
import os
from pathlib import Path
from typing import Any, Dict, List
import yaml
from conductor.utils.colors import modulo_pastel
from conductor.utils.strings import decapitalize, title_case
def parse_workspaces(
workspaces: Dict[str, List[str]], organizations: List[str], output: str
) -> None:
Path(output).mkdir(parents=True, exist_ok=True)
for workspace, projects in workspaces.items():
with open(os.path.join(output, f"{workspace}.code-workspace"), "w") as stream:
stream.write(
json.dumps(
workspace_configuration(
workspace=workspace,
projects=projects,
is_organization=title_case(workspace) in organizations,
)
)
)
def update_settings(
workspaces: Dict[str, List[str]], organizations: List[str], output: str
) -> None:
settings_path = os.path.expanduser(
"~/Library/Application Support/Code/User/settings.json"
)
project_data = [
group_project_data(
{k: v for k, v in workspaces.items() if title_case(k) not in organizations},
"projects",
output,
),
group_project_data(
{k: v for k, v in workspaces.items() if title_case(k) in organizations},
"organizations",
output,
),
]
with open(settings_path, "r") as stream:
settings = json.load(stream)
with open(settings_path, "w") as stream:
settings["dashboard.projectData"] = project_data
stream.write(json.dumps(settings))
def workspace_configuration(
workspace: str, projects: List[str], is_organization: bool
) -> Dict[str, Any]:
root = os.path.expanduser(
os.path.join("~", title_case(workspace) if is_organization else "Workspace")
)
return {
"folders": [
{
"name": project,
"path": os.path.normpath(
os.path.join(
*(
[root, "..", workspace.capitalize(), project]
if is_organization
else [root, project]
)
)
),
}
for project in projects
]
}
def group_project_data(
workspaces: Dict[str, Any], group_name: str, output: str
) -> Dict[str, Any]:
return {
"collapsed": False,
"id": decapitalize(group_name),
"groupName": group_name.capitalize(),
"projects": [
{
"color": "#" + modulo_pastel(i),
"id": workspace.lower(),
"isGitRepo": False,
"name": workspace.capitalize(),
"path": os.path.abspath(
os.path.join(output, f"{workspace}.code-workspace")
),
}
for i, workspace in enumerate(workspaces)
],
}
def update(path: str) -> None:
organizations = ["Mint", "Papergym"]
output = os.path.join(os.path.dirname(path), "dist")
with open(path, "r") as stream:
workspaces = yaml.safe_load(stream)
parse_workspaces(workspaces, organizations, output)
update_settings(workspaces, organizations, output)
```
#### File: conductor/utils/colors.py
```python
def modulo_pastel(key: int) -> str:
"""
Select a pastel color on a modulo cycle.
Args:
key (int): The index modulo index.
Returns:
str: The selected RGB pastel color code.
"""
hex_codes = [
"957DAD",
"B5EAD7",
"C7CEEA",
"D291BC",
"E0BBE4",
"E2F0CB",
"FEC8D8",
"FF9AA2",
"FFB7B2",
"FFDFD3",
]
return hex_codes[key % len(hex_codes)]
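# Illustrative usage sketch (not in the original source): indexes beyond the
# palette length wrap around, so key 10 reuses the first color.
if __name__ == "__main__":
    print([modulo_pastel(i) for i in range(12)])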
```
#### File: conductor/utils/inputs.py
```python
def prompt(msg: str) -> bool:
return input(msg).lower() in ["y", "yes"]
``` |
{
"source": "JoelLefkowitz/convert-case",
"score": 3
} |
#### File: convert-case/src/lower.py
```python
from .camel import camel_to_lower_case, is_camel_case
from .definitions import LOWER
from .exceptions import MixedCaseError
from .kebab import is_kebab_case, kebab_to_lower_case
from .pascal import is_pascal_case, pascal_to_lower_case
from .sentence import is_sentence_case, sentence_to_lower_case
from .snake import is_snake_case, snake_to_lower_case
from .title import is_title_case, title_to_lower_case
from .upper import is_upper_case, upper_to_lower_case
def is_lower_case(string: str) -> bool:
    return LOWER.match(string) is not None
# is_lower_case must be bound before TO_LOWER is evaluated so the module
# imports without a NameError.
TO_LOWER = [
    (is_lower_case, lambda x: x),
    (is_upper_case, upper_to_lower_case),
    (is_sentence_case, sentence_to_lower_case),
    (is_title_case, title_to_lower_case),
    (is_camel_case, camel_to_lower_case),
    (is_snake_case, snake_to_lower_case),
    (is_kebab_case, kebab_to_lower_case),
    (is_pascal_case, pascal_to_lower_case),
]
def lower_case(string: str) -> str:
    try:
        return next(to_case(string) for is_case, to_case in TO_LOWER if is_case(string))
    # Throwing an error introduces impurity but is preferable to
    # returning an empty result.
    except StopIteration:
        raise MixedCaseError(string)
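# Illustrative usage sketch (not in the original source): any recognised case
# is routed through its converter to the lower-case representation.
if __name__ == "__main__":
    print(lower_case("ConvertCase"))   # expected: "convert case"
    print(lower_case("convert-case"))  # expected: "convert case"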
```
#### File: convert-case/src/pascal.py
```python
from functools import reduce
from .definitions import PASCAL
def is_pascal_case(string: str) -> bool:
return PASCAL.match(string) is not None
def lower_to_pascal_case(lower: str) -> str:
return "".join([i.capitalize() for i in lower.split(" ")])
def pascal_to_lower_case(pascal: str) -> str:
return reduce(
lambda acc, x: acc + (" " + x.lower() if x.isupper() else x),
pascal,
"",
).lstrip()
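# Illustrative usage sketch (not in the original source): round-tripping a
# PascalCase identifier through the lower-case representation.
if __name__ == "__main__":
    assert pascal_to_lower_case("ConvertCase") == "convert case"
    assert lower_to_pascal_case("convert case") == "ConvertCase"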
```
#### File: convert-case/src/sentence.py
```python
from .definitions import SENTENCE
def is_sentence_case(string: str) -> bool:
return SENTENCE.match(string) is not None
def lower_to_sentence_case(lower: str) -> str:
return lower.capitalize()
def sentence_to_lower_case(sentence: str) -> str:
return sentence.lower()
``` |
{
"source": "JoelLefkowitz/dev",
"score": 2
} |
#### File: dev/multi_job/exceptions.py
```python
from multi_job.utils.colours import fail
from multi_job.utils.emojis import FIRE
class PrettyException(Exception):
def __init__(self, message):
pretty_msg = f"\n{FIRE}{fail('Oh my!')}{FIRE}\n{message}"
super().__init__(pretty_msg)
class ParserValidationError(PrettyException):
pass
class ConfigNotGiven(PrettyException):
pass
class ArgumentMissing(PrettyException):
pass
class StepError(PrettyException):
pass
``` |
{
"source": "JoelLefkowitz/digitalocean-inventory",
"score": 2
} |
#### File: digitalocean-inventory/digitalocean_inventory/manager.py
```python
from functools import cached_property
from typing import Dict, List
import digitalocean # type: ignore
from .exceptions import MissingProjectError
from .formatter import Formatter
class Manager:
def __init__(
self, private_ips: bool, formatter: Formatter, token: str
) -> None:
self.private_ips = private_ips
self.formatter = formatter
self.manager = digitalocean.Manager(token=token)
@cached_property
def project_droplets(self) -> List[digitalocean.Droplet]:
try:
project = next(
filter(
lambda x: x.name == self.formatter.project_name,
self.manager.get_all_projects(),
)
)
except StopIteration:
raise MissingProjectError(self.formatter.project_name)
project_droplet_ids = list(
map(
lambda x: int(x.split(":")[2]),
filter(
lambda x: x.split(":")[1] == "droplet",
project.get_all_resources(),
),
)
)
return list(
filter(
lambda x: x.id in project_droplet_ids,
self.manager.get_all_droplets(),
)
)
@property
def meta_hostvars(self) -> Dict:
return {
"hostvars": {
self.droplet_ipv4(droplet): self.droplet_hostvars(
droplet
)
for droplet in self.project_droplets
}
}
def droplet_hostvars(self, droplet: digitalocean.Droplet) -> Dict:
return {
"ansible_python_interpreter": "/usr/bin/python3",
"ansible_ssh_extra_args": "-o StrictHostKeyChecking=no",
"ansible_ssh_private_key_file": self.formatter.ssh_key_path(
self.formatter.parse_index(droplet.name)
),
}
def droplet_ipv4(self, droplet: digitalocean.Droplet) -> str:
network_tag = "private" if self.private_ips else "public"
network = next(
filter(
lambda x: x["type"] == network_tag,
droplet.networks["v4"],
)
)
return network["ip_address"]
```
#### File: digitalocean-inventory/tests/test_formatter.py
```python
import os
import pytest
from digitalocean_inventory.exceptions import DropletNameError
from digitalocean_inventory.formatter import Formatter
@pytest.fixture()
def formatter():
return Formatter(
project="project",
env="env",
ssh_dir="ssh_dir",
)
def test_project_name(formatter):
assert formatter.project_name == "project env"
def test_droplet_name(formatter):
assert formatter.droplet_name(0) == "project-env-0"
def test_ssh_key_path(formatter):
assert formatter.ssh_key_path(0) == os.path.join(
"ssh_dir", "project-env-0"
)
def test_parse_index(formatter):
assert formatter.parse_index("project-env-0") == 0
assert formatter.parse_index("project-env-10") == 10
with pytest.raises(DropletNameError):
formatter.parse_index("project-en-0")
with pytest.raises(DropletNameError):
formatter.parse_index("project-env-")
with pytest.raises(DropletNameError):
formatter.parse_index("project-env-a")
``` |
{
"source": "JoelLefkowitz/fake-module",
"score": 2
} |
#### File: fake-module/src/fake_module.py
```python
import importlib
import sys
from types import ModuleType, TracebackType
from typing import Optional, Type, Any
from .exceptions import MissingModule
class FakeModule:
name: str
def __init__(self, name: str) -> None:
if name not in sys.modules:
raise MissingModule(name)
self.name = name
def __setattr__(self, name: str, value: Any) -> None:
super().__setattr__(name, value)
setattr(self.module, name, value)
def __enter__(self) -> "FakeModule":
self.purge()
return self
def __exit__(
self,
exctype: Optional[Type[BaseException]],
excinst: Optional[BaseException],
exctb: Optional[TracebackType],
) -> None:
self.restore()
@property
def module(self) -> ModuleType:
return sys.modules[self.name]
def purge(self) -> None:
unchanged = [
"__file__",
"__loader__",
"__package__",
"__path__",
"__spec__",
]
for key in set(self.module.__dict__) - set(unchanged):
delattr(self.module, key)
def restore(self) -> None:
importlib.reload(self.module)
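# Illustrative usage sketch (not in the original source): temporarily strip a
# loaded module of its attributes, patch in a replacement, and rely on the
# reload in restore() to bring the real implementation back.
if __name__ == "__main__":
    import json
    with FakeModule("json") as fake:
        fake.dumps = lambda obj: "patched"
        assert json.dumps({}) == "patched"
    assert json.dumps({}) == "{}"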
``` |
{
"source": "JoelLefkowitz/mock-file-tree",
"score": 3
} |
#### File: src/models/file_tree.py
```python
import os
from dataclasses import dataclass
from typing import List
from ..utils.paths import child_paths, path_base, path_from_base
from .exceptions import SubtreeNotFound
@dataclass
class FileTree:
base: str
children: List["FileTree"]
@classmethod
def from_paths(cls, *paths: str, base: str = ".") -> "FileTree":
return cls(
base,
[
cls.from_paths(*child_paths(i, list(paths)), base=i)
for i in set(path_base(j) for j in paths)
],
)
def is_descendant(self, path: str) -> bool:
path = os.path.normpath(path)
return (
path == self.base
or path in [i.base for i in self.children]
or any(i.is_descendant(path_from_base(path)) for i in self.children)
)
def get_descendant(self, path: str) -> "FileTree":
if not self.is_descendant(path):
raise SubtreeNotFound(path, self)
path = os.path.normpath(path)
if path == self.base:
return self
if path in [i.base for i in self.children]:
return next(filter(lambda x: x.base == path, self.children))
return next(
i.get_descendant(path_from_base(path))
for i in self.children
if i.is_descendant(path_from_base(path))
)
def listdir(self, path: str = ".") -> List[str]:
return [i.base for i in self.get_descendant(path).children]
def path_exists(self, path: str) -> bool:
return self.is_descendant(path)
def path_isdir(self, path: str) -> bool:
return self.is_descendant(path) and len(self.get_descendant(path).children) > 0
def path_isfile(self, path: str) -> bool:
return self.is_descendant(path) and len(self.get_descendant(path).children) == 0
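# Illustrative usage sketch (not in the original source): building a tree from
# flat paths and querying it like a small in-memory filesystem. The paths are
# made-up examples.
if __name__ == "__main__":
    tree = FileTree.from_paths("src/main.py", "src/utils/paths.py", "README.md")
    print(tree.path_isdir("src"))        # expected: True
    print(tree.path_isfile("README.md"))  # expected: True
    print(tree.listdir("src"))            # expected: the entries under "src"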
``` |
{
"source": "JoelLefkowitz/pipes",
"score": 3
} |
#### File: pipes/simple_pipes/pipes.py
```python
import subprocess
from io import TextIOWrapper
def pipe_call(call, cwd=".", break_str=None):
wrapper = TextIOWrapper(
subprocess.Popen(
call, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd
).stdout,
encoding="utf-8",
)
    # Stream each line as it arrives, detaching early if the break string is seen
    for line in wrapper:
        print(line, end="")
        if break_str and break_str in line:
            wrapper.detach()
            return wrapper
def pipe_capture(call, cwd="."):
lines = subprocess.check_output(call, cwd=cwd)
return lines.decode("utf-8").split("\n")[:-1]
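# Illustrative usage sketch (not in the original source): stream the output of
# one command and capture the lines of another.
if __name__ == "__main__":
    pipe_call(["echo", "hello"])
    print(pipe_capture(["ls", "-la"]))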
``` |
{
"source": "JoelLefkowitz/poetry-pdf",
"score": 2
} |
#### File: poetry-pdf/poetry_pdf/builder.py
```python
import os
from typing import List, Optional
import pdfkit
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from .utils import create_parent_dirs, parent_dirs_exist
def prepare_html(
title: str,
author: Optional[str],
plaintext_lines: List[str],
stylesheets: List[str],
) -> str:
env = Environment(
loader=FileSystemLoader(
os.path.normpath(os.path.join(__file__, "../templates/"))
),
trim_blocks=True,
lstrip_blocks=True,
keep_trailing_newline=True,
undefined=StrictUndefined,
)
render_context = {
"title": title,
"author": author,
"plaintext_lines": plaintext_lines,
"stylesheets": stylesheets,
}
return env.get_template("template.html").render(render_context)
def build(
output_path: str,
title: str,
author: Optional[str],
plaintext_lines: List[str],
stylesheets: List[str],
) -> None:
html = prepare_html(title, author, plaintext_lines, stylesheets)
options = {"--enable-local-file-access": ""}
if not parent_dirs_exist(output_path):
create_parent_dirs(output_path)
pdfkit.from_string(
html, output_path, css=stylesheets, options=options
)
``` |
{
"source": "JoelLefkowitz/portfolio",
"score": 2
} |
#### File: management/celery/projects.py
```python
import logging
from django.db.models import Q
from safe_environ import from_env
from celery import Celery
from projects.models import Project
from projects.management.scrapers.projects import fetch_public_projects
logger = logging.getLogger(__name__)
app = Celery()
@app.task
def update_projects():
owner = from_env("GITHUB_HANDLE")
names = lambda x: {i.name if hasattr(i, "name") else i["name"] for i in x}
try:
projects = fetch_public_projects(owner)
existing = Project.objects.filter(Q(name__in=names(projects)))
former = Project.objects.filter(~Q(name__in=names(projects)))
logger.info(
{
"retrieved": names(projects),
"create": names(projects) - names(existing),
"update": names(existing),
"delete": names(former),
}
)
for i in projects:
Project.objects.update_or_create(name=i["name"], defaults=i)
former.delete()
except Exception as e:
logger.critical(e)
```
#### File: management/commands/projects.py
```python
from django.core.management.base import BaseCommand
from projects.management.celery.projects import update_projects
class Command(BaseCommand):
help = "Update projects"
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
update_projects()
```
#### File: management/parsers/exceptions.py
```python
class MissingFields(Exception):
def __init__(self, url, fields):
super().__init__("")
``` |
{
"source": "JoelLefkowitz/pyimport",
"score": 2
} |
#### File: pyimport/pyimport/get_resource.py
```python
import inspect
import os
import sys
from dataclasses import dataclass, field
from types import ModuleType
from typing import Any, List
from .exceptions import ModuleDoesNotExist, ObjectDoesNotExist
def get_module(rel_module_path: str) -> ModuleType:
frame = inspect.stack()[1]
source_path = frame[0].f_code.co_filename
module_path = os.path.join(source_path, rel_module_path)
if not os.path.exists(module_path):
raise ModuleDoesNotExist(rel_module_path)
module_dir = os.path.dirname(os.path.normpath(module_path))
module_name = os.path.basename(os.path.normpath(module_path))
if module_name.endswith(".py"):
module_name = module_name[:-3]
with PathControl(module_dir):
module = __import__(module_name)
return module
def get_object(object_name: str, rel_module_path: str) -> Any:
module = get_module(rel_module_path)
if not hasattr(module, object_name):
raise ObjectDoesNotExist(object_name)
else:
return getattr(module, object_name)
@dataclass
class PathControl:
module_dir: str
exit_path: List[str] = field(default_factory=list)
def __enter__(self) -> None:
self.exit_path = sys.path.copy()
sys.path.append(self.module_dir)
def __exit__(self, type, value, tb) -> None:
sys.path = self.exit_path.copy()
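# Illustrative usage sketch (not in the original source): the relative path
# below is hypothetical and is resolved against the file that makes the call.
if __name__ == "__main__":
    helpers = get_module("../helpers.py")
    main = get_object("main", "../helpers.py")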
``` |
{
"source": "JoelLefkowitz/quickdocs",
"score": 3
} |
#### File: quickdocs/state/paths.py
```python
import os
from dataclasses import dataclass
from typing import Any, Mapping
from quickdocs.utils.paths import path_base
@dataclass
class Paths:
template_path: str
template_dir: str
output_dir: str
@property
def dct(self) -> Mapping[str, Any]:
"""
Dictionary representation of this instance.
Returns:
Dict: Dictionary representation.
"""
return self.__dict__
@property
def local_path(self) -> str:
"""
Relative path of this instance from the template directory.
Returns:
str: Relative path.
"""
return os.path.relpath(self.template_path, self.template_dir)
@property
def output_path(self) -> str:
"""
Relative path of this instance's output path.
Returns:
str: Relative path.
"""
return os.path.join(self.output_dir, self.local_path)
@property
def first_subdir(self) -> str:
"""
Base path of this instance's path from the template directory.
Returns:
str: Base path.
"""
return path_base(self.local_path)
```
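A minimal usage sketch of the `Paths` properties above; the concrete file layout and the import path are assumptions, not taken from quickdocs:
```python
from quickdocs.state.paths import Paths  # assumed module path for the file above

paths = Paths(
    template_path="templates/pages/index.rst",
    template_dir="templates",
    output_dir="build",
)
print(paths.local_path)    # "pages/index.rst" (template_path relative to template_dir)
print(paths.output_path)   # "build/pages/index.rst"
print(paths.first_subdir)  # likely "pages", depending on what path_base returns
```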
#### File: quickdocs/utils/dicts.py
```python
from typing import Any, Dict, Mapping
def merge_dicts(*args: Mapping[Any, Any]) -> Dict[Any, Any]:
"""
Successively merge any number of dictionaries.
>>> merge_dicts({'a': 1}, {'b': 2})
{'a': 1, 'b': 2}
>>> merge_dicts({'a': 1}, {'a': 2}, {'a': 3})
{'a': 3}
Returns:
Dict: Dictionary of merged inputs.
"""
out = {} # type: Dict[Any, Any]
for dct in args:
out = {**out, **dct}
return out
```
#### File: quickdocs/utils/files.py
```python
import json
from typing import Any, Dict
import ruamel.yaml
def parse_json(path: str) -> Dict[str, Any]:
"""
Parse a json file.
Args:
path (str): Json file path.
Returns:
Dict: Dictionary of parsed data.
"""
with open(path, "r") as stream:
return json.load(stream)
def parse_yaml(path: str) -> Dict[str, Any]:
"""
Parse a yaml file.
Args:
path (str): Yaml file path.
Returns:
Dict: Dictionary of parsed data.
"""
with open(path, "r") as stream:
return ruamel.yaml.safe_load(stream)
```
#### File: quickdocs/tests/test_inputs.py
```python
import io
import json
from typing import Dict
from unittest.mock import mock_open, patch
import pytest
import ruamel.yaml
from quickdocs.exceptions.files import UnrecognizedFormat
from quickdocs.exceptions.inputs import MissingInputs
from quickdocs.state.inputs import Inputs
def test_inputs_from_file(minimal_inputs: Dict[str, str]) -> None:
"""
Test creation of Inputs instance from a file.
"""
read_data = json.dumps(minimal_inputs)
with patch("builtins.open", mock_open(None, read_data)):
Inputs.from_file("inputs.json")
buffer = io.BytesIO()
yaml = ruamel.yaml.YAML()
yaml.dump(minimal_inputs, buffer)
read_data = buffer.getvalue().decode("utf-8")
with patch("builtins.open", mock_open(None, read_data)):
Inputs.from_file("inputs.yml")
with patch("builtins.open", mock_open()):
with pytest.raises(UnrecognizedFormat):
Inputs.from_file("inputs.xyz")
def test_inputs_constraints(minimal_inputs: Dict[str, str]) -> None:
"""
Test Inputs state class properties.
"""
missing_inputs = minimal_inputs.copy()
missing_inputs.pop("project")
read_data = json.dumps(missing_inputs)
with patch("builtins.open", mock_open(None, read_data)):
with pytest.raises(MissingInputs):
Inputs.from_file("inputs.json")
redundant_inputs = minimal_inputs.copy()
redundant_inputs["spirit_animal"] = "Shark"
read_data = json.dumps(redundant_inputs)
with patch("builtins.open", mock_open(None, read_data)):
with pytest.warns(UserWarning):
Inputs.from_file("inputs.json")
@pytest.fixture
def minimal_inputs() -> Dict[str, str]:
"""
Minimal Inputs state class fixture.
Returns:
Dict[str, str]: Minimal inputs.
"""
return {
"project": "",
"version": "",
"author": "",
"html_title": "",
"github_url": "",
}
``` |
{
"source": "JoelLefkowitz/randutils",
"score": 4
} |
#### File: randutils/randutils/generate.py
```python
from random import SystemRandom
from string import ascii_uppercase, digits
import numpy as np
def randint(maximum):
return np.random.randint(0, maximum)
def random_number_str(length=1, minimum=0, maximum=None):
maximum = maximum or 10 ** length
return str(np.random.randint(minimum, maximum)).zfill(length)
def random_string(n):
alphabet = ascii_uppercase + digits
return "".join(SystemRandom().choice(alphabet) for _ in range(n))
def random_phone(area_code="27"):
return f"{area_code}{random_number_str(10)}"
def random_birthday():
return f"{random_number_str(4, 1920, 2021)}-{random_number_str(2, 1, 13)}-{random_number_str(2, 1, 29)}"
```
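A quick usage sketch of the generators above; the printed values are illustrative since every call is random, and the import path is an assumption based on the file layout:
```python
from randutils.generate import (
    randint, random_number_str, random_string, random_phone, random_birthday,
)

print(randint(10))           # e.g. 7, an int in [0, 10)
print(random_number_str(4))  # e.g. "0042", zero-padded to 4 digits
print(random_string(8))      # e.g. "K3ZQ9B1D", uppercase letters and digits
print(random_phone())        # e.g. "270734512890", "27" prefix plus 10 digits
print(random_birthday())     # e.g. "1984-07-15"
```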
#### File: randutils/randutils/lists.py
```python
import numpy as np
from .chance import by_chance
from .exceptions import EmptyListError
def pop_random_entry(lst):
if not lst:
raise EmptyListError
index = np.random.randint(0, len(lst))
return lst.pop(index)
def pick_random_entry(lst):
if not lst:
raise EmptyListError
index = np.random.randint(0, len(lst))
return lst[index]
def randomly_filter(lst, weight=0.1):
return [i for i in lst if by_chance(weight)]
def scramble(lst):
return sorted(lst, key=lambda x: np.random.random())
``` |
{
"source": "JoelLefkowitz/safe-environ",
"score": 3
} |
#### File: safe-environ/safe_environ/environ.py
```python
import os
from typing import Any
from .exceptions import InvalidEnvVar, MissingEnvVar
def from_env(name: str) -> Any:
if name not in os.environ:
raise MissingEnvVar(name)
value = os.environ.get(name)
if not value:  # catches empty strings as well as None, matching InvalidEnvVar
raise InvalidEnvVar(name)
return value
```
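A small sketch of calling `from_env`; the variable names are arbitrary, and the exception import assumes the package layout shown in the next file:
```python
import os
from safe_environ import from_env  # matches the import used in the celery task above
from safe_environ.exceptions import MissingEnvVar

os.environ["GITHUB_HANDLE"] = "JoelLefkowitz"
print(from_env("GITHUB_HANDLE"))   # "JoelLefkowitz"

try:
    from_env("NOT_SET_ANYWHERE")   # raises because the variable is absent
except MissingEnvVar as error:
    print(error)
```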
#### File: safe-environ/safe_environ/exceptions.py
```python
class MissingEnvVar(Exception):
def __init__(self, env_var_name: str) -> None:
super().__init__(
f"The environment variable '{env_var_name}' is not set"
)
class InvalidEnvVar(Exception):
def __init__(self, env_var_name: str) -> None:
super().__init__(
f"The environment variable '{env_var_name}' is empty or None"
)
``` |
{
"source": "JoelLefkowitz/templ8",
"score": 2
} |
#### File: templ8/e2e/models.py
```python
import os
from dataclasses import dataclass
from shutil import rmtree
from typing import Iterable, Type, TypeVar
from src.utils.strings.paths import full_listdir, path_head
T = TypeVar("T", bound="TestCase") # pylint: disable = C0103
@dataclass
class TestCase:
__test__ = False
root: str
@classmethod
def infer_cases(cls: Type[T]) -> Iterable[T]:
parent = os.path.normpath(os.path.join(__file__, "..", "cases"))
for path in full_listdir(parent):
case = cls(path)
if os.path.isdir(case.root) and os.path.isdir(case.expected):
if os.path.exists(case.actual):
rmtree(case.actual)
yield case
@property
def name(self) -> str:
return path_head(self.root)
@property
def templates(self) -> str:
return self.from_root("templates")
@property
def settings(self) -> str:
return self.from_root(".template.yml")
@property
def expected(self) -> str:
return self.from_root("expected")
@property
def actual(self) -> str:
return self.from_root("actual")
def from_root(self, path: str) -> str:
return os.path.join(self.root, path)
```
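A sketch of how the inferred cases could drive a parametrized end-to-end test; the import path and the test body are assumptions, not code from the repository:
```python
import pytest
from e2e.models import TestCase  # assumed import path for the file above

@pytest.mark.parametrize("case", TestCase.infer_cases(), ids=lambda c: c.name)
def test_end_to_end(case: TestCase) -> None:
    # A real test would render case.templates with case.settings into case.actual,
    # then diff the tree under case.actual against the tree under case.expected.
    assert case.expected.endswith("expected")
    assert case.actual.endswith("actual")
```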
#### File: src/models/cli.py
```python
from argparse import ArgumentParser
from dataclasses import dataclass
from logging import DEBUG, INFO, WARNING
from typing import Any, Dict, List, Optional, Type, TypeVar
from .. import __version__
from ..utils.extensions.format_help import format_help
from ..utils.extensions.store_kv import StoreKV
from ..utils.parsers.dataclasses import pick_into_dataclass
T = TypeVar("T", bound="CLI") # pylint: disable = C0103
parser = ArgumentParser(
"templ8",
description=f"Templ8 {__version__}",
)
format_help(parser)
parser.add_argument(
"--output",
help="output directory.",
action="store",
)
parser.add_argument(
"--settings-file",
help="input file path.",
action="store",
)
parser.add_argument(
"--dry-run", help="don't make any changes.", action="store_true", default=False
)
parser.add_argument(
"--silent", help="don't output any logs.", action="store_true", default=False
)
parser.add_argument(
"--verbose", help="output verbose logs.", action="store_true", default=False
)
parser.add_argument(
"--debug", help="output debug logs.", action="store_true", default=False
)
parser.add_argument(
"--clear-top-level",
help="remove top level files.",
action="store_true",
default=None,
)
parser.add_argument(
"--logical-grouping",
help="flatten render context.",
action="store_true",
default=None,
)
parser.add_argument(
"--skip-core-templates",
help="skip core templates.",
action="store_true",
default=None,
)
parser.add_argument(
"--collection-sources",
help="where to look for collections.",
action="append",
)
parser.add_argument(
"--collections",
help="collection names.",
action="append",
)
parser.add_argument(
"--includes",
help="path names to include.",
action="append",
)
parser.add_argument(
"--excludes",
help="path names to exclude.",
action="append",
)
parser.add_argument(
"--loader-paths",
help="where to look for Jinja includes.",
action="append",
)
parser.add_argument(
"--render-context",
help="jinja context variables.",
action=StoreKV,
)
@dataclass
class CLI:
# Some properties are Optional since we want
# to be able to detect when they are not set.
output: Optional[str]
settings_file: Optional[str]
dry_run: bool
silent: bool
verbose: bool
debug: bool
clear_top_level: Optional[bool]
logical_grouping: Optional[bool]
skip_core_templates: Optional[bool]
collection_sources: List[str]
collections: List[str]
includes: List[str]
excludes: List[str]
loader_paths: List[str]
render_context: Dict[str, Any]
@classmethod
def parse(cls: Type[T]) -> T:
return pick_into_dataclass(cls, parser.parse_args())
@property
def loglevel(self) -> int:
if self.verbose or self.debug:
return DEBUG
if self.silent:
return WARNING
return INFO
```
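A hedged sketch of a matching invocation; the flag values are invented and `pick_into_dataclass` / `StoreKV` are assumed to behave as their names suggest:
```python
from src.models.cli import CLI  # assumed module path for the file above

# Hypothetical command line:
#   templ8 --output build --settings-file .template.yml \
#          --collections python_package --render-context author=demo --verbose
cli = CLI.parse()                # reads sys.argv through the parser defined above
print(cli.output, cli.settings_file, cli.collections)
print(cli.render_context)        # e.g. {"author": "demo"} collected by StoreKV
print(cli.loglevel)              # DEBUG when --verbose or --debug is passed
```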
#### File: src/models/inputs.py
```python
from dataclasses import dataclass
from typing import Type, TypeVar
from ..models.cli import CLI
from ..models.flags import Flags
from ..models.settings import Settings
T = TypeVar("T", bound="Inputs") # pylint: disable = C0103
@dataclass
class Inputs:
flags: Flags
settings: Settings
@classmethod
def parse(cls: Type[T], cli: CLI) -> T:
return cls(Flags.parse(cli), Settings.parse(cli))
```
#### File: src/models/rename.py
```python
from dataclasses import dataclass
from typing import Dict, Type, TypeVar, Union
from ..models.exceptions import InvalidRename
T = TypeVar("T", bound="Rename") # pylint: disable = C0103
@dataclass
class Rename:
segment: str
token: str
@classmethod
def parse(cls: Type[T], obj: Union[str, Dict[str, str]]) -> T:
if isinstance(obj, str):
return cls(segment=obj, token=obj)
if not isinstance(obj, dict) or "segment" not in obj:
raise InvalidRename(obj)
return cls(obj["segment"], obj["token"] if "token" in obj else obj["segment"])
```
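A short sketch of the two accepted shapes for a rename entry (the values are made up):
```python
from src.models.rename import Rename  # assumed module path for the file above

# A bare string maps a path segment onto itself as the token.
print(Rename.parse("package_name"))
# Rename(segment='package_name', token='package_name')

# A dict may override the token; leaving "token" out falls back to the segment.
print(Rename.parse({"segment": "package_name", "token": "project"}))
# Rename(segment='package_name', token='project')

# Anything else, or a dict without "segment", raises InvalidRename.
```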
#### File: utils/extensions/progress_handler.py
```python
import sys
from logging import DEBUG, Logger, LogRecord, StreamHandler
from typing import Any
import colorama
from ...utils.collections.dicts import key_is_true
from ...utils.strings.formatters import progress
class ProgressHandler(StreamHandler):
parent: Logger
overwrite_previous: bool
def __init__(self, parent: Logger, *args: Any, **kwargs: Any) -> None:
colorama.init()
self.parent = parent
self.overwrite_previous = False
super().__init__(*args, **kwargs)
@staticmethod
def overwrite() -> None:
sys.stdout.write("\033[F\033[K")
sys.stdout.flush()
def emit(self, record: LogRecord) -> None:
if self.overwrite_previous:
self.overwrite()
if (
key_is_true(record.__dict__, "progress")
and self.parent.getEffectiveLevel() > DEBUG
):
record.msg = progress(record.msg)
self.overwrite_previous = True
else:
self.overwrite_previous = False
super().emit(record)
```
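A usage sketch, assuming `key_is_true` simply checks that the `progress` key is truthy; the logger name and messages are invented. Records logged with `extra={"progress": True}` get rewritten in place by whatever is emitted next:
```python
import logging
from src.utils.extensions.progress_handler import ProgressHandler  # assumed module path

logger = logging.getLogger("templ8")
logger.setLevel(logging.INFO)
logger.addHandler(ProgressHandler(logger))

for done in range(1, 4):
    # Each emit overwrites the previous progress line on the terminal.
    logger.info(f"Rendered {done}/3 templates", extra={"progress": True})

logger.info("Done")  # a normal record; it replaces the last progress line and stays
```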
#### File: utils/parsers/exceptions.py
```python
from typing import Set
class MissingDataclassArguments(Exception):
def __init__(self, missing: Set[str]) -> None:
super().__init__(
f"Failed to map arguments into the dataclass, missing: {missing}."
)
class FileParsingError(Exception):
def __init__(self, path: str) -> None:
super().__init__(f"Failed to parse {path}.")
class UnsupportedFormat(Exception):
def __init__(self, path: str, markup: str) -> None:
super().__init__(f"Unable to parse {path}. Was expecting {markup}.")
```
#### File: utils/strings/pipes.py
```python
def pad_in(string: str, space: int) -> str:
"""
>>> pad_in('abc', 0)
'abc'
>>> pad_in('abc', 2)
' abc'
"""
return "".join([" "] * space) + string
def without_ends(string: str) -> str:
"""
>>> without_ends('abc')
'b'
"""
return string[1:-1]
def without_first(string: str) -> str:
"""
>>> without_first('abc')
'bc'
"""
return string[1:]
def without_last(string: str) -> str:
"""
>>> without_last('abc')
'ab'
"""
return string[:-1]
def quote(string: str) -> str:
"""
>>> quote('abc')
'\"abc\"'
>>> quote('"abc"')
'\"abc\"'
"""
return string if string.startswith('"') and string.endswith('"') else f'"{string}"'
def handle(string: str) -> str:
"""
>>> handle('https://github.com/user/repo')
'user/repo'
>>> handle('user/repo')
'user/repo'
>>> handle('')
''
"""
splt = string.split("/")
return "/".join(splt[-2:] if len(splt) >= 2 else splt)
def pluralize(count: int, unit: str) -> str:
"""
Pluralize a count and given its units.
>>> pluralize(1, 'file')
'1 file'
>>> pluralize(2, 'file')
'2 files'
>>> pluralize(0, 'file')
'0 files'
"""
return f"{count} {unit}{'s' if count != 1 else ''}"
def remove_prefix(string: str, prefix: str) -> str:
"""
>>> remove_prefix('abc', 'ab')
'c'
>>> remove_prefix('abc', 'd')
'abc'
>>> remove_prefix('abc', 'abcd')
'abc'
"""
return string[len(prefix) :] if string.startswith(prefix) else string
``` |
{
"source": "JoelLefkowitz/yummy-cereal",
"score": 2
} |
#### File: tests/behaviour_tests/test_annotations_parsing.py
```python
from typing import Dict
import pytest
from pytest_bdd import given, scenario, then, when
from yummy_cereal import AnnotationsParser
from ..models.menus.course import Course
from ..models.menus.dish import Dish
from ..models.menus.menu import Menu
@pytest.fixture()
def bdd_context() -> Dict:
return {}
@scenario(
"annotations_parsing.feature", "Parsing a menu from a yaml file"
)
def test_parsing_a_menu_from_a_yaml_file():
"""Parsing a menu from a yaml file."""
@given("I have a serialized menu")
def i_have_a_serialized_menu():
"""I have a serialized menu."""
@given("I have annotated menu classes")
def i_have_annotated_menu_classes():
"""I have annotated menu classes."""
@when("I create a menu parser")
def i_create_a_menu_parser(bdd_context: Dict):
"""I create a menu parser."""
dish_parser = AnnotationsParser(Dish)
course_parser = AnnotationsParser(
Course, specified_parsers={Dish: dish_parser}
)
bdd_context["menu_parser"] = AnnotationsParser(
Menu,
specified_parsers={Course: course_parser, Dish: dish_parser},
)
@when("I parse the serialized menu")
def i_parse_the_serialized_menu(
bdd_context: Dict, serialized_menu: Menu
):
"""I parse the serialized menu."""
menu_parser = bdd_context["menu_parser"]
bdd_context["parsed_menu"] = menu_parser(serialized_menu)
@then("I recieve a menu object")
def i_recieve_a_menu_object(bdd_context: Dict, parsed_menu: Menu):
"""I recieve a menu object."""
assert bdd_context["parsed_menu"] == parsed_menu
```
#### File: models/people/house.py
```python
from dataclasses import dataclass
from typing import Dict
from yummy_cereal.parsers.exceptions import MissingFieldError
@dataclass
class House:
number: int
street: str
def house_parser(config: Dict) -> House:
if not "number" in config or not "street" in config:
raise MissingFieldError()
return House(config["number"], config["street"])
@dataclass
class House:
number: int
street: str
```
#### File: parsers/validated_parser/exceptions.py
```python
from typing import Any
from ...utils.prettifiers import prettify_dict
from ..exceptions import ParsingError
class ParserValidationFailed(ParsingError):
def __init__(self, obj: Any) -> None:
msg = "The given object data failed a parser validation check"
context = prettify_dict({"Given object": obj})
super().__init__(f"{msg}\n{context}")
```
#### File: parsers/validated_parser/validated_parser.py
```python
from dataclasses import dataclass
from typing import Dict, Generic, List, TypeVar
from ...protocols import Parser, Validator
from .exceptions import ParserValidationFailed
T = TypeVar("T")
@dataclass
class ValidatedParser(Generic[T]):
parser: Parser[T]
validators: List[Validator]
def __call__(self, config: Dict) -> T:
"""
Runs each of self.validators, then calls self.parser on success
Args:
config (Dict): Configuration to be parsed
Raises:
ParserValidationFailed: If one or more validators return False
Returns:
T: Parsed object
"""
for validator in self.validators:
if not validator(config):
raise ParserValidationFailed(config)
return self.parser(config)
```
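A minimal sketch of composing the `ValidatedParser` defined above with plain callables; the config shape is made up:
```python
# Hypothetical: parse a {"port": ...} mapping into an int, but only when it validates.
parse_port = ValidatedParser(
    parser=lambda config: int(config["port"]),
    validators=[
        lambda config: "port" in config,
        lambda config: str(config.get("port", "")).isdigit(),
    ],
)

print(parse_port({"port": "8080"}))  # 8080
# parse_port({"port": "abc"})        # raises ParserValidationFailed
```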
#### File: yummy_cereal/serializers/exceptions.py
```python
from typing import Any, Dict
from ..utils.prettifiers import prettify_dict
class MissingFieldError(Exception):
def __init__(self, field_name: str, annotations: Dict) -> None:
msg = (
"Failed to parse field\nNo matching field found or default value provided for an annotation\n"
+ prettify_dict(
{
"Missing field": field_name,
"Annotations": annotations,
}
)
)
super().__init__(msg)
class FieldSerializingError(Exception):
def __init__(
self, field_parser: Any, raw_field_value: Any
) -> None:
msg = "Failed to parse field\n" + prettify_dict(
{
"Serializer": field_parser,
"Field value": raw_field_value,
}
)
super().__init__(msg)
class ListFieldSerializingError(FieldSerializingError):
def __init__(
self, inner_field_parser: Any, raw_field_value: Any
) -> None:
super().__init__(inner_field_parser, raw_field_value)
class DictFieldSerializingError(FieldSerializingError):
def __init__(
self, inner_field_parser: Any, raw_field_value: Any
) -> None:
super().__init__(inner_field_parser, raw_field_value)
class SerializerValidationFailed(Exception):
def __init__(self, obj: Any) -> None:
msg = (
"The given object data failed a parser validation check\n"
+ prettify_dict({"Given object": obj})
)
super().__init__(msg)
```
#### File: yummy_cereal/utils/prettifiers.py
```python
from typing import Dict
def prettify_dict(dct: Dict) -> str:
return "/n".join(f"{k}: {v}" for k, v in dct.items())
``` |
{
"source": "joellehanna97/wikipedia-nlp",
"score": 2
} |
#### File: joellehanna97/wikipedia-nlp/cat_bert_model.py
```python
import os, re
import tensorflow as tf
import pandas as pd
import tensorflow_hub as hub
from tqdm import tqdm
import numpy as np
from bert.tokenization import FullTokenizer
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
"""# Tokenize
Next, tokenize our text to create `input_ids`, `input_masks`, and `segment_ids`
"""
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def create_tokenizer_from_hub_module(bert_path):
"""Get the vocab file and casing info from the Hub module."""
bert_layer = hub.KerasLayer(bert_path, trainable=False)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case)
return tokenizer, bert_layer
def convert_single_example(tokenizer, example, max_seq_length=256):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
input_ids = [0] * max_seq_length
input_mask = [0] * max_seq_length
segment_ids = [0] * max_seq_length
label = 0
return input_ids, input_mask, segment_ids, label
tokens_a = tokenizer.tokenize(example.text_a)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0 : (max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids, example.label
def convert_examples_to_features(tokenizer, examples, max_seq_length=256):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
input_ids, input_masks, segment_ids, labels = [], [], [], []
for example in tqdm(examples, desc="Converting examples to features"):
input_id, input_mask, segment_id, label = convert_single_example(
tokenizer, example, max_seq_length
)
input_ids.append(input_id)
input_masks.append(input_mask)
segment_ids.append(segment_id)
labels.append(label)
return (
np.array(input_ids),
np.array(input_masks),
np.array(segment_ids),
np.array(labels).reshape(-1, 1),
)
def convert_text_to_examples(texts, labels):
"""Create InputExamples"""
InputExamples = []
for text, label in zip(texts, labels):
InputExamples.append(
InputExample(guid=None, text_a=" ".join(text), text_b=None, label=label)
)
return InputExamples
# Build model
def build_model(bert_layer, max_seq_length, n_classes):
act = 'softmax'
loss = 'categorical_crossentropy'
in_id = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_ids")
in_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_masks")
in_segment = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="segment_ids")
bert_inputs = [in_id, in_mask, in_segment]
pooled_output, sentence_output = bert_layer(bert_inputs)
flatten = tf.keras.layers.Flatten()(pooled_output)
dense_1 = tf.keras.layers.Dense(512, activation='relu')(flatten)
dropout_1 = tf.keras.layers.Dropout(0.5)(dense_1)
dense_2 = tf.keras.layers.Dense(256, activation='relu')(dropout_1)
dense_3 = tf.keras.layers.Dense(128, activation='relu')(dense_2)
dropout_2 = tf.keras.layers.Dropout(0.4)(dense_3)
dense_4 = tf.keras.layers.Dense(64, activation='relu')(dropout_2)
pred = tf.keras.layers.Dense(n_classes, activation=act)(dense_4)
model = tf.keras.models.Model(inputs=bert_inputs, outputs=pred)
adam = Adam(lr=0.0003)
model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
model.summary()
return model
def load_dataset(bert_path, max_seq_length, data_path, text_col, label_col, split=[0.80, 0.10, 0.10]):
df = pd.read_csv(data_path)
df = df.sample(frac=1).reset_index(drop=True)
text = df[text_col].tolist()
texts = [' '.join(t.split()[0:max_seq_length]) for t in text]
texts = np.array(texts, dtype=object)[:, np.newaxis]
labels = [1 for i in range(len(text))]
# instantiate tokenizer and bert model through tf-hub
print('Instantiating tokenizer and bert model through tf-hub...')
tokenizer, bert_layer = create_tokenizer_from_hub_module(bert_path)
# Convert data to InputExample format
print('Converting inputs...')
examples = convert_text_to_examples(texts, labels)
# Convert to features
(all_input_ids, all_input_masks, all_segment_ids, all_labels
) = convert_examples_to_features(tokenizer, examples, max_seq_length=max_seq_length)
from sklearn.preprocessing import LabelEncoder
labels = df[label_col].to_list()
le = LabelEncoder()
le.fit(labels)
n_classes = len(list(le.classes_))
all_labels = le.transform(labels)
all_labels = tf.keras.utils.to_categorical(all_labels)
# Fall back to the default split when the proportions do not sum to 1,
# then derive the validation and test fractions in either case.
if np.array(split).sum() != 1.0:
split = [0.80, 0.10, 0.10]
val_size = split[1]
test_size = split[2]/split[0]
print('Splitting dataset...')
train_input_ids, val_input_ids, train_input_masks, val_input_masks, train_segment_ids, val_segment_ids, train_labels, val_labels = train_test_split(all_input_ids, all_input_masks, all_segment_ids, all_labels, test_size=val_size)
train_input_ids, test_input_ids, train_input_masks, test_input_masks, train_segment_ids, test_segment_ids, train_labels, test_labels = train_test_split(train_input_ids, train_input_masks, train_segment_ids, train_labels, test_size=test_size)
X_train = [train_input_ids, train_input_masks, train_segment_ids]
y_train = train_labels
X_val = [val_input_ids, val_input_masks, val_segment_ids]
y_val = val_labels
X_test = [test_input_ids, test_input_masks, test_segment_ids]
y_test = test_labels
return bert_layer, df, X_train, y_train, X_val, y_val, X_test, y_test, n_classes, list(le.classes_)
def fit_model(model, X_train, y_train, X_val, y_val, epochs, batch_size):
model_name = 'models/bert_wiki.h5'
mcp_save = ModelCheckpoint(model_name, save_best_only=True, monitor='val_loss', mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0.0000000001)
print('Starting training for {} epochs with a batch size of {}. Saving to {}'.format(epochs, batch_size, model_name))
history = model.fit(X_train,
y_train,
validation_data=(X_val, y_val),
epochs=epochs,
batch_size=batch_size,
callbacks=[mcp_save, reduce_lr])
return history, model_name
def evaluate_model(df, class_names, model_name, X_test, y_test):
print('Loading model...')
model = load_model(model_name, custom_objects={'KerasLayer': hub.KerasLayer})
t1 = tf.convert_to_tensor(X_test[0], dtype=tf.int32)
t2 = tf.convert_to_tensor(X_test[1], dtype=tf.int32)
t3 = tf.convert_to_tensor(X_test[2], dtype=tf.int32)
print('Running predictions...')
preds = model.predict([t1, t2, t3])
predictions = np.argmax(preds, axis=1)
y = np.argmax(y_test, axis=1)
report = classification_report(y, predictions, target_names=class_names)
return predictions, y, report
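# A rough end-to-end sketch of how the helpers above fit together, kept as comments
# because it needs a TF-Hub BERT module and a labelled CSV; the URL, file name and
# column names are assumptions, not values from this repository:
#
#   max_seq_length = 256
#   (bert_layer, df, X_train, y_train, X_val, y_val,
#    X_test, y_test, n_classes, class_names) = load_dataset(
#       "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2",
#       max_seq_length, "articles.csv", "text", "category")
#   model = build_model(bert_layer, max_seq_length, n_classes)
#   history, model_name = fit_model(model, X_train, y_train, X_val, y_val,
#                                   epochs=3, batch_size=32)
#   predictions, y_true, report = evaluate_model(df, class_names, model_name,
#                                                X_test, y_test)
#   print(report)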
``` |
{
"source": "JoelLigma/Image-Classifier",
"score": 3
} |
#### File: JoelLigma/Image-Classifier/train.py
```python
import numpy as np
import pandas as pd
import json
import torchvision
from torchvision import datasets, transforms, models
import torch
from torch import nn, optim
import torch.nn.functional as F
import time
import PIL
from PIL import Image
import argparse
# import defined functions from functions.py
import functions
from functions import get_input_args, load_and_transform, build_classifier, train, save_model
'''
This file lets the user train a pre-trained neural network on an image dataset.
The user can choose the learning rate, epochs, model architecture, input features, hidden layers,
output_nodes and whether he/she wants to use GPU to train it.
'''
def main():
# user inputs from command line
in_arg = get_input_args()
# load and process data into training, validation and test data sets
trainloader, validationloader, testloader, train_data = load_and_transform(in_arg.data_dir)
# load pre-trained nn and build classifier with user inputs (loss criterion & optimizer are fixed) to create the model
model, criterion, optimizer = build_classifier(in_arg.arch, in_arg.in_features, in_arg.hidden_layers, in_arg.output_size, in_arg.learning_rate)
# Train the model
trained_model = train(in_arg.epochs, trainloader, validationloader, optimizer, model, criterion, in_arg.gpu)
# saving the model
save_model(trained_model, optimizer, in_arg.saving_dir, in_arg.arch, in_arg.learning_rate, in_arg.epochs, train_data)
# Call to main function to run the program
if __name__ == "__main__":
main()
``` |
{
"source": "joelliusp/SpaceHabit",
"score": 3
} |
#### File: SpaceHabit/Experiments/Experiments.py
```python
import ConfigLayer
import ExperimentModule
import DatabaseLayer
import ExperimentalModule2
import datetime
from bson.objectid import ObjectId
import Progression
import Replacer
import ReplacerB
class Experiment(object):
"""just a file to experiment with python"""
def print_hello(self):
print(ExperimentModule.return_something())
def testing_testing(self):
print("This is the non-test version")
def check_db_works():
DatabaseLayer.open_conn()
print(DatabaseLayer.get_open_connection().list_tables())
def python_dick():
mydict = {'name':'joel',
'color':'white'}
print(mydict['name'])
mydict['age'] = 27
print(mydict['age'])
def add_item_to_db():
item = {'name':"John",
'lvl':31}
heros = DatabaseLayer.get_table("heros")
id = heros.insert_one(item).inserted_id
print(id)
def get_item_from_db():
id = ObjectId("5733fa75eceb33173454efd0")
hero = ModelLayer.get_hero_by_id(id)
print(hero['name'])
def also_testing_testing():
print("this is also not a test")
def update_db_item():
hero = ModelLayer.get_hero_by_id(ObjectId("5733fa75eceb33173454efd0"))
changes = {'lvl':145}
ModelLayer.update_hero(hero,changes)
def get_non_existent_item_from_db():
a = DatabaseLayer.get_stuff_by_id(ObjectId("5733fa75ec00000000000000"),"heros")
if not a:
print("not A")
if a is None:
print("A is none")
def print_objectId_with_something_else():
a = "B - " + str(ObjectId("5733fa75eceb33173454efd0"))
print(a)
def playing_with_dt():
dt = datetime.datetime
print("")
def print_converted_numbers():
for i in range(0,53*53):
n = Progression.convert_number_to_naming_number_base(i,53)
if n % 53 == 0:
print(str(i) + ": " + str(n) +"*")
else:
print(str(i) + ": " + str(n))
def print_all_name_combos():
for i in range(0,2756):
s = Progression.generate_zone_name_suffix(i)
print(s)
def test_something_about_ifs():
#if 1==2:
# somebool = True
#if somebool is None:
# print("weird")
if 1 == 1:
someotherbool = True
if someotherbool:
print("makes sense")
testa = None
def g_testa():
global testa
if testa:
print("A:"+ str(testa))
else:
print("anope")
testa = 5
def g_testab():
try:
if testa:
print("B:"+ str(testa))
else:
print("bnope")
testa =7
except UnboundLocalError:
print("error in g_testab")
def g_testc():
global testa
if testa:
print("C:"+ str(testa))
else:
print("cnope")
testa =11
testdict = {}
def g_testa_dict():
global testdict
testdict['testa'] = 5
def g_testab_dict():
try:
if 'testa' in testdict:
print("B:"+ str(testa))
else:
print("bnope")
testdict['testa'] =7
except UnboundLocalError:
print("error in g_testab")
def g_testc_dict():
global testdict
if 'testa' in testdict:
print("C:"+ str(testdict['testa']))
else:
print("cnope")
testdict['testa'] =11
def hopefully_not_a_ref():
a = [4,6,2,3,5]
b = a[0]
a[0] = 7
print(b)
b = 19
print(a[0])
genList = [4,6,2,8,1,4,3,63,0,7]
def trying_out_gens():
yield genList.pop()
def gen2():
print(1)
yield 8
print(2)
yield
print(3)
yield
#for n in trying_out_gens():
# print(n)
def reverse_dict():
dict = {'4': 1,'a':9,'z':11}
dict['7'] = 15
dict['j'] = 2
items = dict.items()
print(type(items))
def is_a_list_in_a_tuple_still_a_ref():
a = [1,2,4,6,7,43,3,5]
b = [3,6,26,2,7,2,6,3,8,4]
t = (a,b)
t[0][1] = 999
print(a[1])
def trying_something_with_the_db_layer_and_globals():
a = DatabaseLayer.get_table("heros")
b = DatabaseLayer.get_table("dailies")
def magic_method_stuff():
a = 5
print(a.__gt__(0))
print(a.__gt__(6))
def testing_closures(a):
b = a
def nested():
print(b)
return nested
def testing_module_scope():
Replacer.replace()
ReplacerB.call_print_one()
def find_out_what_im_getting_back_from_db():
for i in range(0,1000):
obj = {'a':i,'b':i % 10, 'c': "test"}
DatabaseLayer.insert_thing(obj,"test")
s = DatabaseLayer.get_sorted_stuff_by_search({},"test")
pass
find_out_what_im_getting_back_from_db()
```
#### File: SpaceHabitRPG/CoreGame/StartUpRoutine.py
```python
from datetime import datetime, timedelta
from StoryEvents import storyEvents
from Zone import Zone
from Monster import Monster
import random
def build_first_time_checkin_messages(hero):
"""
We want to build a dict that can be sent back to the client when they
create a hero
args:
hero:
an object of the hero model. We want to iterate through the
zone options for where to go next and we need the shipname from
the hero.
return:
a dict with three keys:
key1: storyNotice:
value1: a story element to display to the user
key2: zoneNotice:
value2: the new zone description
key3: zonePrompt:
value3: a small list of dicts with the zone info from which the user
can choose
"""
result = {}
result['storyNotice'] = get_intro_with_shipName_included(hero.shipName)
result['zoneNotice'] = hero.zone.get_description()
result['zonePrompt'] = hero.zone.nextZoneReferenceList
return result
def get_intro_with_shipName_included(shipName):
"""
we want to check if the user gave their ship a name and then add it to the
story. And if they didn't, then we'll modify accordingly.
args:
shipName:
string, name of the ship, duh!
return:
modified story element
"""
storyElement = storyEvents['newUser']['description']
if shipName:
return storyElement.format(shipName)
else:
return storyElement.format(storyEvents['noShipNameIntro']['description'])
def check_in_and_get_notices(heroPk,accountPk,checkinTimeUtc,utcOffset):
"""
this should be called on page load and should be used to get any notices
for the user
args:
heroPk:
we want a pymongo objectId for the hero table
accountPk:
we want a pymongo objectId for the account table
checkinTimeUtc:
this needs to be the time that the user checked in, and it needs to be UTC
utcOffset:
the time-zone offset from UTC, in minutes, for the current locale.
returns:
we return a dict with two elements: 'story', which will be a list of
large blocks of text, and 'zoneChoice', which is a list of dicts each
of which contains 'zonePk' and 'description',
but zoneChoice may be None.
"""
from Hero import Hero
from Account import Account
hero = Hero.construct_model_from_pk(heroPk)
account = Account.construct_model_from_pk(accountPk)
lastCheckinTime = account.lastCheckInTime
account.lastCheckInTime = checkinTimeUtc
account.save_changes()
if not lastCheckinTime:
messages = build_first_time_checkin_messages(hero)
hero.save_changes()
return messages
if hero.isInZoneLimbo:
autoPickedZoneDict = random.choice(hero.zone.nextZoneReferenceList)
hero.zone = Zone.construct_model_from_dict(autoPickedZoneDict)
hero.monster = Monster.construct_new_monster(hero.zone.definitionKey,hero.zone.lvl)
timeDiffInDays = (checkinTimeUtc - lastCheckinTime)/(60*60*24)
if timeDiffInDays >= 1:
pass
```
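A hedged sketch of a call site for `check_in_and_get_notices`; the primary keys would come from account creation, and treating the check-in time as a UTC epoch timestamp is an assumption based on the day arithmetic above:
```python
import time

# heroId and accountId are the ObjectIds returned when the hero and account
# were created (see insert_new_user in the next file).
notices = check_in_and_get_notices(
    heroPk=heroId,
    accountPk=accountId,
    checkinTimeUtc=time.time(),  # assumed: seconds since the epoch, UTC
    utcOffset=-300,              # minutes, e.g. US Eastern
)
# On a first check-in this holds 'storyNotice', 'zoneNotice' and 'zonePrompt'.
```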
#### File: SpaceHabitRPG/Helpers/AuthenticationLayer.py
```python
from AllDBFields import BaseFields
from AllDBFields import AuthenticationFields
import CryptKeeper
import DatabaseLayer
import re
import cherrypy
def is_login_taken(login):
"""
checks the database to determine if an email address has already been used.
args:
login:
this should be an email address
returns:
a boolean. true if email is already used. false if not.
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
if collection.find({AuthenticationFields.USER_LOGIN:login.lower()})\
.count() > 0:
return True
else:
return False
def get_user(login):
"""
gets the user data from the database
args:
login:
this should be an email address
returns:
a dict containing the user's email address forced to lower case,
the user's encrypted password, and the user's email address in whatever
case they saved it as.
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
return collection.find_one({AuthenticationFields.USER_LOGIN:login.lower()})
def insert_new_user(login,pw,shipName=""):
"""
This is used during the create new user process and
saves a new user to the database
args:
login:
unique email address supplied by the user
pw:
password supplied by the user.
shipName:
The space ship name is supplied by the user.
Not required but lame if not there.
returns:
a tuple containing the primary key of the newly inserted user from the
User table, the primary key of the account created for the new user,
and the primary key of the hero created for the new user.
"""
from Account import Account
from Hero import Hero
if is_login_taken(login):
raise FileExistsError("That email is already taken")
loginPk = safe_insert_new_user(login,pw)
accountId = Account.create_new_account_in_db(loginPk)
heroId = Hero.construct_new_hero_in_db(accountId,shipName)
return (loginPk,accountId,heroId)
def safe_insert_new_user(login,pw):
"""
This is used during the create new user process.
this should be called when doing the actual inserting of a new user.
This encrypts the password before saving.
args:
login:
unique email address supplied by the user
pw:
the unencrypted password supplied by the user.
returns:
the primary key returned from the Database upon insertion of
the new user
"""
safePw = CryptKeeper.encrypt_str(pw)
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
id = collection.insert_one({AuthenticationFields.USER_LOGIN:login.lower(),
AuthenticationFields.USER_PASSWORD:safePw,
AuthenticationFields.USER_DESC: login}).inserted_id
return id
def authenticate_user(login,pw):
"""
This is used during the login process.
Determines if the user is trying to log on with a valid login and
also determines if the user is trying to log on with a correct password
args:
login:
email address supplied by the user
pw:
the unencrypted password supplied by the user.
returns:
a dict with two keys, a boolean 'success' and a list 'messages'.
'success' tells the caller whether or not the login attempt was successful.
If it was, 'success' is true and 'messages' should be an empty list.
If 'success' is false, then 'messages' will have between one
and two elements. Each of them will be an id-css selector for jquery
to use.
"""
user = get_user(login)
resultDict = {'messages':[],'success':False}
if not user:
resultDict['messages'].append("#bad_login")
return resultDict
if not CryptKeeper.password_is_right(\
pw,user[AuthenticationFields.USER_PASSWORD]):
resultDict['messages'].append("#bad_login_pw")
return resultDict
resultDict['success'] = True
return resultDict
def get_loginPk_by_login(validLogin):
"""
args:
validLogin:
I'm gonna assume that this login has already been vetted earlier
in the program.
return:
an objectId to the users collection
"""
collection = DatabaseLayer.get_table(AuthenticationFields.COLLECTION_NAME)
login = collection.find_one({AuthenticationFields.USER_LOGIN: validLogin})
return login[BaseFields.PK_KEY]
def get_accountPk_by_loginPk(loginPk):
"""
args:
loginPk:
an fk to the user collection
return:
an objectId to the account collection
"""
from AllDBFields import AccountDbFields
collection = DatabaseLayer.get_table(AccountDbFields.COLLECTION_NAME)
account = collection.find_one({AccountDbFields.LOGIN_PK_KEY:loginPk})
return account[AccountDbFields.PK_KEY]
def get_heroPk_by_accountPk(accountPk):
"""
args:
accountPk:
an fk to the account collection
return:
an objectId to the hero collection
"""
from AllDBFields import HeroDbFields
collection = DatabaseLayer.get_table(HeroDbFields.COLLECTION_NAME)
hero = collection.find_one({HeroDbFields.ACCOUNT_PK_KEY:accountPk})
return hero[HeroDbFields.PK_KEY]
def validate_email(email):
"""
This is used during the create new user process.
Determines if the email supplied is formatted correctly and doesn't
already exist.
args:
email:
An email address supplied by the user
returns:
a dict with two keys, a boolean 'success' and a list 'messages'.
'success' tells the caller whether or not the email is acceptable.
If it is, 'success' is true and 'messages' contains "#good_email".
If 'success' is false, then 'messages' will have one element,
an id-css selector for jquery to use.
"""
if not re.match(r"[^@]+@[^@]+\.[^@]+",email):
return {'success': False,'messages':["#bad_email"]}
if is_login_taken(email):
return {'success': False,'messages':["#taken_email"]}
return {'success': True,'messages':["#good_email"]}
def check_all_validations_for_new_login(email1,email2,pw1,pw2,shipName):
"""
This is used during the create new user process.
This method calls other validation methods and basically determines
whether any of the info that the user entered is invalid.
args:
all args should be strings and less than 256 characters else
this will return invalid.
email1:
this should be input that will also pass the
validate_email test
email2:
this should match email1.
pw1:
this only needs to pass any password complexity requirements
that have been added to the method. Currently the only
requirement is that the password must be at least 6
characters.
pw2:
this should match pw1:
returns:
a list of violations that either the user's email or password
commits. An empty list implies that everything is dandy.
"""
flags = []
if len(email1) <= 256:
emailValidationResult = validate_email(email1)
if not emailValidationResult['success']:
flags.extend(emailValidationResult['messages'])
if email1 != email2:
flags.append("#mismatched_email")
else:
if len(email1) > 256:
flags.append("#email1_too_long")
if len(email2) > 256:
flags.append("#email2_too_long")
if len(pw1) <= 256:
if len(pw1) < 6:
flags.append("#short_pw")
if pw1 != pw2:
flags.append("#mismatched_pw")
else:
if len(pw1) > 256:
flags.append("pw1_too_long")
if len(pw2) > 256:
flags.append("pw2_too_long")
if len(shipName) > 256:
flags.append("#shipname_too_long")
return flags
#disableAuthenticationRedirects should only ever be used in testing.
#Never in production
disableAuthenticationRedirects = False
def redirect_unauthenticated():
"""
a cherrypy decorator. Place the decorator in front of controller
methods that return parts of the website which the user needs to be
logged in to see. If they are not logged in, redirect them to the
login page.
"""
if disableAuthenticationRedirects:
return
username = cherrypy.session.get(BaseFields.SESSION_KEY)
if not username:
raise cherrypy.HTTPRedirect("/login")
def redirect_authenticated():
"""
a cherrypy decorator. Place the decorator in front of controller
methods that return the login part of the website.
If they are already logged in, redirect them to the main page.
"""
if disableAuthenticationRedirects:
return
username = cherrypy.session.get(BaseFields.SESSION_KEY)
if username:
raise cherrypy.HTTPRedirect("/")
#These are useable as soon as I import AuthenticationLayer
cherrypy.tools.redirect_unauthenticated = cherrypy.Tool("before_handler",redirect_unauthenticated)
cherrypy.tools.redirect_authenticated = cherrypy.Tool("before_handler",redirect_authenticated)
```
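A sketch of the intended sign-up and login flow built from the functions above; the email, password and ship name are placeholders, and a reachable database behind DatabaseLayer is assumed:
```python
flags = check_all_validations_for_new_login(
    "pilot@example.com", "pilot@example.com",
    "hunter2x", "hunter2x",
    "SS Ambiguous",
)
if not flags:
    loginPk, accountPk, heroPk = insert_new_user(
        "pilot@example.com", "hunter2x", "SS Ambiguous")
    result = authenticate_user("pilot@example.com", "hunter2x")
    # result == {'messages': [], 'success': True} when the password matches
else:
    print(flags)  # e.g. ["#mismatched_email", "#short_pw"]
```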
#### File: Tests/JSTests/LoginJSTest.py
```python
from SpaceUnitTest import SpaceUnitTest
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import SpaceHabitServer
import threading
import cherrypy
import time
import requests
import AuthenticationLayer
import DatabaseLayer
import DatabaseTestSetupCleanup as dbHelp
class Test_LoginJSTest(SpaceUnitTest):
@classmethod
def setUpClass(cls):
DatabaseLayer.isUnitTestMode = True
cls.server = SpaceHabitServer.HabitServer()
cls.server.start()
ticks = 0
while cherrypy.engine.state != cherrypy.engine.states.STARTED:
time.sleep(1)
ticks += 1
if ticks >= 10:
raise TimeoutError("ran out of time")
return super().setUpClass()
@classmethod
def tearDownClass(cls):
dbHelp.clean_up()
cls.server.stop()
ticks = 0
while cherrypy.engine.state != cherrypy.engine.states.STOPPED:
time.sleep(1)
ticks += 1
if ticks >= 10:
raise TimeoutError("ran out of time")
return super().tearDownClass()
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(5)
self.driver.get("http://127.0.0.1:8080")
self.input1 = self.driver.find_element_by_xpath("//input[@name='email_input_1']")
self.input2 = self.driver.find_element_by_xpath("//input[@name='email_input_2']")
self.pw1 = self.driver.find_element_by_xpath("//input[@name='pw_input_1']")
self.pw2 = self.driver.find_element_by_xpath("//input[@name='pw_input_2']")
self.ship = self.driver.find_element_by_xpath("//input[@name='ship_input']")
self.newUserModal = self.driver.find_element_by_id("new_user_box")
self.pwModal = self.driver.find_element_by_id("forgotten_pw_box")
return super().setUp()
def tearDown(self):
self.driver.quit()
return super().tearDown()
def open_new_user_box(self):
clickElem = self.driver.find_element_by_id("create_account")
clickElem.click()
def test_clearNewAccountWindow(self):
self.open_new_user_box()
self.input1.send_keys("aaaaa")
self.input2.send_keys("bbbbb")
self.pw1.send_keys("cccc")
self.pw2.send_keys("dddd")
self.ship.send_keys("eeee")
self.driver.execute_script("clearNewAccountWindow();")
self.assertEqual(self.input1.get_attribute('value'),"")
self.assertEqual(self.input2.get_attribute('value'),"")
self.assertEqual(self.pw1.get_attribute('value'),"")
self.assertEqual(self.pw2.get_attribute('value'),"")
self.assertEqual(self.ship.get_attribute('value'),"")
elem = self.driver.find_element_by_id("bad_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("taken_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("mismatched_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("good_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("short_pw")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("mismatched_pw")
self.assertFalse(elem.is_displayed())
def test_createAccountClick(self):
elem = self.driver.find_element_by_id("new_user_box")
self.assertFalse(elem.is_displayed())
self.driver.execute_script("createAccountClick();")
self.assertTrue(elem.is_displayed())
def test_forgotPWClick(self):
self.assertFalse(self.pwModal.is_displayed())
self.driver.execute_script("forgotPWClick();")
self.assertTrue(self.pwModal.is_displayed())
def test_cancelAddClick(self):
self.open_new_user_box()
self.assertTrue(self.newUserModal.is_displayed())
self.driver.execute_script("cancelAddClick();")
self.assertFalse(self.newUserModal.is_displayed())
def test_cancelForgotPassword(self):
self.driver.find_element_by_id("forgot_pw").click()
self.assertTrue(self.pwModal.is_displayed())
self.driver.execute_script("cancelForgotPassword();")
self.assertFalse(self.pwModal.is_displayed())
def test_validateEmailAjaxSuccess(self):
self.open_new_user_box()
self.driver.execute_script(
"validateNewEmailAjaxSuccess("
"{'messages':['#bad_email'],'success':false});")
elem = self.driver.find_element_by_id("bad_email")
self.assertTrue(elem.is_displayed())
self.driver.execute_script(
"validateNewEmailAjaxSuccess("
"{'messages':['#bad_email','#taken_email'],'success':false});")
elem = self.driver.find_element_by_id("bad_email")
self.assertTrue(elem.is_displayed())
elem = self.driver.find_element_by_id("taken_email")
self.assertTrue(elem.is_displayed())
self.driver.execute_script(
"validateNewEmailAjaxSuccess("
"{'messages':['#good_email'],'success':true});")
elem = self.driver.find_element_by_id("bad_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("taken_email")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("good_email")
self.assertTrue(elem.is_displayed())
def test_loginAjaxSuccessSession(self):
AuthenticationLayer.disableAuthenticationRedirects = True
self.driver.execute_script("loginAjaxSuccess({'messages':[\"#bad_login\",\"#bad_login_pw\"],'success':false});")
self.assertEqual(self.driver.title,"Login to Space Habit Frontier")
elem = self.driver.find_element_by_id("bad_login")
self.assertTrue(elem.is_displayed())
elem = self.driver.find_element_by_id("bad_login_pw")
self.assertTrue(elem.is_displayed())
self.driver.execute_script("loginAjaxSuccess({'messages':[\"#bad_login_pw\"],'success':false});")
self.assertEqual(self.driver.title,"Login to Space Habit Frontier")
elem = self.driver.find_element_by_id("bad_login")
self.assertFalse(elem.is_displayed())
elem = self.driver.find_element_by_id("bad_login_pw")
self.assertTrue(elem.is_displayed())
self.driver.execute_script("loginAjaxSuccess({'messages':[],'success':true});")
#WebDriverWait(self.driver,10).until(EC.title_is("Space Habit Frontier!"))
self.assertEqual(self.driver.title,"Space Habit Frontier!")
def test_onEmail2InputBlur(self):
self.open_new_user_box()
self.input1.send_keys("[email protected]")
self.input2.send_keys("[email protected]")
self.driver.execute_script("onEmail2InputBlur();")
elem = self.driver.find_element_by_id("mismatched_email")
self.assertTrue(elem.is_displayed())
self.input2.clear()
self.input2.send_keys("[email protected]")
self.assertEqual(self.input1.get_attribute('value'),self.input2.get_attribute('value'))
self.driver.execute_script("onEmail2InputBlur();")
self.assertFalse(elem.is_displayed())
def test_onPw1InputBlur(self):
self.open_new_user_box()
self.pw1.send_keys("123")
self.driver.execute_script("onPw1InputBlur();")
elem = self.driver.find_element_by_id("short_pw")
self.assertTrue(elem.is_displayed())
self.pw1.clear()
self.pw1.send_keys("123456")
self.driver.execute_script("onPw1InputBlur();")
self.assertFalse(elem.is_displayed())
def test_onPw2InputBlur(self):
self.open_new_user_box()
self.pw1.send_keys("abcdef")
self.pw2.send_keys("Abcdef")
self.driver.execute_script("onPw2InputBlur();")
elem = self.driver.find_element_by_id("mismatched_pw")
self.assertTrue(elem.is_displayed())
self.pw2.clear()
self.pw2.send_keys("abcdef")
self.assertEqual(self.pw1.get_attribute('value'),self.pw2.get_attribute('value'))
self.driver.execute_script("onPw2InputBlur();")
self.assertFalse(elem.is_displayed())
if __name__ == '__main__':
unittest.main()
```
#### File: Tests/JSTests/PlaygroundLoginJS.py
```python
from SpaceUnitTest import SpaceUnitTest
from selenium import webdriver
import SpaceHabitServer
import threading
import cherrypy
import time
import requests
import DatabaseLayer
import DatabaseTestSetupCleanup as dbHelp
class Test_PlaygroundLoginJS(SpaceUnitTest):
@classmethod
def setUpClass(cls):
DatabaseLayer.isUnitTestMode = True
cls.server = SpaceHabitServer.HabitServer()
cls.server.start()
ticks = 0
while cherrypy.engine.state != cherrypy.engine.states.STARTED:
time.sleep(1)
ticks += 1
if ticks >= 10:
raise TimeoutError("ran out of time")
return super().setUpClass()
@classmethod
def tearDownClass(cls):
dbHelp.clean_up()
cls.server.stop()
ticks = 0
while cherrypy.engine.state != cherrypy.engine.states.STOPPED:
time.sleep(1)
ticks += 1
if ticks >= 10:
raise TimeoutError("ran out of time")
return super().tearDownClass()
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(5)
self.driver.get("http://127.0.0.1:8080/playground")
self.input1 = self.driver.find_element_by_xpath("//input[@name='input1']")
self.input2 = self.driver.find_element_by_xpath("//input[@name='input2']")
self.input3 = self.driver.find_element_by_xpath("//input[@name='input3']")
self.email1 = self.driver.find_element_by_xpath("//input[@name='email_input_1']")
self.email2 = self.driver.find_element_by_xpath("//input[@name='email_input_2']")
self.pw1 = self.driver.find_element_by_xpath("//input[@name='pw_input_1']")
self.pw2 = self.driver.find_element_by_xpath("//input[@name='pw_input_2']")
self.ship = self.driver.find_element_by_xpath("//input[@name='ship_input']")
return super().setUp()
def tearDown(self):
self.driver.quit()
return super().tearDown()
def test_match(self):
self.input1.send_keys("abcdefg")
self.input2.send_keys("abcdefg")
self.input3.send_keys("abcdefg")
r = self.driver.execute_script("return ValidateInputsMatch('match');")
self.assertTrue(r)
def test_match_two(self):
self.email1.send_keys("abcdefg")
self.email2.send_keys("abcdefg")
r = self.driver.execute_script("return ValidateInputsMatch('match_email');")
self.assertTrue(r)
def test_mismatch_two(self):
self.email1.send_keys("abcdefg")
self.email2.send_keys("abcd")
r = self.driver.execute_script("return ValidateInputsMatch('match_email');")
self.assertFalse(r)
def test_mismatch_first(self):
self.input1.send_keys("abcdefi")
self.input2.send_keys("abcdefg")
self.input3.send_keys("abcdefg")
r = self.driver.execute_script("return ValidateInputsMatch('match');")
self.assertFalse(r)
def test_mismatch_last(self):
self.input1.send_keys("abcdefg")
self.input2.send_keys("abcdefg")
self.input3.send_keys("abcdefj")
r = self.driver.execute_script("return ValidateInputsMatch('match');")
self.assertFalse(r)
def test_match_empty(self):
from selenium.common.exceptions import WebDriverException
self.assertRaises(WebDriverException,lambda :self.driver.execute_script("return ValidateInputsMatch('empty');"))
def test_caseSensitivity(self):
self.input1.send_keys("abcdefg")
self.input2.send_keys("Abcdefg")
self.input3.send_keys("abcdefg")
r = self.driver.execute_script("return ValidateInputsMatch('match',true);")
self.assertFalse(r)
r = self.driver.execute_script("return ValidateInputsMatch('match');")
self.assertTrue(r)
if __name__ == '__main__':
unittest.main()
```
#### File: SpaceHabitRPG/Web Server/SpaceHabitServer.py
```python
from LoginController import LoginController
from ValidationController import ValidationController
from MainController import MainController
import cherrypy
import os
import AuthenticationLayer
class SpaceHabitHome(object):
def __init__(self):
self.testModeEnabled = False
@cherrypy.expose
@cherrypy.tools.redirect_unauthenticated()
def index(self):
return open("HabitFrontend/index.html",encoding="utf-8")
@cherrypy.expose
def playground(self):
import ConfigLayer
if ConfigLayer.get_is_debug():
return open("HabitFrontend/TestPlayground.html",encoding="utf-8")
import threading
class HabitServer(threading.Thread):
def __init__(self,port=8080,host="127.0.0.1"):
self.port = port
self.host = host
subdir = ""
static = ""
if os.name == "nt":
subdir = "\\HabitFrontend"
else:
subdir = "/HabitFrontend"
self.conf = {
'/':{
'tools.sessions.on': True,
'tools.staticdir.root': (os.path.abspath(os.getcwd()) + subdir)
},
'/login':{
},
'/static':{
'tools.staticdir.on': True,
'tools.staticdir.dir': "public"
}
}
threading.Thread.__init__(self)
self.sync = threading.Condition()
self.daemon = True
def run(self):
with self.sync:
cherrypy.server.socket_port = self.port
cherrypy.server.socket_host = self.host
webapp = SpaceHabitHome()
webapp.login = LoginController()
webapp.login.validate = ValidationController()
webapp.main = MainController()
cherrypy.tree.mount(webapp,"/",self.conf)
cherrypy.engine.start()
cherrypy.engine.block()
def stop(self):
with self.sync:
cherrypy.engine.exit()
cherrypy.engine.stop()
def server_starter():
server = HabitServer()
server.start()
if __name__ == "__main__":
server_starter()
``` |
{
"source": "joellord/website",
"score": 2
} |
#### File: website/sync/sync.py
```python
import copy
import fnmatch
import json
import logging
import markdown
from multiprocessing import Pool
import os
import os.path
import re
import sys
from urllib.error import URLError
from urllib.parse import urlparse, urljoin, urlunparse
from bs4 import BeautifulSoup
import click
import git
from jinja2 import Environment
from jinja2 import FileSystemLoader
from ruamel.yaml import YAML
CONTENT_DIR = './content/en/docs'
VAULT_DIR = './content/en/vault'
JS_ASSET_DIR = './assets/js'
TEMPLATE_DIR = './templates'
BASE_FOLDER = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_FOLDER = os.path.join(BASE_FOLDER, 'config')
DEFAULT_CACHE_FOLDER = os.path.join(BASE_FOLDER, '.cache')
jinja_env = Environment(loader=FileSystemLoader(TEMPLATE_DIR))
FM_BOUNDARY = re.compile(r"^(?:<!--\n)?-{3,}\s*$(?:\n-->)?", re.MULTILINE)
YAML_SEPARATOR = "---\n"
FOLDER_INDEX = '_index.md'
def doc_config(doc, folder_config, weight=None):
""" Return the target name, folder and header for doc based on folder_config
:param doc: the doc as a gitpython Blob
:param folder_config: a dict with the configuration of the folder the doc
was found in, as specified in the sync config file under `folders`
:params weight: optional weight of the doc. When specified it's set in the
returned header dict
:returns: a tuple (target_filename, target_folder, header), which describes
which files `doc` should be written to, in which folder, with which header
"""
index_file = folder_config.get('index', FOLDER_INDEX)
target_folder = folder_config.get('target', '')
# If the doc name is configured as index, rewrite it to FOLDER_INDEX
target_filename = FOLDER_INDEX if doc.name == index_file else doc.name
# If a header is specified, build and return it
header_dict = None
if 'header' in folder_config:
header_dict = copy.deepcopy(folder_config['header'])
if weight is not None:
header_dict['weight'] = weight
return target_filename, target_folder, header_dict
def docs_from_tree(tree, include=['*'], exclude=[]):
""" Get matching docs (git blobs) from a git tree
Filter all blobs directly under a tree based on include and
exclude lists. Filters are specified as list of unix style
filename pattern:
(https://docs.python.org/3/library/fnmatch.html) """
return filter(lambda b:
any(fnmatch.fnmatch(b.name, i) for i in include) and
not any(fnmatch.fnmatch(b.name, e) for e in exclude), tree.blobs)
def transform_docs(git_repo, tag, folders, site_folder, base_path, base_url):
""" Transform all folders configured for a tag
:param git_repo: a gitpython Repo object, that points to the source git repo
:param tag: a string that represent the git tag to be used
:param folders: a list of folder names with a dict config each, loaded from
sync config file
:param site_folder: the root folder on disk where files shall be written to
:param base_path: used to rewrite relative links to sync'ed files
:param base_url: used to rewrite relative links to unknown files
"""
# Get the root tree for the requested version from the repo
try:
tag = next(x for x in git_repo.tags if x.name == tag)
except StopIteration:
# When no tag is found try to match a branch (references)
try:
tag = next(x for x in git_repo.references if x.name == tag)
except StopIteration:
logging.error(f'No tag {tag} found in {git_repo}')
sys.exit(1)
# List all relevant blobs based on the folder config
files = []
for folder, folder_config in folders.items():
root = tag.commit.tree.join(folder)
docs = docs_from_tree(
tree=root, include=folder_config.get('include', ['*']),
exclude=folder_config.get('exclude', []))
# zip doc, folder, target and header so we can process them in parallel later
files.extend([(doc, folder, *doc_config(doc, folder_config, idx))
for idx, doc in enumerate(docs)])
# Build a dict of all valid local links
# This is used by `transform_link` to identify local links
local_files = {doc.path: (target, target_folder) for
doc, _, target, target_folder, _ in files}
# Build a list of tuple of `transform_doc` parameters
tranform_args = [
(*f, local_files, base_path, base_url, site_folder) for f in files]
with Pool() as pool:
results = pool.starmap(transform_doc, tranform_args)
# Return the list of files transformed
return results
def safe_makedirs(path):
try:
os.makedirs(path, exist_ok=True)
except FileExistsError:
pass
def transform_doc(doc, source_folder, target, target_folder, header,
local_files, base_path, base_url, site_folder):
""" Transform a single doc to the target file
Reads a doc (git blob), transforms the links in it
and writes the result to a target file
:param doc: The source doc as gitpython Blob
:param source_folder: the name of the folder in the source repo where
the file comes from
:param target: the name of the file the transformed doc shall be written to
:param target_folder: the folder within `site_folder` where the transformed
doc shall be written to
:param header: a dict with the content of a header (if any) to be prepended
in the transformed doc
:param local_files: a dict source file -> target used to rewrite
relative links to sync'ed files
:param base_path: used to rewrite relative links to sync'ed files
:param base_url: used to rewrite relative links to unknown files
:param site_folder: the root folder on disk where files shall be written to
"""
if doc.mime_type != 'text/plain':
logging.error(f'Cannot process {doc.mime_type} file {doc.path}')
sys.exit(1)
site_target_folder = os.path.normpath(os.path.join(site_folder, target_folder))
safe_makedirs(site_target_folder)
target = os.path.join(site_target_folder, target)
with open(target, 'w+') as target_doc:
# If there is a header configured, write it (in YAML)
doc_all = decode(doc.data_stream.read())
doc_markdown, fm = read_front_matter(doc_all)
# Update the doc front matter with the configured one and write it
write_front_matter(target_doc, fm, header)
doc_markdown = transform_links_doc(
doc_markdown, source_folder, local_files, base_path, base_url)
target_doc.write(doc_markdown)
return target
def decode(s, encodings=('utf8', 'latin1', 'ascii')):
for encoding in encodings:
try:
return s.decode(encoding)
except UnicodeDecodeError:
pass
return s.decode('ascii', 'ignore')
def read_front_matter(text):
""" returns a tuple text, frontmatter (as dict) """
if FM_BOUNDARY.match(text):
try:
_, fm, content = FM_BOUNDARY.split(text, 2)
except ValueError:
# Not enough values to unpack, boundary was matched once
return text, None
if content.startswith('\n'):
content = content[1:]
return content, YAML().load(fm)
else:
return text, None
def write_front_matter(target_doc, fm_doc, fm_config):
fm_doc = fm_doc or {}
fm_config = fm_config or {}
fm_doc.update(fm_config)
if fm_doc:
target_doc.write(YAML_SEPARATOR)
YAML().dump(fm_doc, target_doc)
target_doc.write(YAML_SEPARATOR)
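# Illustrative sketch (assumed document content): for a source doc that starts with
#   ---
#   title: Overview
#   ---
#   Some body text
# read_front_matter() returns ('Some body text', {'title': 'Overview'}); write_front_matter()
# then merges a configured header such as {'weight': 2} into that dict and re-emits the combined
# front matter between YAML_SEPARATOR markers before the body is written.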
def transform_links_doc(text, base_path, local_files, rewrite_path, rewrite_url):
""" transform all the links the text """
links = get_links(text)
# Rewrite map, only use links with an href
rewrite_map = {x.get("href"): transform_link(x.get("href"), base_path, local_files, rewrite_path, rewrite_url)
for x in links if x.get("href")}
for source, target in rewrite_map.items():
text = text.replace(source, target)
return text
def get_links(md):
""" return a list of all the links in a string formatted in markdown """
md = markdown.markdown(md)
soup = BeautifulSoup(md, 'html.parser')
return soup.find_all("a")
def transform_link(link, base_path, local_files, rewrite_path, rewrite_url):
""" Transform hrefs to be valid URLs on the web-site
Relative URLs are rewritten to `rewrite_path` when `link`
points to a sync'ed file. Else they're rewritten to `rewrite_url`.
Absolute URLs are not changed (they may be external)
Fragments are relative to the page and do not need changes,
except for lower() on local files because hugo generated
anchors are always lower case.
:param link: the link to be re-written
:param base_path: the folder where the source document that contains
the link lives
:param local_files: a dict source file -> (target file, folder) that
maps sync'ed files from their fully qualified source name into their
filename in the site folder
:param rewrite_path: the path local (sync'ed) files are rewritten to
:param rewrite_url: the URL remote files are rewritten to
:note: urlparse treats URLs without scheme like path only URLs,
so 'github.com' will be rewritten to 'rewrite_url/github.com'
"""
# ignore empty links
if not link:
return link
# urlparse returns a named tuple
parsed = urlparse(link)
if is_absolute_url(parsed):
return link
if is_fragment(parsed):
# A fragment-only link points to an .md file
return urlunparse(parsed._replace(fragment=parsed.fragment.lower()))
path = os.path.normpath(parsed.path)
# The list of local_files includes paths based on the root of the git
# repo, so we need to join base_path and normalize to fq_path to find the
# link in the list of local files
fq_path = os.path.normpath(os.path.join(base_path, parsed.path))
if fq_path in local_files:
target_file = local_files[fq_path][0]
target_folder = local_files[fq_path][1]
is_index = (target_file == FOLDER_INDEX)
filename, ext = os.path.splitext(target_file)
# Special handling for md files
if ext == '.md':
# Links to the index file are rendered as base_path/
if is_index:
target_file = ''
# links to other md files are rendered as .../[md filename]/
else:
target_file = filename + '/'
# for .md files, lower the case of fragments to match hugo's behaviour
parsed = parsed._replace(fragment=parsed.fragment.lower())
if target_folder:
new_path = [rewrite_path, target_folder, target_file]
else:
new_path = [rewrite_path, target_file]
return parsed._replace(path="/".join(new_path)).geturl()
# when not found on disk, append to the base_url
return urljoin(rewrite_url, parsed._replace(path=fq_path).geturl())
def is_absolute_url(parsed_url):
""" check if it is an absolute url """
return all([parsed_url.scheme, parsed_url.netloc])
def is_fragment(parsed_url):
""" determine if the url is an a link """
return len(parsed_url.fragment) > 0 and not any(parsed_url[:-1])
def download_resources_to_project(yaml_list, clones):
""" download the files from local clones based on a spec.
The YAML sync spec can be found in sync/config/README.md """
for entry in yaml_list:
component = entry['component']
repository = entry['repository']
local_clone = clones.get(repository)
if not local_clone:
logging.error(f'No git clone found for {repository} in {clones}')
sys.exit(1)
for index, tag in enumerate(entry['tags']):
logging.info(f'Syncing {component}@{tag["name"]}')
link_base_url = f'{repository}/tree/{tag["name"]}/'
if index == 0:
# docs for the first tag belong on the home page
base_path = f'/docs/{component}'.lower()
site_dir = f'{CONTENT_DIR}/{component}'
os.makedirs(site_dir, exist_ok=True)
else:
# docs for the other tags belong in the other versions, a.k.a. the vault
base_path = f'/vault/{component}-{tag["displayName"]}'
site_dir = f'{VAULT_DIR}/{component}-{tag["displayName"]}'
os.makedirs(site_dir, exist_ok=True)
results = transform_docs(
git_repo=local_clone,
tag=tag['name'],
folders=tag['folders'],
site_folder=site_dir,
base_path=base_path,
base_url=link_base_url)
logging.debug(f'Finished syncing {component}@{tag["name"]}: ')
logging.debug(f'{results}')
def get_files_in_path(path, file_type):
""" return a list of all the files in path that match the file_type """
file_list = []
# walk through every file in directory and its sub directories
for root, dirs, files in os.walk(path):
for file in files:
# append the file name to the list if it is the correct type
if file.endswith(file_type):
file_list.append(os.path.join(root, file))
return file_list
def load_config(files):
""" return a list of yaml files sorted based on a field called displayOrder """
yaml = YAML()
dic_list = []
for file in files:
with open(file, 'r') as text:
# get the paths from the config file
dic_list.append({
"filename": file,
"content": yaml.load(text)
})
dic_list.sort(key=lambda x: x['content']['displayOrder'])
return dic_list
def save_config(config):
""" save config files back to yaml """
yaml = YAML()
for c in config:
with open(c['filename'], 'w') as out:
yaml.dump(c['content'], out)
def get_tags(sync_config):
""" return a list of tags with, there name, and displayName """
tags = []
for tag in sync_config['tags']:
tags.append({'name': tag['name'], 'displayName': tag['displayName']})
return tags
def get_versions(sync_configs):
""" return the list of all the versions and there tag, name, archive """
component_versions = []
for sync_config in sync_configs:
component_versions.append({
'name': sync_config['component'],
'tags': get_tags(sync_config),
'archive': sync_config['archive']
})
return component_versions
def create_resource(dest_prefix, file, versions):
""" create site resource based on the version and file """
resource_template = jinja_env.get_template(f'{file}.template')
if file.endswith(".js"):
serialize = json.dumps(versions)
resource = resource_template.render(component_versions_json=serialize)
elif file.endswith(".md"):
resource = resource_template.render(component_versions=versions)
else:
logging.warning(f'Cannot create resource for {file}. Only .js and .md supported')
return
with open(f'{dest_prefix}/{file}', 'w') as f:
f.write(resource)
def clone_repo(repo, update):
project = repo.split('/')[-1]
clone_dir = os.path.join(DEFAULT_CACHE_FOLDER, project)
if os.path.isdir(clone_dir):
if not update:
print(f'{project}: Cache folder {clone_dir} found, skipping clone.')
return repo, git.Repo(clone_dir)
# Cleanup and update via fetch --all
print(f'{project}: updating started')
cloned_repo = git.Repo(clone_dir)
cloned_repo.git.reset('--hard')
cloned_repo.git.clean('-xdf')
cloned_repo.git.fetch('--all')
print(f'{project}: updating completed')
return repo, cloned_repo
# Clone the repo
print(f'{project}: cloning started')
cloned_repo = git.Repo.clone_from(repo, clone_dir)
print(f'{project}: cloning completed')
return repo, cloned_repo
def clone_repos(sync_configs, update):
# Make sure the cache folder exists
safe_makedirs(DEFAULT_CACHE_FOLDER)
with Pool() as pool:
results = pool.starmap(clone_repo, [(x['repository'], update) for x in sync_configs])
return {x: y for x, y in results}
@click.command()
@click.option('--config-folder', default=DEFAULT_CONFIG_FOLDER,
help='the folder that contains the config files')
@click.option('--update-cache/--no-update-cache', default=False,
help='update clone caches. !! This will force cleanup caches !!')
def sync(config_folder, update_cache):
""" fetch all the files and sync it to the website """
# get the path of the urls needed
config_files = get_files_in_path(config_folder, ".yaml")
config = [x["content"] for x in load_config(config_files)]
# clone all relevant repos
clones = clone_repos(config, update_cache)
# download resources from the clone cache
download_resources_to_project(config, clones)
versions = get_versions(config)
# create version switcher script
create_resource(JS_ASSET_DIR, "version-switcher.js", versions)
# create index for vault
create_resource(VAULT_DIR, FOLDER_INDEX, versions)
if __name__ == '__main__':
sync()
``` |
{
"source": "JoelLucaAdams/PlaylistBot",
"score": 3
} |
#### File: PlaylistBot/cogs/youtube_api.py
```python
import os
import pickle
import re
from datetime import timedelta
from dotenv import load_dotenv
import google_auth_oauthlib.flow
from google.auth.transport.requests import Request
import googleapiclient.discovery
from discord.ext import commands
Playlists = {
"chill - baka brigade" : "PLXfw-OhAIheTakyvLpf50BN9xQqhhJiN7",
"vibe" : "<KEY>",
"Programming_music" : "<KEY>",
"EDM": "PLXfw-OhAIheShH-C1eiLmOy7ARW3iMbSB",
"Folk": "<KEY>",
"Bass boosted": "<KEY>"
}
class youtube_api(commands.Cog):
"""
Contains commands to call the Youtube API
"""
api_service_name = "youtube"
api_version = "v3"
def oauth2():
"""
Calls the Youtube API using OAuth 2.0
Requirements:
token.pickle - stores login credentials (will be created if not present)
client_secrets.json - OAuth 2.0 client ID (Will fail if not present)
"""
credentials = None
scopes = ["https://www.googleapis.com/auth/youtube.force-ssl"]
#for debug mode
'''
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file('client_secrets.json', scopes=scopes)
flow.run_local_server(port=8080, prompt='consent', authorization_prompt_message='')
credentials = flow.credentials
print(credentials.to_json())
return googleapiclient.discovery.build(youtube_api.api_service_name, youtube_api.api_version, credentials=credentials)
'''
# token.pickle stores the user's credentials from previously successful logins
if os.path.exists('token.pickle'):
print('Loading Credentials From File...')
with open('token.pickle', 'rb') as token:
credentials = pickle.load(token)
# If there are no valid credentials available, then either refresh the token or log in.
if not credentials or not credentials.valid:
if credentials and credentials.expired and credentials.refresh_token:
print('Refreshing Access Token...')
credentials.refresh(Request())
else:
print('Fetching New Tokens...')
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file('client_secrets.json', scopes=scopes)
flow.run_local_server(port=8080, prompt='consent', authorization_prompt_message='')
credentials = flow.credentials
# Save the credentials for the next run
with open('token.pickle', 'wb') as f:
print('Saving Credentials for Future Use...')
pickle.dump(credentials, f)
return googleapiclient.discovery.build(youtube_api.api_service_name, youtube_api.api_version, credentials=credentials)
def key():
"""
Calls the Youtube API using a key
Requirements:
.env - containing a API key with the prefix YT_API_KEY
"""
load_dotenv()
api_key = os.getenv("YT_API_KEY")
return googleapiclient.discovery.build(youtube_api.api_service_name, youtube_api.api_version, developerKey=api_key)
def playlist_items(playlistId: str):
"""
Responds with a list of items in the playlist
Parameters:
playlistId: str - the playlist's ID
"""
youtube = youtube_api.oauth2()
request = youtube.playlistItems().list(
part="snippet", playlistId=playlistId
)
return request.execute()
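# Illustrative usage sketch (hypothetical call, assuming valid OAuth credentials are present):
#   items = youtube_api.playlist_items(Playlists["EDM"])
#   titles = [item["snippet"]["title"] for item in items["items"]]
# The response is the raw playlistItems.list JSON, so each entry's metadata lives under "snippet".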
def add_video(playlistId: str, videoId: str):
"""
Adds a new video to a playlist
Parameters:
playlistId: str - the playlist's ID
videoId: str - the video's ID
"""
youtube = youtube_api.oauth2()
request = youtube.playlistItems().insert(
part="snippet",
body={
"snippet": {
"playlistId": playlistId,
"resourceId": {
"kind": "youtube#video",
"videoId": videoId
}
}
}
)
return request.execute()
def remove_video(playlistId: str, videoId: str):
"""
Removes a video from a playlist
Parameters:
playlistId: str - the playlist's ID
videoId: str - the video's ID
"""
youtube = youtube_api.oauth2()
long_video_id = youtube_api.find_video_from_playlist(playlistId=playlistId, videoId=videoId)['id']
if long_video_id is None:
return "No Video found"
request = youtube.playlistItems().delete(id=long_video_id)
return request.execute()
def find_video_from_playlist(playlistId: str, videoId: str):
"""
Returns json about a video from a playlist
Parameters:
playlistId: str - the playlist's ID
videoId: str - the video's ID
"""
playlist = youtube_api.playlist_items(playlistId)
for item in playlist["items"]:
if item["snippet"]["resourceId"]["videoId"] == videoId:
return item
return None
def find_video(videoId: str):
"""
Returns json about a video
Parameters:
videoId: str - the video's ID
"""
youtube = youtube_api.key()
request = youtube.videos().list(
part="snippet,contentDetails",
id=videoId
)
return request.execute()
def find_playlist(playlistId: str):
"""
Returns a json object with the playlist's information
Parameters:
playlistId: str - the playlist's ID
"""
youtube = youtube_api.oauth2()
request = youtube.playlists().list(
part="snippet",
id=playlistId
)
return request.execute()
def get_local_playlist_key(index: int):
"""
Returns a playlist key
Parameters:
index: int - index of playlist key
"""
return list(Playlists)[index]
def get_playlist_length(playlistId: str):
"""
Returns the playlist length
Parameters:
playlistId: str - the playlist's ID
"""
youtube = youtube_api.key()
hours_pattern = re.compile(r'(\d+)H')
minutes_pattern = re.compile(r'(\d+)M')
seconds_pattern = re.compile(r'(\d+)S')
total_seconds = 0
nextPageToken = None
while True:
pl_request = youtube.playlistItems().list(
part='contentDetails',
playlistId=playlistId,
maxResults=50,  # the YouTube Data API caps playlistItems.list at 50 results per page
pageToken=nextPageToken
)
pl_response = pl_request.execute()
vid_ids = []
for item in pl_response['items']:
vid_ids.append(item['contentDetails']['videoId'])
vid_request = youtube.videos().list(
part="contentDetails",
id=','.join(vid_ids)
)
vid_response = vid_request.execute()
for item in vid_response['items']:
duration = item['contentDetails']['duration']
hours = hours_pattern.search(duration)
minutes = minutes_pattern.search(duration)
seconds = seconds_pattern.search(duration)
hours = int(hours.group(1)) if hours else 0
minutes = int(minutes.group(1)) if minutes else 0
seconds = int(seconds.group(1)) if seconds else 0
video_seconds = timedelta(
hours=hours,
minutes=minutes,
seconds=seconds
).total_seconds()
total_seconds += video_seconds
nextPageToken = pl_response.get('nextPageToken')
if not nextPageToken:
break
total_seconds = int(total_seconds)
minutes, seconds = divmod(total_seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'{hours}:{minutes}:{seconds}'
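# Illustrative note (hypothetical value): a video duration of 'PT1H2M10S' is matched by the
# regexes above as hours=1, minutes=2, seconds=10, i.e. 3730 seconds, which is accumulated into
# total_seconds across every page of the playlist before being formatted back as H:M:S.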
"""
default_playlist = Playlists['chill - baka brigade']
default_song = "CPhXKak_bHw"
if __name__=="__main__":
#print(youtube_api.remove_video(default_playlist, default_song))
print(youtube_api.remove_video(playlistId=default_playlist, videoId=default_song))
"""
``` |
{
"source": "JoelLucaAdams/PogBot",
"score": 3
} |
#### File: PogBot/cogs/utilities.py
```python
import discord
from discord import *
from discord.ext import commands
from discord.ext.commands import Context
import time
import os
import random
class Utilities(commands.Cog):
"""
General Utilities
"""
@commands.command()
async def ping(self, ctx: Context):
"""
Status check
"""
start_time = time.time()
message = await ctx.send('pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms`')
end_time = time.time()
await message.edit(content='pong. `DWSP latency: ' + str(round(ctx.bot.latency * 1000)) + 'ms` ' +
'`Response time: ' + str(int((end_time - start_time) * 1000)) + 'ms`')
@commands.command()
async def source(self, ctx: Context):
"""
Print a link to the source code
"""
await ctx.send(content='Created by `<NAME>`\n'
'https://github.com/JoelLucaAdams/PogBot')
@commands.command()
async def pog(self, ctx: Context):
"""
Responds with random message
"""
await ctx.message.delete()
pogMessages = ['you pogged?', 'Once you pog you just can\'t stop', 'Pogging bells, pogging bells, pogging all the way', 'You just tested positive for pog', 'Certified Poggers Moment™️']
embed = Embed(title='POGGERS!', description=random.choice(pogMessages), color=discord.Colour.green())
embed.set_footer(icon_url=ctx.author.avatar_url, text= f'Requested by {ctx.author.display_name}')
await ctx.send(embed=embed)
@commands.command()
async def image(self, ctx: Context):
"""
Responds with a random image from the great pog wars
"""
files = os.listdir('./Poggers')
index = random.randint(0, len(files) - 1)  # upper bound of randint is inclusive, avoid an IndexError
await ctx.send(file= File(f'./Poggers/{files[index]}'))
```
#### File: JoelLucaAdams/PogBot/PogBot.py
```python
import os
import logging
import discord
from discord.ext import commands
from discord.ext.commands import DefaultHelpCommand
from dotenv import load_dotenv
# logs data to the discord.log file, if this file doesn't exist at runtime it is created automatically
from cogs.utilities import Utilities
logger = logging.getLogger('discord')
logger.setLevel(logging.INFO) # logging levels: NOTSET (all), DEBUG (bot interactions), INFO (bot connected etc)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
# load the private discord token from .env file.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
# Initialise the Bot object with an accessible help Command object
helpCommand = DefaultHelpCommand()
bot = commands.Bot(
command_prefix="!pog ",
help_command=helpCommand
)
# Setup the General cog with the help command
generalCog = Utilities()
bot.add_cog(generalCog)
helpCommand.cog = generalCog
@bot.event
async def on_ready():
"""
Do something when the bot is ready to use.
"""
print(f'{bot.user.name} has connected to Discord!')
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.streaming, name="poggers"))
@bot.event
async def on_message(message):
"""
Checks if the user's input contains pog and reacts with :andyPog:
"""
if message.content.lower().__contains__('pog') and not message.author.bot:
await message.add_reaction(r'<:andypog:764160369912315905>') # use \:andypog: to get link
await bot.process_commands(message)
@bot.event
async def on_command_error(ctx, error):
"""
Handle the Error message in a nice way.
"""
if isinstance(error, commands.errors.CheckFailure):
await ctx.send(error)
elif isinstance(error, commands.errors.MissingRequiredArgument):
await ctx.send('You are missing a required argument.')
elif isinstance(error, commands.errors.CommandNotFound):
pass
else:
await ctx.send('An unexpected error occurred.')
logging.error(error)
# Start the bot
bot.run(TOKEN)
``` |
{
"source": "joelluijmes/dbt",
"score": 2
} |
#### File: test/unit/test_compiler.py
```python
import unittest
from unittest.mock import MagicMock, patch
import dbt.flags
import dbt.compilation
from dbt.adapters.postgres import Plugin
from dbt.contracts.files import FileHash
from dbt.contracts.graph.manifest import Manifest
from dbt.contracts.graph.parsed import NodeConfig, DependsOn, ParsedModelNode
from dbt.contracts.graph.compiled import CompiledModelNode, InjectedCTE
from dbt.node_types import NodeType
from datetime import datetime
from .utils import inject_adapter, clear_plugin, config_from_parts_or_dicts
class CompilerTest(unittest.TestCase):
def assertEqualIgnoreWhitespace(self, a, b):
self.assertEqual(
"".join(a.split()),
"".join(b.split()))
def setUp(self):
dbt.flags.STRICT_MODE = True
self.maxDiff = None
self.model_config = NodeConfig.from_dict({
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'vars': {},
'quoting': {},
'column_types': {},
'tags': [],
})
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
}
profile_cfg = {
'outputs': {
'test': {
'type': 'postgres',
'dbname': 'postgres',
'user': 'root',
'host': 'thishostshouldnotexist',
'pass': 'password',
'port': 5432,
'schema': 'public'
}
},
'target': 'test'
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self._generate_runtime_model_patch = patch.object(dbt.compilation, 'generate_runtime_model')
self.mock_generate_runtime_model = self._generate_runtime_model_patch.start()
inject_adapter(Plugin.adapter(self.config), Plugin)
# self.mock_adapter = PostgresAdapter MagicMock(type=MagicMock(return_value='postgres'))
# self.mock_adapter.Relation =
# self.mock_adapter.get_compiler.return_value = dbt.compilation.Compiler
# self.mock_plugin = MagicMock(
# adapter=MagicMock(
# credentials=MagicMock(return_value='postgres')
# )
# )
# inject_adapter(self.mock_adapter, self.mock_plugin)
# so we can make an adapter
def mock_generate_runtime_model_context(model, config, manifest):
def ref(name):
result = f'__dbt__CTE__{name}'
unique_id = f'model.root.{name}'
model.extra_ctes.append(InjectedCTE(id=unique_id, sql=None))
return result
return {'ref': ref}
self.mock_generate_runtime_model.side_effect = mock_generate_runtime_model_context
def tearDown(self):
self._generate_runtime_model_patch.stop()
clear_plugin(Plugin)
def test__prepend_ctes__already_has_cte(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql=(
'with cte as (select * from something_else) '
'select * from __dbt__CTE__ephemeral'),
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
compiled_sql='select * from source_table',
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
# '2018-02-14T09:15:13Z'
generated_at=datetime(2018, 2, 14, 9, 15, 13),
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result, manifest.nodes['model.root.view'])
self.assertEqual(result.extra_ctes_injected, True)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
'), cte as (select * from something_else) '
'select * from __dbt__CTE__ephemeral'))
self.assertEqual(
manifest.nodes['model.root.ephemeral'].extra_ctes_injected,
True)
def test__prepend_ctes__no_ctes(self):
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql=('with cte as (select * from something_else) '
'select * from source_table'),
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql=('with cte as (select * from something_else) '
'select * from source_table'),
checksum=FileHash.from_contents(''),
),
'model.root.view_no_cte': CompiledModelNode(
name='view_no_cte',
database='dbt',
schema='analytics',
alias='view_no_cte',
resource_type=NodeType.Model,
unique_id='model.root.view_no_cte',
fqn=['root', 'view_no_cte'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from source_table',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql=('select * from source_table'),
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(
result,
manifest.nodes.get('model.root.view'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
manifest.nodes.get('model.root.view').compiled_sql)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes.get('model.root.view_no_cte'),
manifest,
{})
self.assertEqual(
result,
manifest.nodes.get('model.root.view_no_cte'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
manifest.nodes.get('model.root.view_no_cte').compiled_sql)
def test__prepend_ctes(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[],
injected_sql='',
compiled_sql='select * from source_table',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result,
manifest.nodes.get('model.root.view'))
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
def test__prepend_ctes__cte_not_compiled(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
parsed_ephemeral = ParsedModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
checksum=FileHash.from_contents(''),
)
compiled_ephemeral = CompiledModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from source_table',
compiled=True,
compiled_sql='select * from source_table',
injected_sql='select * from source_table',
extra_ctes_injected=True,
extra_ctes=[],
checksum=FileHash.from_contents(''),
)
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql='select * from source_table')],
injected_sql='',
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': parsed_ephemeral,
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
with patch.object(compiler, 'compile_node') as compile_node:
compile_node.return_value = compiled_ephemeral
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
compile_node.assert_called_once_with(parsed_ephemeral, manifest, {})
self.assertEqual(result,
manifest.nodes.get('model.root.view'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].compiled)
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral as ('
'select * from source_table'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
def test__prepend_ctes__multiple_levels(self):
ephemeral_config = self.model_config.replace(materialized='ephemeral')
manifest = Manifest(
macros={},
nodes={
'model.root.view': CompiledModelNode(
name='view',
database='dbt',
schema='analytics',
alias='view',
resource_type=NodeType.Model,
unique_id='model.root.view',
fqn=['root', 'view'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(nodes=['model.root.ephemeral']),
config=self.model_config,
tags=[],
path='view.sql',
original_file_path='view.sql',
raw_sql='select * from {{ref("ephemeral")}}',
compiled=True,
extra_ctes_injected=False,
extra_ctes=[InjectedCTE(id='model.root.ephemeral', sql=None)],
injected_sql=None,
compiled_sql='select * from __dbt__CTE__ephemeral',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral': ParsedModelNode(
name='ephemeral',
database='dbt',
schema='analytics',
alias='ephemeral',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral',
fqn=['root', 'ephemeral'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral.sql',
original_file_path='ephemeral.sql',
raw_sql='select * from {{ref("ephemeral_level_two")}}',
checksum=FileHash.from_contents(''),
),
'model.root.ephemeral_level_two': ParsedModelNode(
name='ephemeral_level_two',
database='dbt',
schema='analytics',
alias='ephemeral_level_two',
resource_type=NodeType.Model,
unique_id='model.root.ephemeral_level_two',
fqn=['root', 'ephemeral_level_two'],
package_name='root',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on=DependsOn(),
config=ephemeral_config,
tags=[],
path='ephemeral_level_two.sql',
original_file_path='ephemeral_level_two.sql',
raw_sql='select * from source_table',
checksum=FileHash.from_contents(''),
),
},
sources={},
docs={},
generated_at='2018-02-14T09:15:13Z',
disabled=[],
files={},
)
compiler = dbt.compilation.Compiler(self.config)
result, _ = compiler._recursively_prepend_ctes(
manifest.nodes['model.root.view'],
manifest,
{}
)
self.assertEqual(result, manifest.nodes['model.root.view'])
self.assertTrue(result.extra_ctes_injected)
self.assertEqualIgnoreWhitespace(
result.injected_sql,
('with __dbt__CTE__ephemeral_level_two as ('
'select * from source_table'
'), __dbt__CTE__ephemeral as ('
'select * from __dbt__CTE__ephemeral_level_two'
') '
'select * from __dbt__CTE__ephemeral'))
self.assertTrue(manifest.nodes['model.root.ephemeral'].compiled)
self.assertTrue(manifest.nodes['model.root.ephemeral_level_two'].compiled)
self.assertTrue(manifest.nodes['model.root.ephemeral'].extra_ctes_injected)
self.assertTrue(manifest.nodes['model.root.ephemeral_level_two'].extra_ctes_injected)
```
#### File: test/unit/test_snowflake_adapter.py
```python
import agate
import unittest
from contextlib import contextmanager
from unittest import mock
import dbt.flags as flags
from dbt.adapters.snowflake import SnowflakeAdapter
from dbt.adapters.snowflake import Plugin as SnowflakePlugin
from dbt.adapters.snowflake.column import SnowflakeColumn
from dbt.adapters.base.query_headers import MacroQueryStringSetter
from dbt.clients import agate_helper
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from dbt.parser.results import ParseResult
from snowflake import connector as snowflake_connector
from .utils import config_from_parts_or_dicts, inject_adapter, mock_connection, TestAdapterConversions, load_internal_manifest_macros
class TestSnowflakeAdapter(unittest.TestCase):
def setUp(self):
flags.STRICT_MODE = False
profile_cfg = {
'outputs': {
'test': {
'type': 'snowflake',
'account': 'test_account',
'user': 'test_user',
'database': 'test_database',
'warehouse': 'test_warehouse',
'schema': 'public',
},
},
'target': 'test',
}
project_cfg = {
'name': 'X',
'version': '0.1',
'profile': 'test',
'project-root': '/tmp/dbt/does-not-exist',
'quoting': {
'identifier': False,
'schema': True,
},
'query-comment': 'dbt',
}
self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
self.assertEqual(self.config.query_comment.comment, 'dbt')
self.assertEqual(self.config.query_comment.append, False)
self.handle = mock.MagicMock(
spec=snowflake_connector.SnowflakeConnection)
self.cursor = self.handle.cursor.return_value
self.mock_execute = self.cursor.execute
self.patcher = mock.patch(
'dbt.adapters.snowflake.connections.snowflake.connector.connect'
)
self.snowflake = self.patcher.start()
self.load_patch = mock.patch('dbt.parser.manifest.make_parse_result')
self.mock_parse_result = self.load_patch.start()
self.mock_parse_result.return_value = ParseResult.rpc()
self.snowflake.return_value = self.handle
self.adapter = SnowflakeAdapter(self.config)
self.adapter._macro_manifest_lazy = load_internal_manifest_macros(self.config)
self.adapter.connections.query_header = MacroQueryStringSetter(self.config, self.adapter._macro_manifest_lazy)
self.qh_patch = mock.patch.object(self.adapter.connections.query_header, 'add')
self.mock_query_header_add = self.qh_patch.start()
self.mock_query_header_add.side_effect = lambda q: '/* dbt */\n{}'.format(q)
self.adapter.acquire_connection()
inject_adapter(self.adapter, SnowflakePlugin)
def tearDown(self):
# we want a unique self.handle every time.
self.adapter.cleanup_connections()
self.qh_patch.stop()
self.patcher.stop()
self.load_patch.stop()
def test_quoting_on_drop_schema(self):
relation = SnowflakeAdapter.Relation.create(
database='test_database',
schema='test_schema',
quote_policy=self.adapter.config.quoting
)
self.adapter.drop_schema(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ndrop schema if exists test_database."test_schema" cascade', None)
])
def test_quoting_on_drop(self):
relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.drop_relation(relation)
self.mock_execute.assert_has_calls([
mock.call(
'/* dbt */\ndrop table if exists test_database."test_schema".test_table cascade',
None
)
])
def test_quoting_on_truncate(self):
relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='test_table',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.truncate_relation(relation)
self.mock_execute.assert_has_calls([
mock.call('/* dbt */\ntruncate table test_database."test_schema".test_table', None)
])
def test_quoting_on_rename(self):
from_relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='table_a',
type='table',
quote_policy=self.adapter.config.quoting,
)
to_relation = self.adapter.Relation.create(
database='test_database',
schema='test_schema',
identifier='table_b',
type='table',
quote_policy=self.adapter.config.quoting,
)
self.adapter.rename_relation(
from_relation=from_relation,
to_relation=to_relation
)
self.mock_execute.assert_has_calls([
mock.call(
'/* dbt */\nalter table test_database."test_schema".table_a rename to test_database."test_schema".table_b',
None
)
])
@contextmanager
def current_warehouse(self, response):
# there is probably some elegant way built into mock.patch to do this
fetchall_return = self.cursor.fetchall.return_value
execute_side_effect = self.mock_execute.side_effect
def execute_effect(sql, *args, **kwargs):
if sql == '/* dbt */\nselect current_warehouse() as warehouse':
self.cursor.description = [['name']]
self.cursor.fetchall.return_value = [[response]]
else:
self.cursor.description = None
self.cursor.fetchall.return_value = fetchall_return
return self.mock_execute.return_value
self.mock_execute.side_effect = execute_effect
try:
yield
finally:
self.cursor.fetchall.return_value = fetchall_return
self.mock_execute.side_effect = execute_side_effect
def _strip_transactions(self):
result = []
for call_args in self.mock_execute.call_args_list:
args, kwargs = tuple(call_args)
is_transactional = (
len(kwargs) == 0 and
len(args) == 2 and
args[1] is None and
args[0] in {'BEGIN', 'COMMIT'}
)
if not is_transactional:
result.append(call_args)
return result
def test_pre_post_hooks_warehouse(self):
with self.current_warehouse('warehouse'):
config = {'snowflake_warehouse': 'other_warehouse'}
result = self.adapter.pre_model_hook(config)
self.assertIsNotNone(result)
calls = [
mock.call('/* dbt */\nselect current_warehouse() as warehouse', None),
mock.call('/* dbt */\nuse warehouse other_warehouse', None)
]
self.mock_execute.assert_has_calls(calls)
self.adapter.post_model_hook(config, result)
calls.append(mock.call('/* dbt */\nuse warehouse warehouse', None))
self.mock_execute.assert_has_calls(calls)
def test_pre_post_hooks_no_warehouse(self):
with self.current_warehouse('warehouse'):
config = {}
result = self.adapter.pre_model_hook(config)
self.assertIsNone(result)
self.mock_execute.assert_not_called()
self.adapter.post_model_hook(config, result)
self.mock_execute.assert_not_called()
def test_cancel_open_connections_empty(self):
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_master(self):
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections[key] = mock_connection('master')
self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
def test_cancel_open_connections_single(self):
master = mock_connection('master')
model = mock_connection('model')
model.handle.session_id = 42
key = self.adapter.connections.get_thread_identifier()
self.adapter.connections.thread_connections.update({
key: master,
1: model,
})
with mock.patch.object(self.adapter.connections, 'add_query') as add_query:
query_result = mock.MagicMock()
add_query.return_value = (None, query_result)
self.assertEqual(
len(list(self.adapter.cancel_open_connections())), 1)
add_query.assert_called_once_with('select system$abort_session(42)')
def test_client_session_keep_alive_false_by_default(self):
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key=None, application='dbt')
])
def test_client_session_keep_alive_true(self):
self.config.credentials = self.config.credentials.replace(
client_session_keep_alive=True)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=True, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key=None, application='dbt')
])
def test_user_pass_authentication(self):
self.config.credentials = self.config.credentials.replace(
password='<PASSWORD>',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
password='<PASSWORD>', role=None, schema='public',
user='test_user', warehouse='test_warehouse', private_key=None,
application='dbt')
])
def test_authenticator_user_pass_authentication(self):
self.config.credentials = self.config.credentials.replace(
password='<PASSWORD>',
authenticator='test_sso_url',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
password='<PASSWORD>', role=None, schema='public',
user='test_user', warehouse='test_warehouse',
authenticator='test_sso_url', private_key=None,
application='dbt', client_store_temporary_credential=True)
])
def test_authenticator_externalbrowser_authentication(self):
self.config.credentials = self.config.credentials.replace(
authenticator='externalbrowser'
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', authenticator='externalbrowser',
private_key=None, application='dbt', client_store_temporary_credential=True)
])
def test_authenticator_oauth_authentication(self):
self.config.credentials = self.config.credentials.replace(
authenticator='oauth',
token='<PASSWORD>',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', authenticator='oauth', token='my-<PASSWORD>',
private_key=None, application='dbt', client_store_temporary_credential=True)
])
@mock.patch('dbt.adapters.snowflake.SnowflakeCredentials._get_private_key', return_value='test_key')
def test_authenticator_private_key_authentication(self, mock_get_private_key):
self.config.credentials = self.config.credentials.replace(
private_key_path='/tmp/test_key.p8',
private_key_passphrase='<PASSWORD>',
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key='test_key',
application='dbt')
])
@mock.patch('dbt.adapters.snowflake.SnowflakeCredentials._get_private_key', return_value='test_key')
def test_authenticator_private_key_authentication_no_passphrase(self, mock_get_private_key):
self.config.credentials = self.config.credentials.replace(
private_key_path='/tmp/test_key.p8',
private_key_passphrase=None,
)
self.adapter = SnowflakeAdapter(self.config)
conn = self.adapter.connections.set_connection_name(name='new_connection_with_new_config')
self.snowflake.assert_not_called()
conn.handle
self.snowflake.assert_has_calls([
mock.call(
account='test_account', autocommit=False,
client_session_keep_alive=False, database='test_database',
role=None, schema='public', user='test_user',
warehouse='test_warehouse', private_key='test_key',
application='dbt')
])
class TestSnowflakeAdapterConversions(TestAdapterConversions):
def test_convert_text_type(self):
rows = [
['', 'a1', 'stringval1'],
['', 'a2', 'stringvalasdfasdfasdfa'],
['', 'a3', 'stringval3'],
]
agate_table = self._make_table_of(rows, agate.Text)
expected = ['text', 'text', 'text']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_text_type(agate_table, col_idx) == expect
def test_convert_number_type(self):
rows = [
['', '23.98', '-1'],
['', '12.78', '-2'],
['', '79.41', '-3'],
]
agate_table = self._make_table_of(rows, agate.Number)
expected = ['integer', 'float8', 'integer']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_number_type(agate_table, col_idx) == expect
def test_convert_boolean_type(self):
rows = [
['', 'false', 'true'],
['', 'false', 'false'],
['', 'false', 'true'],
]
agate_table = self._make_table_of(rows, agate.Boolean)
expected = ['boolean', 'boolean', 'boolean']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_boolean_type(agate_table, col_idx) == expect
def test_convert_datetime_type(self):
rows = [
['', '20190101T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190102T01:01:01Z', '2019-01-01 01:01:01'],
['', '20190103T01:01:01Z', '2019-01-01 01:01:01'],
]
agate_table = self._make_table_of(rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime])
expected = ['timestamp without time zone', 'timestamp without time zone', 'timestamp without time zone']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_datetime_type(agate_table, col_idx) == expect
def test_convert_date_type(self):
rows = [
['', '2019-01-01', '2019-01-04'],
['', '2019-01-02', '2019-01-04'],
['', '2019-01-03', '2019-01-04'],
]
agate_table = self._make_table_of(rows, agate.Date)
expected = ['date', 'date', 'date']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_date_type(agate_table, col_idx) == expect
def test_convert_time_type(self):
# dbt's default type testers actually don't have a TimeDelta at all.
agate.TimeDelta
rows = [
['', '120s', '10s'],
['', '3m', '11s'],
['', '1h', '12s'],
]
agate_table = self._make_table_of(rows, agate.TimeDelta)
expected = ['time', 'time', 'time']
for col_idx, expect in enumerate(expected):
assert SnowflakeAdapter.convert_time_type(agate_table, col_idx) == expect
class TestSnowflakeColumn(unittest.TestCase):
def test_text_from_description(self):
col = SnowflakeColumn.from_description('my_col', 'TEXT')
assert col.column == 'my_col'
assert col.dtype == 'TEXT'
assert col.char_size is None
assert col.numeric_precision is None
assert col.numeric_scale is None
assert col.is_float() is False
assert col.is_number() is False
assert col.is_numeric() is False
assert col.is_string() is True
assert col.is_integer() is False
assert col.string_size() == 16777216
col = SnowflakeColumn.from_description('my_col', 'VARCHAR')
assert col.column == 'my_col'
assert col.dtype == 'VARCHAR'
assert col.char_size is None
assert col.numeric_precision is None
assert col.numeric_scale is None
assert col.is_float() is False
assert col.is_number() is False
assert col.is_numeric() is False
assert col.is_string() is True
assert col.is_integer() is False
assert col.string_size() == 16777216
def test_sized_varchar_from_description(self):
col = SnowflakeColumn.from_description('my_col', 'VARCHAR(256)')
assert col.column == 'my_col'
assert col.dtype == 'VARCHAR'
assert col.char_size == 256
assert col.numeric_precision is None
assert col.numeric_scale is None
assert col.is_float() is False
assert col.is_number() is False
assert col.is_numeric() is False
assert col.is_string() is True
assert col.is_integer() is False
assert col.string_size() == 256
def test_sized_decimal_from_description(self):
col = SnowflakeColumn.from_description('my_col', 'DECIMAL(1, 0)')
assert col.column == 'my_col'
assert col.dtype == 'DECIMAL'
assert col.char_size is None
assert col.numeric_precision == 1
assert col.numeric_scale == 0
assert col.is_float() is False
assert col.is_number() is True
assert col.is_numeric() is True
assert col.is_string() is False
assert col.is_integer() is False
def test_float_from_description(self):
col = SnowflakeColumn.from_description('my_col', 'FLOAT8')
assert col.column == 'my_col'
assert col.dtype == 'FLOAT8'
assert col.char_size is None
assert col.numeric_precision is None
assert col.numeric_scale is None
assert col.is_float() is True
assert col.is_number() is True
assert col.is_numeric() is False
assert col.is_string() is False
assert col.is_integer() is False
``` |
{
"source": "joelluijmes/leveldb-export",
"score": 3
} |
#### File: leveldb-export/leveldb_export/export.py
```python
from io import BytesIO
from typing import Generator, Union
from google.appengine.api.datastore_types import EmbeddedEntity
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
from google.appengine.api.datastore import Entity
from .records import RecordsReader
from .utils import embedded_entity_to_dict
def parse_entity_field(value):
"""Function for recursive parsing (e.g., arrays)"""
if isinstance(value, EmbeddedEntity):
# Some nested document
return embedded_entity_to_dict(value, {})
if isinstance(value, list):
return [parse_entity_field(x) for x in value]
return value
def parse_leveldb_documents(path_or_fp: Union[str, BytesIO]) -> Generator[dict, None, None]:
"""
Parses a LevelDB file and returns a generator of parsed objects. Objects are returned as parsed
documents from the file, and augmented with a _key dict containing the entity's id, name,
namespace, app and path.
Args:
- path_or_fp (str | io.BytesIO): path to local file (if str) or an open file pointer.
Note, if not str it assumes the file is already open and user is responsible
for closing the file.
Raises:
- RuntimeError: if the document already contains a _key property.
- InvalidRecordError: if invalid record is encountered.
Returns:
- Generator[dict]: generator returning each distinctive document.
"""
# Open the file if path was provided
if isinstance(path_or_fp, str):
fp = open(path_or_fp, "rb")
else:
fp = path_or_fp
reader = RecordsReader(fp, no_check_crc=True)
for record in reader:
# Read the record as entity
entity_proto = entity_pb2.EntityProto()
entity_proto.ParseFromString(record)
entity = Entity.FromPb(entity_proto)
# Parse the values
data = {}
for name, value in entity.items():
# NOTE: this check is unlikely, if we run into this we could use a different name
# or make it configurable. At least we will be aware when it happens :)
if name == "_key":
raise RuntimeError("Failed to parse document, _key already present.")
data[name] = parse_entity_field(value)
key = entity.key()
data["_key"] = dict(
id=key.id(),
name=key.name(),
namespace=key.namespace(),
app=key.app(),
path="/".join(key.to_path()),
)
yield data
# On completion, possibly close the file pointer if we opened it
if isinstance(path_or_fp, str):
fp.close()
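# Illustrative usage sketch (hypothetical export file name): consume the generator to get plain
# dicts, e.g.
#   for doc in parse_leveldb_documents("output-0"):
#       print(doc["_key"]["path"], doc.get("title"))
# Every yielded document is the flattened Datastore entity plus the synthetic _key metadata
# assembled above.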
```
#### File: leveldb-export/leveldb_export/records.py
```python
import logging
import struct
import google_crc32c
# Data format is defined @ https://github.com/google/leveldb/blob/master/doc/log_format.md
ENDIANNESS = "little"
CRC_INIT = 0
BLOCK_SIZE = 32 * 1024
HEADER_FORMAT = "<IHB"
HEADER_LENGTH = struct.calcsize(HEADER_FORMAT)
# the type is the "B" part of the HEADER_FORMAT
RECORD_TYPE_LENGTH = struct.calcsize("<B")
RECORD_TYPE_NONE = 0
RECORD_TYPE_FULL = 1
RECORD_TYPE_FIRST = 2
RECORD_TYPE_MIDDLE = 3
RECORD_TYPE_LAST = 4
class InvalidRecordError(Exception):
"""Raised when invalid record encountered."""
class FileReader(object):
"""Interface specification for writers to be used with recordrecords module.
FileReader defines a reader with position and efficient seek/position
determining. All reads occur at current position.
"""
def read(self, size):
"""Read data from file.
Reads data from current position and advances position past the read data
block.
Args:
size: number of bytes to read.
Returns:
iterable over bytes. If the number of bytes read is less than the 'size' argument,
it is assumed that end of file was reached.
"""
raise NotImplementedError()
def tell(self):
"""Get current file position.
Returns:
current position as a byte offset in the file as integer.
"""
raise NotImplementedError()
_CRC_MASK_DELTA = 0xA282EAD8
def _unmask_crc(masked_crc):
"""Unmask crc.
Args:
masked_crc: masked integer crc.
Returns:
original crc.
"""
rot = (masked_crc - _CRC_MASK_DELTA) & 0xFFFFFFFF
return ((rot >> 17) | (rot << 15)) & 0xFFFFFFFF
class RecordsReader(object):
"""A reader for records format."""
def __init__(self, reader, no_check_crc=True):
self.__reader = reader
self.no_check_crc = no_check_crc
def __try_read_record(self):
"""Try reading a record.
Returns:
(data, record_type) tuple.
Raises:
EOFError: when end of file was reached.
InvalidRecordError: when valid record could not be read.
"""
block_remaining = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE
if block_remaining < HEADER_LENGTH:
return ("", RECORD_TYPE_NONE)
header = self.__reader.read(HEADER_LENGTH)
if len(header) != HEADER_LENGTH:
raise EOFError("Read %s bytes instead of %s" % (len(header), HEADER_LENGTH))
(masked_crc, length, record_type) = struct.unpack(HEADER_FORMAT, header)
if length + HEADER_LENGTH > block_remaining:
raise InvalidRecordError("Length is too big")
data = self.__reader.read(length)
if len(data) != length:
raise EOFError("Not enough data read. Expected: %s but got %s" % (length, len(data)))
if record_type == RECORD_TYPE_NONE:
return ("", record_type)
if not self.no_check_crc:
actual_crc = google_crc32c.value(record_type.to_bytes(RECORD_TYPE_LENGTH, ENDIANNESS) + data)
if actual_crc != _unmask_crc(masked_crc):
raise InvalidRecordError("Data crc does not match")
return (data, record_type)
def __sync(self):
"""Skip reader to the block boundary."""
pad_length = BLOCK_SIZE - self.__reader.tell() % BLOCK_SIZE
if pad_length and pad_length != BLOCK_SIZE:
data = self.__reader.read(pad_length)
if len(data) != pad_length:
raise EOFError("Read %d bytes instead of %d" % (len(data), pad_length))
def read(self):
"""Reads record from current position in reader."""
data = None
while True:
last_offset = self.tell()
try:
(chunk, record_type) = self.__try_read_record()
if record_type == RECORD_TYPE_NONE:
self.__sync()
elif record_type == RECORD_TYPE_FULL:
if data is not None:
logging.warning(
"Ordering corruption: Got FULL record while already " "in a chunk at offset %d",
last_offset,
)
return chunk
elif record_type == RECORD_TYPE_FIRST:
if data is not None:
logging.warning(
"Ordering corruption: Got FIRST record while already " "in a chunk at offset %d",
last_offset,
)
data = chunk
elif record_type == RECORD_TYPE_MIDDLE:
if data is None:
logging.warning(
"Ordering corruption: Got MIDDLE record before FIRST " "record at offset %d",
last_offset,
)
else:
data += chunk
elif record_type == RECORD_TYPE_LAST:
if data is None:
logging.warning(
"Ordering corruption: Got LAST record but no chunk is in " "progress at offset %d",
last_offset,
)
else:
result = data + chunk
data = None
return result
else:
raise InvalidRecordError("Unsupported record type: %s" % record_type)
except InvalidRecordError:
logging.warning(
"Invalid record encountered at %s. Syncing to " "the next block",
last_offset,
)
data = None
self.__sync()
def __iter__(self):
"""Iterate through records."""
try:
while True:
yield self.read()
except EOFError:
pass
def tell(self):
"""Return file's current position."""
return self.__reader.tell()
``` |
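For completeness, a sketch of using `RecordsReader` directly to walk the raw records (the path is again a placeholder; normally `parse_leveldb_documents` wraps this):
```python
# Hypothetical direct use of RecordsReader on an export shard.
from leveldb_export.records import RecordsReader

with open("output-0", "rb") as fp:
    for record in RecordsReader(fp, no_check_crc=True):
        print(f"record of {len(record)} bytes")
```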
{
"source": "joelmacey/accelerated-data-pipelines",
"score": 2
} |
#### File: CurationEngine/src/createNewEventRule.py
```python
import traceback
import os
import json
import logging
import boto3
import botocore
from boto3.dynamodb.types import TypeDeserializer
logger = logging.getLogger()
class CreateNewEventRuleException(Exception):
pass
# Subclass of boto's TypeDeserializer for DynamoDB to adjust
# for DynamoDB Stream format.
class StreamTypeDeserializer(TypeDeserializer):
def _deserialize_n(self, value):
return float(value)
def _deserialize_b(self, value):
return value # Already in Base64
def lambda_handler(event, context):
'''
lambda_handler Top level lambda handler ensuring all exceptions
are caught and logged.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
:raises CreateNewEventRuleException: On any error or exception
'''
try:
return create_new_event_rule(event, context)
except CreateNewEventRuleException:
raise
except Exception as e:
traceback.print_exc()
raise CreateNewEventRuleException(e)
def create_new_event_rule(event, context):
"""
create_new_event_rule Creates a new event rule and event target in
event bridge to be used in the accelerated data pipelines.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
"""
ddb_deserializer = StreamTypeDeserializer()
records = event['Records']
start_curation_process_function_arn = os.environ['START_CURATION_PROCESS_FUNCTION_ARN']
for record in records:
ddb = record['dynamodb']
# Get the event type and curation type for the record
event_name = record['eventName'].upper() # INSERT, MODIFY, REMOVE
if (event_name == 'INSERT') or (event_name == 'MODIFY'):
if 'NewImage' not in ddb:
logger.warning(
'Cannot process stream if it does not contain NewImage')
continue
doc_fields = ddb_deserializer.deserialize({'M': ddb['NewImage']})
curation_type = doc_fields['curationType']
print(f'Creating or modifying event for curationType {curation_type}')
put_rule(curation_type, doc_fields['cronExpression'])
put_target(curation_type, start_curation_process_function_arn)
elif event_name == 'REMOVE':
doc_fields = ddb_deserializer.deserialize({'M': ddb['Keys']})
curation_type = doc_fields['curationType']
print(f'Removing event for curationType {curation_type}')
remove_targets(curation_type)
delete_rule(curation_type)
return 'Success'
def put_rule(curation_type, schedule_expression):
client = boto3.client('events')
response = client.put_rule(
Name=f'{curation_type}-scheduled-curation',
ScheduleExpression=schedule_expression,
State='ENABLED',
Description=f'Event rule for curation type {curation_type}'
)
def delete_rule(curation_type):
client = boto3.client('events')
response = client.delete_rule(
Name=f'{curation_type}-scheduled-curation'
)
def put_target(curation_type, function_arn):
client = boto3.client('events')
input = {"curationType": curation_type}
response = client.put_targets(
Rule=f'{curation_type}-scheduled-curation',
Targets=[
{
'Id': f'{curation_type}-event-target',
'Arn': function_arn,
'Input': json.dumps(input)
}
]
)
def remove_targets(curation_type):
client = boto3.client('events')
response = client.remove_targets(
Rule=f'{curation_type}-scheduled-curation',
Ids=[
f'{curation_type}-event-target',
]
)
```
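A sketch of the DynamoDB Streams event shape this handler expects, useful for invoking it locally (all field values are made up; the real stream comes from the curation details table):
```python
# Hypothetical stream event; attribute values use the DynamoDB typed format
# that StreamTypeDeserializer unpacks above.
sample_event = {
    "Records": [
        {
            "eventName": "INSERT",
            "dynamodb": {
                "NewImage": {
                    "curationType": {"S": "daily-sales"},
                    "cronExpression": {"S": "cron(0 6 * * ? *)"},
                }
            },
        }
    ]
}
# lambda_handler(sample_event, None)  # requires START_CURATION_PROCESS_FUNCTION_ARN
# to be set and AWS credentials with events:PutRule / events:PutTargets permissions.
```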
#### File: CurationEngine/src/recordUnsuccessfulCuration.py
```python
import time
import traceback
import json
import boto3
import os
class RecordUnsuccessfulCurationException(Exception):
pass
def lambda_handler(event, context):
'''
lambda_handler Top level lambda handler ensuring all exceptions
are caught and logged.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
:raises RecordUnsuccessfulCurationException: On any error or exception
'''
try:
return record_unsuccessfull_curation(event, context)
except RecordUnsuccessfulCurationException:
raise
except Exception as e:
traceback.print_exc()
raise RecordUnsuccessfulCurationException(e)
def record_unsuccessfull_curation(event, context):
"""
record_unsuccessfull_curation Records the unsuccessful curation
in the curation history table and sends an SNS notification.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
"""
record_unsuccessful_curation_in_curation_history(event, context)
send_unsuccessful_curation_sns(event, context)
return event
def record_unsuccessful_curation_in_curation_history(event, context):
'''
record_unsuccessful_curation_in_curation_history Records the unsuccessful
curation in the curation history table.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
'''
dynamodb = boto3.resource('dynamodb')
try:
curationType = event['curationDetails']['curationType']
curation_execution_name = event['curationDetails']['curationExecutionName']
error = event['error-info']['Error']
error_cause = json.loads(event['error-info']['Cause'])
curation_history_table = event["settings"]["curationHistoryTableName"]
if 'stackTrace' in error_cause:
del error_cause['stackTrace']
dynamodb_item = {
'curationType': curationType,
'timestamp': int(time.time() * 1000),
'curationExecutionName': curation_execution_name,
'error': error,
'errorCause': error_cause
}
if 'scriptFileCommitId' in event:
dynamodb_item['scriptFileCommitId'] = event['scriptFileCommitId']
if 'queryOutputLocation' in event['queryDetails']:
dynamodb_item['curationKey'] = event['queryDetails']['queryOutputLocation']
if 'queryExecutionId' in event['queryDetails']:
dynamodb_item['athenaQueryExecutionId'] = event['queryDetails']['queryExecutionId']
if 'curationLocation' in event['curationDetails']:
dynamodb_item['curationOutputLocation'] = event['curationDetails']['curationLocation']
if 'outputDetails' in event:
dynamodb_item['outputDetails'] = event['outputDetails']
if 'glueDetails' in event:
dynamodb_item['glueDetails'] = event['glueDetails']
dynamodb_table = dynamodb.Table(curation_history_table)
dynamodb_table.put_item(Item=dynamodb_item)
except Exception as e:
traceback.print_exc()
raise RecordUnsuccessfulCurationException(e)
def send_unsuccessful_curation_sns(event, context):
'''
send_unsuccessful_curation_sns Sends an SNS notifying subscribers
that curation has failed.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
'''
curationType = event['curationDetails']['curationType']
error = event['error-info']['Error']
error_cause = json.loads(event['error-info']['Cause'])
subject = f'Data Pipeline - curation for {curationType} has failed'
message = f'The curation for {curationType} has failed due to {error} with detail:\n{error_cause}'
if 'SNS_FAILURE_ARN' in os.environ:
failureSNSTopicARN = os.environ['SNS_FAILURE_ARN']
send_sns(failureSNSTopicARN, subject, message)
def send_sns(topic_arn, subject, message):
'''
send_sns Sends an SNS with the given subject and message to the
specified ARN.
:param topic_arn: The SNS ARN to send the notification to
:type topic_arn: Python String
:param subject: The subject of the SNS notification
:type subject: Python String
:param message: The SNS notification message
:type message: Python String
'''
client = boto3.client('sns')
client.publish(TopicArn=topic_arn, Subject=subject, Message=message)
```
#### File: CurationEngine/src/retrieveCurationDetails.py
```python
import traceback
import boto3
class RetrieveCurationDetailsException(Exception):
pass
def get_code_commit_file(repo, filePath):
client = boto3.client('codecommit')
response = client.get_file(
repositoryName=repo,
filePath=filePath
)
return response
def lambda_handler(event, context):
'''
lambda_handler Top level lambda handler ensuring all exceptions
are caught and logged.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
:raises RetrieveCurationDetailsException: On any error or exception
'''
try:
return get_curation_details(event, context)
except RetrieveCurationDetailsException:
raise
except Exception as e:
traceback.print_exc()
raise RetrieveCurationDetailsException(e)
def get_curation_details(event, context):
"""
get_curation_details Retrieves the curation details from the
curation details dynamodb table and attaches them to the event
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
:return: The event object passed into the method
:rtype: Python type - Dict / list / int / string / float / None
"""
attach_file_settings_to_event(event, context)
return event
def attach_file_settings_to_event(event, context):
'''
attach_file_settings_to_event Attach the configured file settings
to the lambda event.
:param event: AWS Lambda uses this to pass in event data.
:type event: Python type - Dict / list / int / string / float / None
:param context: AWS Lambda uses this to pass in runtime information.
:type context: LambdaContext
'''
dynamodb = boto3.resource('dynamodb')
table = event["settings"]["curationDetailsTableName"]
ddb_table = dynamodb.Table(table)
# Get the item. There can only be one or zero - it is the table's
# partition key - but use strong consistency so we respond instantly
# to any change. This can be revisited if we want to conserve RCUs
# by, say, caching this value and updating it every minute.
response = ddb_table.get_item(
Key={'curationType': event['curationDetails']['curationType']}, ConsistentRead=True)
item = response['Item']
# Retrieve all the details around Athena
athenaDetails = {}
if 'athenaDetails' in item:
athenaDetails['athenaOutputBucket'] = item['athenaDetails']['athenaOutputBucket'] \
if 'athenaOutputBucket' in item['athenaDetails'] \
else None
athenaDetails['athenaOutputFolderPath'] = item['athenaDetails']['athenaOutputFolderPath'] \
if 'athenaOutputFolderPath' in item['athenaDetails'] \
else None
if 'deleteAthenaQueryFile' in item['athenaDetails'] and item['athenaDetails']['deleteAthenaQueryFile'] == False:
athenaDetails['deleteAthenaQueryFile'] = False
else:
athenaDetails['deleteAthenaQueryFile'] = True
if 'deleteMetadataFile' in item['athenaDetails'] and item['athenaDetails']['deleteMetadataFile'] == False:
athenaDetails['deleteMetadataFileBool'] = False
else:
athenaDetails['deleteMetadataFileBool'] = True
else:
athenaDetails = {
"athenaOutputBucket": None,
"athenaOutputFolderPath": None,
"deleteAthenaQueryFile": True,
"deleteMetadataFileBool": True
}
# Retrieve all the details around the output of the file
outputDetails = {}
outputDetails['outputFilename'] = item['outputDetails']['filename'] \
if 'filename' in item['outputDetails'] \
else None
outputDetails['outputFolderPath'] = item['outputDetails']['outputFolderPath'] \
if 'outputFolderPath' in item['outputDetails'] \
else None
if 'includeTimestampInFilename' in item['outputDetails'] and item['outputDetails']['includeTimestampInFilename'] == True:
outputDetails['includeTimestampInFilenameBool'] = True
else:
outputDetails['includeTimestampInFilenameBool'] = False
outputDetails['metadata'] = item['outputDetails']['metadata'] \
if 'metadata' in item['outputDetails'] \
else None
outputDetails['tags'] = item['outputDetails']['tags'] \
if 'tags' in item['outputDetails'] \
else None
outputDetails['outputBucket'] = item['outputDetails']['outputBucket']
event.update({'scriptFilePath': item['sqlFilePath']})
event.update({'glueDetails': item['glueDetails']})
event.update({'athenaDetails': athenaDetails})
event.update({'outputDetails': outputDetails})
code_commit_res = get_code_commit_file(event['settings']['scriptsRepo'], event['scriptFilePath'])
event.update({'scriptFileCommitId':code_commit_res['commitId']})
``` |
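For reference, a sketch of a curation-details item consistent with the lookups performed above (field names follow the code; the values are illustrative only):
```python
# Hypothetical DynamoDB item for the curation details table; only the keys read by
# attach_file_settings_to_event are shown, with made-up values.
example_item = {
    "curationType": "daily-sales",
    "cronExpression": "cron(0 6 * * ? *)",
    "sqlFilePath": "curation/daily_sales.sql",
    "glueDetails": {"database": "curated", "table": "daily_sales"},
    "athenaDetails": {
        "athenaOutputBucket": "my-athena-results",
        "deleteAthenaQueryFile": True,
    },
    "outputDetails": {
        "outputBucket": "my-curated-bucket",
        "filename": "daily_sales.csv",
        "includeTimestampInFilename": True,
    },
}
```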
{
"source": "joelmatejka/double-sided-g-code",
"score": 3
} |
#### File: joelmatejka/double-sided-g-code/convert.py
```python
import argparse
import os.path
from pygcode import *
def append_code_from_file(filename, output_gcode, last_file):
output_gcode.append(Line("; start of file " + filename))
with open(filename, 'r') as fh:
for line_text in fh.readlines():
line = Line(line_text)
for code in line.block.gcodes:
if code == GCodeEndProgram():
# Hopefully only M02 can break continuity
# also hope that there is only M02 on the last line
output_gcode.append(Line("; end of file " + filename))
return
output_gcode.append(line)
output_gcode.append(Line("; end of file " + filename))
def append_rotation(output_gcode, angle, speed):
output_gcode.append(Line("; rotate to " + str(angle)))
output_gcode.append(Line(str(GCodeLinearMove(A=angle)) + " " + str(GCodeFeedRate(speed))))
def append_move_xy(output_gcode, x, y, speed):
output_gcode.append(Line("; move to x: " + str(x) + " y: " + str(y)))
output_gcode.append(Line(str(GCodeLinearMove(X=x, Y=y)) + " " + str(GCodeFeedRate(speed))))
def append_move_z(output_gcode, z, speed):
output_gcode.append(Line("; move to z: " + str(z)))
output_gcode.append(Line(str(GCodeLinearMove(Z=z)) + " " + str(GCodeFeedRate(speed))))
parser = argparse.ArgumentParser(
description='Simple G-code converter utility for conversion of two input files into one double-sided')
def files(s):
try:
filename, angle = s.split(':')
angle = int(angle)
if not os.path.isfile(filename):
raise ValueError("Path to file is not correct: " + filename)
return filename, angle
except:
raise argparse.ArgumentTypeError("G-code file list must be filename:angle")
def coordinates(s):
try:
x, y, z = map(int, s.split(':'))
return x, y, z
except:
raise argparse.ArgumentTypeError("Safe position must be x:y:z")
# hack to avoid listing required arguments under 'optional arguments', where all non-positional arguments go by default
required_arguments = parser.add_argument_group('required arguments')
required_arguments.add_argument('--files_input', help="G-code files with angles of rotation in deg filename:angle",
dest="files_input", type=files, nargs='+', required=True)
required_arguments.add_argument('--file_output', help="Output G-code", dest="file_output", required=True)
required_arguments.add_argument('--safe_position', help="Position (X,Y,Z) where we can safely rotate A axis",
dest="safe_position", type=coordinates, required=True)
required_arguments.add_argument('--speed_xy', help="Speed of X and Y axis for movement into safe position",
dest="speed_xy", type=int, required=True)
required_arguments.add_argument('--speed_z', help="Speed of Z axis for movement into safe position", dest="speed_z",
type=int, required=True)
required_arguments.add_argument('--speed_a', help="Speed of A axis in deg/min", dest="speed_a", type=int, required=True)
argres = parser.parse_args()
safe_x, safe_y, safe_z = argres.safe_position
speed_xy = argres.speed_xy
speed_z = argres.speed_z
speed_a = argres.speed_a
output_gcode = []
output_gcode.append(Line("; this code was generated by A-axis G-code generator available at: https://github.com/joelmatejka/a-axis-g-code-generator"))
output_gcode.append(Line("; arguments passed to the script are following: " + str(argres)))
for (filename, angle) in argres.files_input:
append_move_z(output_gcode, safe_z, speed_z)
append_move_xy(output_gcode, safe_x, safe_y, speed_xy)
append_rotation(output_gcode, angle, speed_a)
append_code_from_file(filename, output_gcode, False)
# TODO: shall we move back to the zero A position at the end?
# append_rotation(output_gcode, 0, speed_a)
output_gcode.append(GCodeEndProgram())
# print('\n'.join(str(line) for line in output_gcode))
with open(argres.file_output, 'w') as fh:
for line in output_gcode:
fh.write(str(line) + '\n')
``` |
{
"source": "Joel-max-s/pycryptobot",
"score": 3
} |
#### File: models/config/logger_parser.py
```python
import re
from .default_parser import isCurrencyValid, defaultConfigParse, merge_config_and_args
from models.helper.LogHelper import Logger
def parser(app, logger_config):
#print('Logger Configuration parser')
if not logger_config:
raise Exception('There is an error in your config dictionary')
if not app:
raise Exception('No app is passed')
if 'filelog' in logger_config:
if isinstance(logger_config['filelog'], int):
if logger_config['filelog'] in [0, 1]:
app.filelog = logger_config['filelog']
else:
raise TypeError('filelog must be type of int')
if app.filelog:
if 'logfile' in logger_config:
if isinstance(logger_config['logfile'], str):
if app.logfile == "pycryptobot.log":
app.logfile = logger_config['logfile']
else:
raise TypeError('logfile must be type of str')
if 'fileloglevel' in logger_config:
if isinstance(logger_config['fileloglevel'], str):
if logger_config['fileloglevel'] in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'):
app.fileloglevel = logger_config['fileloglevel']
else:
raise TypeError('fileloglevel must be one of: "CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"')
else:
raise TypeError('fileloglevel must be type of str')
if 'consolelog' in logger_config:
if isinstance(logger_config['consolelog'], int):
if logger_config['consolelog'] in [0, 1]:
app.consolelog = logger_config['consolelog']
else:
raise TypeError('consolelog must be type of int')
if app.consolelog:
if 'consoleloglevel' in logger_config:
if isinstance(logger_config['consoleloglevel'], str):
if logger_config['consoleloglevel'] in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'):
app.consoleloglevel = logger_config['consoleloglevel']
else:
raise TypeError('consoleloglevel must be one of: "CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"')
else:
raise TypeError('consoleloglevel must be type of str')
```
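A small sketch of driving the parser with a stub app object (the real PyCryptoBot app carries these attributes plus much more state):
```python
# Hypothetical stub standing in for the PyCryptoBot app object.
class StubApp:
    filelog = 1
    logfile = "pycryptobot.log"
    fileloglevel = "DEBUG"
    consolelog = 1
    consoleloglevel = "INFO"

app = StubApp()
parser(app, {"filelog": 1, "logfile": "bot.log", "fileloglevel": "INFO",
             "consolelog": 1, "consoleloglevel": "WARNING"})
print(app.logfile, app.fileloglevel, app.consoleloglevel)  # bot.log INFO WARNING
```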
#### File: tests/unit_tests/test_config.py
```python
import sys
import pytest
sys.path.append('.')
# pylint: disable=import-error
from models.PyCryptoBot import PyCryptoBot
app = PyCryptoBot()
def test_get_version_from_readme():
global app
version = app.getVersionFromREADME()
assert version != 'v0.0.0'
```
#### File: tests/unit_tests/test_exchange_enum.py
```python
import sys
import pytest
from models.exchange.ExchangesEnum import Exchange
sys.path.append('.')
def test_enum_value_is_correct_for_binance():
assert Exchange.BINANCE.value == "binance"
def test_enum_value_is_correct_for_coinbasepro():
assert Exchange.COINBASEPRO.value == "coinbasepro"
def test_enum_value_is_correct_for_kucoin():
assert Exchange.KUCOIN.value == "kucoin"
def test_enum_value_is_correct_for_dummy():
assert Exchange.DUMMY.value == "dummy"
def test_converting_string_to_enum():
assert Exchange("binance") == Exchange.BINANCE
assert Exchange("coinbasepro") == Exchange.COINBASEPRO
assert Exchange("kucoin") == Exchange.KUCOIN
def test_exception_thrown_when_invalid_value():
with pytest.raises(ValueError) as exc_info:
Exchange("xxx")
assert exc_info.type is ValueError
assert exc_info.value.args[0] == "'xxx' is not a valid Exchange"
```
#### File: tests/unit_tests/test_margin_calculation.py
```python
import sys
sys.path.append('.')
from models.helper.MarginHelper import calculate_margin
from models.helper.LogHelper import Logger
Logger.configure()
def test_margin_binance():
# using VET-GBP
buy_filled= 595.23
buy_price = 0.1249
buy_size = 74.344227 # round(buy_filled * buy_price, 8)
buy_fee = 0.07434423 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 0.1335
sell_size = 79.463205 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 79.38374179 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.07946321 # round(sell_size * sell_taker_fee, 8)
expected_profit = 5.03951479 # round(sell_filled - buy_size, 8)
expected_margin = 6.77862289 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_margin_coinbase_pro():
# using LINK-GBP
buy_filled = 8.95
buy_price = 34.50004
buy_size = 308.775358 # round(buy_filled * buy_price, 8)
buy_fee = 1.54387679 # round(buy_size * 0.005, 8)
sell_percent = 100
sell_price = 30.66693
sell_size = 274.4690235 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.0035
sell_filled = 273.50838192 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.96064158 # round(sell_size * sell_taker_fee, 8)
expected_profit = -35.26697608 # round(sell_filled - buy_size, 8)
expected_margin = -11.42156431 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_calculate_negative_margin_on_binance_when_coin_price_over_1():
# this test uses BNB-USDT as the market
buy_filled = 0.067
buy_price = 667.26
buy_size = 44.70642 # round(buy_filled * buy_price, 8)
buy_fee = 0.04470642 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 590.34
sell_size = 39.55278 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 39.51322722 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.03955278 # round(sell_size * sell_taker_fee, 8)
expected_profit = -5.19319278 # round(sell_filled - buy_size, 8)
expected_margin = -11.61621257 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_calculate_negative_margin_on_binance_when_coin_price_over_1_2():
buy_filled = 0.24944
buy_price = 385.4
buy_size = 96.134176 # round(buy_filled * buy_price, 8)
buy_fee = 0.09613418 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 320.95
sell_size = 80.057768 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 79.97771023 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.08005777 # round(sell_size * sell_taker_fee, 8)
expected_profit = -16.15646577 # round(sell_filled - buy_size, 8)
expected_margin = -16.80616243 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_calculate_negative_margin_on_binance_when_coin_price_under_1():
# this test is using CHZ-USDT as market
buy_filled = 177.2
buy_price = 0.4968600000000001
buy_size = 88.043592 # round(buy_filled * buy_price, 8)
buy_fee = 0.1772 # round(buy_filled * 0.001, 8)
sell_percent = 100
sell_price = 0.43913
sell_size = 77.813836 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 77.73602216 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.07781384 # round(sell_size * sell_taker_fee, 8)
expected_profit = -10.30756984 # round(sell_filled - buy_size, 8)
expected_margin = -11.70734815 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_1():
# this test is using CHZ-USDT as market
buy_filled = 99.9
buy_price = 10.0
buy_size = 1000 # round(buy_filled * buy_price, 8)
buy_fee = 1 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 10.0
sell_size = 999.0 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 998.001 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.999 # round(sell_size * sell_taker_fee, 8)
expected_profit = -1.999 # round(sell_filled - buy_size, 8)
expected_margin = -0.1999 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_2():
# this test is using CHZ-USDT as market
buy_filled = 99.9
buy_price = 10.0
buy_size = 1000 # round(buy_filled * buy_price, 8)
buy_fee = 1 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 10.0001
sell_size = 999.00999 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 998.01098001 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.99900999 # round(sell_size * sell_taker_fee, 8)
expected_profit = -1.98901999 # round(sell_filled - buy_size, 8)
expected_margin = -0.198902 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_3():
# this test is using CHZ-USDT as market
buy_filled = 99.9
buy_price = 10.0
buy_size = 1000 # round(buy_filled * buy_price, 8)
buy_fee = 1 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 10.01
sell_size = 999.999 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 998.999001 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.999999 # round(sell_size * sell_taker_fee, 8)
expected_profit = -1.000999 # round(sell_filled - buy_size, 8)
expected_margin = -0.1000999 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_zero_margin():
# this test is using CHZ-USDT as market
buy_filled = 99.9
buy_price = 10.0
buy_size = 1000 # round(buy_filled * buy_price, 8)
buy_fee = 1 # round(buy_size * 0.001, 8)
sell_percent = 100
sell_price = 10.02003004
sell_size = 1001.001001 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 1000.0 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 1.001001 # round(sell_size * sell_taker_fee, 8)
expected_profit = 0.0 # round(sell_filled - buy_size, 8)
expected_margin = 0.0 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_BNB_max_fee_test():
# this test is using CHZ-USDT as market
buy_size = 1000 # round(buy_filled * buy_price, 8)
buy_fee = 0.75 # round(buy_size * 0.00075, 8)
buy_price = 10.0
buy_filled = 99.925
sell_percent = 100
sell_price = 10.016
sell_size = 1000.8488 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.00075
sell_filled = 1000.0981634 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 0.0
expected_sell_fee = 0.7506366 # round(sell_size * sell_taker_fee, 8)
expected_profit = 0.0981634 # round(sell_filled - buy_size, 8)
expected_margin = 0.00981634 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
def test_binance_microtrading_USDTRY_01():
# this test is using CHZ-USDT as market
buy_size = 9021.17796 # round(buy_filled * buy_price, 8)
buy_fee = 9.02117796 # round(buy_size * 0.001, 8)
buy_price = 8.628
buy_filled = 1045.57
sell_percent = 100
sell_price = 8.639
sell_size = 9032.67923 # round((sell_percent / 100) * (sell_price * buy_filled), 8)
sell_taker_fee = 0.001
sell_filled = 9023.64655077 # round((sell_size - round(sell_size * sell_taker_fee, 8)), 8)
sell_fee = 9.03267923
expected_sell_fee = 9.03267923 # round(sell_size * sell_taker_fee, 8)
expected_profit = 2.46859077 # round(sell_filled - buy_size, 8)
expected_margin = 0.02736439 # round((expected_profit / buy_size) * 100, 8)
actual_margin, actual_profit, actual_sell_fee = calculate_margin(buy_size, buy_filled, buy_price, buy_fee,
sell_percent, sell_price, sell_fee, sell_taker_fee)
assert round(actual_margin, 8) == round(expected_margin, 8)
assert round(actual_profit, 8) == round(expected_profit, 8)
assert round(actual_sell_fee, 8) == round(expected_sell_fee, 8)
```
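All of the expected values above follow the same arithmetic; here is a standalone sketch of that relationship (not the actual MarginHelper implementation, whose signature differs):
```python
# Sketch of the margin arithmetic the tests encode; checked against
# test_binance_microtrading_1 above.
def margin_sketch(buy_size, sell_size, sell_taker_fee):
    sell_fee = round(sell_size * sell_taker_fee, 8)
    sell_filled = round(sell_size - sell_fee, 8)
    profit = round(sell_filled - buy_size, 8)
    margin = round((profit / buy_size) * 100, 8)
    return margin, profit, sell_fee

print(margin_sketch(1000, 999.0, 0.001))  # (-0.1999, -1.999, 0.999)
```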
#### File: websvc/app/__init__.py
```python
import sys
from flask import Flask, send_from_directory
from websvc.app.pages import Pages
app = Flask(__name__, static_url_path="")
pages = Pages()
@app.route("/")
def exchanges():
return Pages.exchanges()
@app.route("/css/<path:path>")
def send_js(path):
return send_from_directory("css", path)
@app.route("/js/<path:path>")
def send_css(path):
return send_from_directory("js", path)
@app.route("/binance")
def binance():
return Pages.binance_markets()
@app.route("/binance/<market>")
def binance_market(market):
return Pages.technical_analysis('binance', market, '15m', '1h', '6h')
@app.route("/coinbasepro")
def coinbasepro():
return Pages.coinbasepro_markets()
@app.route("/coinbasepro/<market>")
def coinbasepro_market(market):
return Pages.technical_analysis('coinbasepro', market, 900, 3600, 21600)
``` |
{
"source": "joel-mb/Scenic",
"score": 3
} |
#### File: scenic/core/type_support.py
```python
import sys
import inspect
import numbers
import typing
from scenic.core.distributions import (Distribution, RejectionException, StarredDistribution,
distributionFunction)
from scenic.core.lazy_eval import (DelayedArgument, valueInContext, requiredProperties,
needsLazyEvaluation, toDelayedArgument)
from scenic.core.vectors import Vector
from scenic.core.errors import RuntimeParseError, saveErrorLocation
# Typing and coercion rules:
#
# coercible to a scalar:
# instances of numbers.Real (by calling float())
# coercible to a heading:
# anything coercible to a scalar
# anything with a toHeading() method
# coercible to a Vector:
# tuples/lists of length 2
# anything with a toVector() method
# coercible to an object of type T:
# instances of T
#
# Finally, Distributions are coercible to T iff their valueType is.
## Basic types
class Heading(float):
"""Dummy class used as a target for type coercions to headings."""
pass
def underlyingType(thing):
"""What type this value ultimately evaluates to, if we can tell."""
if isinstance(thing, Distribution):
return thing.valueType
elif isinstance(thing, TypeChecker) and len(thing.types) == 1:
return thing.types[0]
else:
return type(thing)
def isA(thing, ty):
"""Does this evaluate to a member of the given Scenic type?"""
return issubclass(underlyingType(thing), ty)
def unifyingType(opts): # TODO improve?
"""Most specific type unifying the given types."""
types = []
for opt in opts:
if isinstance(opt, StarredDistribution):
ty = underlyingType(opt)
typeargs = typing.get_args(ty)
if typeargs == ():
types.append(ty)
else:
for ty in typeargs:
if ty is not Ellipsis:
types.append(ty)
else:
types.append(underlyingType(opt))
if all(issubclass(ty, numbers.Real) for ty in types):
return float
mro = inspect.getmro(types[0])
for parent in mro:
if all(issubclass(ty, parent) for ty in types):
return parent
raise RuntimeError(f'broken MRO for types {types}')
## Type coercions (for internal use -- see the type checking API below)
def canCoerceType(typeA, typeB):
"""Can values of typeA be coerced into typeB?"""
import scenic.syntax.veneer as veneer # TODO improve
if typing.get_origin(typeA) is typing.Union:
# only raise an error now if none of the possible types will work;
# we'll do more careful checking at runtime
return any(canCoerceType(ty, typeB) for ty in typing.get_args(typeA))
if typeB is float:
return issubclass(typeA, numbers.Real)
elif typeB is Heading:
return canCoerceType(typeA, float) or hasattr(typeA, 'toHeading')
elif typeB is Vector:
return issubclass(typeA, (tuple, list)) or hasattr(typeA, 'toVector')
elif typeB is veneer.Behavior:
return issubclass(typeA, typeB) or typeA in (type, type(None))
else:
return issubclass(typeA, typeB)
def canCoerce(thing, ty):
"""Can this value be coerced into the given type?"""
tt = underlyingType(thing)
if canCoerceType(tt, ty):
return True
elif isinstance(thing, Distribution) and tt is object:
return True # fall back on type-checking at runtime
else:
return False
def coerce(thing, ty, error='wrong type'):
"""Coerce something into the given type."""
assert canCoerce(thing, ty), (thing, ty)
import scenic.syntax.veneer as veneer # TODO improve?
realType = ty
if ty is float:
coercer = coerceToFloat
elif ty is Heading:
coercer = coerceToHeading
ty = numbers.Real
realType = float
elif ty is Vector:
coercer = coerceToVector
elif ty is veneer.Behavior:
coercer = coerceToBehavior
else:
coercer = None
if isinstance(thing, Distribution):
vt = thing.valueType
if typing.get_origin(vt) is typing.Union:
possibleTypes = typing.get_args(vt)
else:
possibleTypes = (vt,)
if all(issubclass(possible, ty) for possible in possibleTypes):
return thing # no coercion necessary
else:
return TypecheckedDistribution(thing, realType, error, coercer=coercer)
elif coercer:
try:
return coercer(thing)
except CoercionFailure as e:
raise RuntimeParseError(f'{error} ({e.args[0]})') from None
else:
return thing
class CoercionFailure(Exception):
pass
def coerceToFloat(thing) -> float:
return float(thing)
def coerceToHeading(thing) -> float:
if hasattr(thing, 'toHeading'):
return thing.toHeading()
return float(thing)
def coerceToVector(thing) -> Vector:
if isinstance(thing, (tuple, list)):
l = len(thing)
if l != 2:
raise CoercionFailure('expected 2D vector, got '
f'{type(thing).__name__} of length {l}')
return Vector(*thing)
else:
return thing.toVector()
def coerceToBehavior(thing):
import scenic.syntax.veneer as veneer # TODO improve
if thing is None or isinstance(thing, veneer.Behavior):
return thing
else:
assert issubclass(thing, veneer.Behavior)
return thing()
class TypecheckedDistribution(Distribution):
def __init__(self, dist, ty, errorMessage, coercer=None):
super().__init__(dist, valueType=ty)
self.dist = dist
self.errorMessage = errorMessage
self.coercer = coercer
self.loc = saveErrorLocation()
def sampleGiven(self, value):
val = value[self.dist]
suffix = None
if self.coercer:
if canCoerceType(type(val), self.valueType):
try:
return self.coercer(val)
except CoercionFailure as e:
suffix = f' ({e.args[0]})'
elif isinstance(val, self.valueType):
return val
if suffix is None:
suffix = f' (expected {self.valueType.__name__}, got {type(val).__name__})'
raise RuntimeParseError(self.errorMessage + suffix, self.loc)
def conditionTo(self, value):
self.dist.conditionTo(value)
def __repr__(self):
return f'TypecheckedDistribution({self.dist}, {self.valueType})'
def coerceToAny(thing, types, error):
"""Coerce something into any of the given types, printing an error if impossible."""
for ty in types:
if canCoerce(thing, ty):
return coerce(thing, ty, error)
from scenic.syntax.veneer import verbosePrint
verbosePrint(f'Failed to coerce {thing} of type {underlyingType(thing)} to {types}',
file=sys.stderr)
raise RuntimeParseError(error)
## Top-level type checking/conversion API
def toTypes(thing, types, typeError='wrong type'):
"""Convert something to any of the given types, printing an error if impossible."""
if needsLazyEvaluation(thing):
# cannot check the type now; create proxy object to check type after evaluation
return TypeChecker(thing, types, typeError)
else:
return coerceToAny(thing, types, typeError)
def toType(thing, ty, typeError='wrong type'):
"""Convert something to a given type, printing an error if impossible."""
return toTypes(thing, (ty,), typeError)
def toScalar(thing, typeError='non-scalar in scalar context'):
"""Convert something to a scalar, printing an error if impossible."""
return toType(thing, float, typeError)
def toHeading(thing, typeError='non-heading in heading context'):
"""Convert something to a heading, printing an error if impossible."""
return toType(thing, Heading, typeError)
def toVector(thing, typeError='non-vector in vector context'):
"""Convert something to a vector, printing an error if impossible."""
return toType(thing, Vector, typeError)
def evaluateRequiringEqualTypes(func, thingA, thingB, typeError='type mismatch'):
"""Evaluate the func, assuming thingA and thingB have the same type.
If func produces a lazy value, it should not have any required properties beyond
those of thingA and thingB."""
if not needsLazyEvaluation(thingA) and not needsLazyEvaluation(thingB):
if underlyingType(thingA) is not underlyingType(thingB):
raise RuntimeParseError(typeError)
return func()
else:
# cannot check the types now; create proxy object to check types after evaluation
return TypeEqualityChecker(func, thingA, thingB, typeError)
## Proxy objects for lazy type checking
class TypeChecker(DelayedArgument):
"""Checks that a given lazy value has one of a given list of types."""
def __init__(self, arg, types, error):
def check(context):
val = arg.evaluateIn(context)
return coerceToAny(val, types, error)
super().__init__(requiredProperties(arg), check)
self.inner = arg
self.types = types
def __str__(self):
return f'TypeChecker({self.inner},{self.types})'
class TypeEqualityChecker(DelayedArgument):
"""Lazily evaluates a function, after checking that two lazy values have the same type."""
def __init__(self, func, checkA, checkB, error):
props = requiredProperties(checkA) | requiredProperties(checkB)
def check(context):
ca = valueInContext(checkA, context)
cb = valueInContext(checkB, context)
if underlyingType(ca) is not underlyingType(cb):
raise RuntimeParseError(error)
return valueInContext(func(), context)
super().__init__(props, check)
self.inner = func
self.checkA = checkA
self.checkB = checkB
def __str__(self):
return f'TypeEqualityChecker({self.inner},{self.checkA},{self.checkB})'
``` |
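A short sketch of the top-level coercion helpers in use (assumes a working Scenic checkout so the imports resolve):
```python
# Hypothetical interactive use of the coercion API defined above.
from scenic.core.type_support import toScalar, toHeading, toVector

print(toScalar(3))          # 3.0
print(toHeading(1.5))       # 1.5 -- anything scalar-coercible works as a heading
print(toVector([10, 20]))   # a Vector built via the length-2 sequence rule
```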
{
"source": "joelmeyerson/autopick-bc",
"score": 3
} |
#### File: joelmeyerson/autopick-bc/box_to_star.py
```python
import os
import shutil
import argparse
import sys
import re
import pathlib
import math
def main():
# parse arguments
parser = argparse.ArgumentParser(description='Convert box file to star file.')
parser.add_argument('-f', '--boxfile', type=str, help='box file name', required=True)
parser.add_argument('-o', '--starfile', type=str, help='star file name ending in _path.star', required=True)
args = parser.parse_args()
# check that input file exists
if os.path.isfile(args.boxfile) == False:
print("Input box file does not exist. Exiting.")
exit()
# check that path for output file exists
outpath = pathlib.Path(args.starfile).parent
if (os.path.exists(outpath) == False):
print("Output file path does not exist. Exiting.")
exit()
# get lines in box and store in list
boxes = []
with open(args.boxfile, "r") as openfile:
for line in openfile:
if bool(re.search(r'\d', str(line))):
boxes.append(line.split())
# get boxsize from first line in box file
boxsize = int(boxes[0][3])
with open(args.starfile, "w") as starfile:
starfile.writelines('\n')
starfile.writelines('# version 30001\n')
starfile.writelines('\n')
starfile.writelines('data_\n')
starfile.writelines('\n')
starfile.writelines('loop_\n')
starfile.writelines('_rlnCoordinateX #1\n')
starfile.writelines('_rlnCoordinateY #2\n')
starfile.writelines('_rlnClassNumber #3\n')
starfile.writelines('_rlnAnglePsi #4\n')
starfile.writelines('_rlnAutopickFigureOfMerit #5\n')
for box in boxes:
# box coord system is in bottom left of box, move to center for star format
box_x = int(box[0]) + boxsize/2
box_y = int(box[1]) + boxsize/2
starfile.writelines("%s" % str(format(box_x, '.1f').rjust(10)) + ' ' + str(format(box_y, '.1f').rjust(10)) + ' 2 -999.00000 -999.00000\n')
print("\nSuccessfully generated star file.")
if __name__ == "__main__":
main()
```
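The coordinate change applied per box entry, shown in isolation (values are illustrative): .box files use a bottom-left corner origin, while the star file stores the particle centre.
```python
# Illustrative conversion for a single box line "100 200 64 64".
x, y, boxsize = 100, 200, 64
star_x, star_y = x + boxsize / 2, y + boxsize / 2
print(star_x, star_y)  # 132.0 232.0
```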
#### File: joelmeyerson/autopick-bc/extract_particles.py
```python
import sys
import re
import numpy as np
import mrcfile
from PIL import Image
from progress.bar import Bar
def extract(projpath, starfile, train_dir, test_dir, good_or_bad):
# extract name of image stack and index for image slice
files = []
with open(starfile, "r") as openfile:
for line in openfile:
# find the header entry which contains column number that stores image name
if re.search(r'_rlnImageName', line):
img_column = int(str(line.split()[1]).strip("#"))
# use keywords to find lines with image names
if re.search(r'(mrc | mrcs)', line):
column_str = line.split()[img_column-1]
path = projpath + "/" + str((column_str.split("@", 1)[1]))
img = str(path.split("/")[-1])
idx = int((column_str.split("@", 1)[0].lstrip("0")))
#store path, stack name, and image slice index
files.append([path, img, idx])
# store image slices in a tensor then write images to disk
files = np.array(files)
num_file = int(len(files))
# set fractions to use for training and testing (validation data fraction set within TF)
num_file = int(len(files))
train_fraction = round(num_file * 0.8)
# create progress bar
bar = Bar('Extracting ' + str(num_file) + ' ' + good_or_bad + ' particles:', fill='#', suffix='%(percent)d%%', max=num_file)
# extract each particle from its MRC stack and convert to PNG
for i in range(num_file):
mrc_path = files[i, 0]
mrc_file = files[i, 1]
mrc_slice = int(files[i, 2]) - 1
mrc = mrcfile.open(mrc_path, mode=u'r', permissive=False, header_only=False)
# handle case where mrc stack has only one image in the stack so has only two dimensions
if mrc.data.ndim == 2:
img_array = np.flip(mrc.data[:, :])
else: # it has three dimensions
img_array = np.flip(mrc.data[mrc_slice, :, :])
img_array = np.flip(mrc.data[mrc_slice, :, :], axis=0)
img_array = img_array + abs(img_array.min()) # make all 32 bit floating point pixel values >= 0
img_array /= img_array.max() # normalize all pixels between 0 and 1
img_array *= 255 # normalize all pixels between 0 and 255
# write image
mrc_base_name = mrc_file.split('.', 1)[0] # get file base name
if i <= train_fraction:
Image.fromarray(img_array).convert("L").save(train_dir + '/' + mrc_base_name + '-' + str(mrc_slice) + '.png')
else:
Image.fromarray(img_array).convert("L").save(test_dir + '/' + mrc_base_name + '-' + str(mrc_slice) + '.png')
mrc.close()
bar.next()
bar.finish()
```
#### File: joelmeyerson/autopick-bc/gen_model.py
```python
import os
import shutil
import argparse
import numpy as np
import matplotlib.pyplot as plt
import random
import json
from PIL import Image
# TF imports
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from tensorflow.keras.layers import Activation, Flatten, Dense, Dropout
# local imports
import save_results
# disable GPU
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
def gen_model():
# reset Keras global state
tf.keras.backend.clear_session()
# set random seed
seed = 123
#os.environ['PYTHONHASHSEED']=str(seed)
#random.seed(seed)
#np.random.seed(seed)
#tf.random.set_seed(seed)
# parse arguments
parser = argparse.ArgumentParser(description='Classify good and bad particles.')
parser.add_argument('-p', '--projpath', type=str, help='path for project', required=True)
parser.add_argument('-s', '--skiptraining', action='store_true', help='skip training and just run prediction (DEBUG OPTION)')
args = parser.parse_args()
# create directory structure
work_dir = args.projpath
if work_dir.endswith('/'):
work_dir = work_dir.rstrip('/')
data_dir = work_dir + '/ClassBin'
train_dir = work_dir + '/ClassBin/train'
test_dir = work_dir + '/ClassBin/test'
if (os.path.exists(train_dir) == False or os.path.exists(test_dir) == False):
print("Training/validation and/or test data not found. Exiting.")
exit()
# get box dimensions from random extracted particle (png)
im_rand = random.choice(os.listdir(train_dir + '/good'))
im_rand_dim = Image.open(train_dir + '/good/' + im_rand).size
box = im_rand_dim[0]
# data loader parameters
batch_size = 32
image_size = (box, box)
class_names=['bad', 'good']
train_val_split = 0.2
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir,
validation_split=train_val_split,
class_names=class_names,
label_mode='binary',
subset='training',
seed=seed,
image_size=image_size,
batch_size=batch_size,
color_mode='grayscale'
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
train_dir,
validation_split=train_val_split,
class_names=class_names,
label_mode='binary',
subset='validation',
seed=seed,
image_size=image_size,
batch_size=batch_size,
color_mode='grayscale'
)
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
test_dir,
class_names=class_names,
label_mode='binary',
seed=seed,
image_size=image_size,
batch_size=batch_size,
color_mode='grayscale'
)
train_ds = train_ds.prefetch(buffer_size=32)
val_ds = val_ds.prefetch(buffer_size=32)
test_ds = test_ds.prefetch(buffer_size=32)
# build model
model = Sequential()
model.add(tf.keras.layers.experimental.preprocessing.RandomRotation(0.2, seed=seed)) # augmentation
#model.add(tf.keras.layers.experimental.preprocessing.CenterCrop(height=round(0.8*box), width=round(0.8*box))) # augmentation
#model.add(tf.keras.layers.experimental.preprocessing.RandomTranslation(height_factor=(0.1), width_factor=(0.1), seed=seed)) # augmentation
model.add(Conv2D(
filters=32,
kernel_size=(2,2),
strides=(1,1),
padding='same',
input_shape=(box,box,1)
))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Conv2D(filters=64,kernel_size=(2,2),strides=(1,1),padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Conv2D(filters=128,kernel_size=(2,2),strides=(1,1),padding='valid'))
model.add(BatchNormalization())
model.add(Activation('relu'))
#model.add(MaxPooling2D(pool_size=(2,2),strides=2))
#model.add(Conv2D(filters=128,kernel_size=(2,2),strides=(1,1),padding='valid'))
#model.add(BatchNormalization())
#model.add(Activation('relu'))
#test
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Flatten())
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(
loss='binary_crossentropy',
optimizer=keras.optimizers.Adam(0.00005),
metrics = [
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='auc')
]
)
# callbacks
checkpoint_filepath = '/tmp/checkpoint'
callbacks = [
keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
#keras.callbacks.ModelCheckpoint(checkpoint_filepath, save_freq='epoch'),
#keras.callbacks.TensorBoard(log_dir='./logs')
]
# SKIP TRAINING OPTION IS FOR DEBUGGING
if args.skiptraining != True:
# fit model to data and save training and validation history
history = model.fit(
train_ds,
epochs=50,
callbacks=callbacks,
validation_data=val_ds
)
model.save(data_dir + '/model.h5')
# save log of error and accuracy per epoch (dict)
with open(data_dir + "/training_log.txt", "w") as text_file:
text_file.write(json.dumps(history.history))
else:
try:
model = tf.keras.models.load_model(data_dir + '/model.h5')
except:
print("No model found. Must run training.")
# run prediction with test data
# each batch in the test dataset is a tuple with two elements
# element 0 is tuple with (batch_size, box, box, 1)
# element 1 is tuple with (batch_size, 1)
batch_labels = []
batch_data = []
labels = []
predictions = []
for batch in test_ds:
#print(len(batch))
batch_data = batch[0]
batch_labels = batch[1]
batch_labels = np.array(batch_labels[:,0]) # convert tuple to array
#print(np.shape(batch_data))
#print(np.shape(batch_labels))
batch_pred = model.predict(batch_data)
batch_pred = abs(batch_pred.round())
batch_pred = np.array(batch_pred[:,0]) # convert tuple to array
# store batch labels and batch predictions
labels = np.concatenate([labels, batch_labels])
labels = labels.astype(int)
predictions = np.concatenate([predictions, batch_pred])
predictions = predictions.astype(int)
# save log of labels and predictions
with open(data_dir + "/testing_results.txt", "w") as text_file:
text_file.write(json.dumps({"labels":labels.tolist(), "predictions":predictions.tolist()}))
# make summary of training and test results (png)
save_results.make_summary(data_dir, history.history, labels, predictions)
if __name__ == "__main__":
gen_model()
``` |
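A possible follow-up once training has finished: scoring a single extracted particle PNG with the saved model (paths are placeholders; the PNG must have the same box size the model was trained on):
```python
# Hypothetical single-image prediction with the model saved above.
import numpy as np
import tensorflow as tf
from PIL import Image

model = tf.keras.models.load_model("ClassBin/model.h5")
img = np.asarray(Image.open("ClassBin/test/good/example.png"), dtype=np.float32)
prob = model.predict(img[np.newaxis, :, :, np.newaxis])[0, 0]
print("good" if prob >= 0.5 else "bad", prob)  # class order is ['bad', 'good']
```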
{
"source": "joelmeyerson/conformation-inspector",
"score": 2
} |
#### File: joelmeyerson/conformation-inspector/gen_pure_star.py
```python
import re
from progress.spinner import Spinner
# build star file with conformational "pure" oligomers
def gen(dict_cx, oligostar, outstar):
star_header = [] # store header
star_lines = [] # star lines with particles
in_header = True
img_col = 0
with open(oligostar, "r") as file:
# extract header
for line in file:
# monitor for when we exit the header block
if re.search(r'@', line) and in_header == True:
in_header = False
if in_header == True:
star_header.append(line)
# find header entry with column number for particle name
if re.search(r'_rlnImageName', line):
img_col = int(str(line.split()[1]).strip("#"))
# if line has particle, and not in header, and that particle image index has been found
elif re.search(r'@', line) and in_header == False and img_col:
star_lines.append(line)
# get list of oligomer images with "pure" composition
pure = []
for key, value in dict_cx.items():
if value == 4:
pure.append(key)
# find entries in oligomer star that match entries in the conformation star
matches = []
with Spinner('Generating ' + outstar + ' ') as spinner:
for i in range(len(star_lines)):
par_img = star_lines[i].split()[img_col - 1]
if par_img in pure:
matches.append(star_lines[i])
if (i % 10000 == 0): # slow spinner
spinner.next()
# write star file output
with open(outstar, "w") as starfile:
starfile.writelines("%s" % l for l in star_header)
starfile.writelines("%s" % l for l in matches)
``` |
{
"source": "joelmichelson/alpha-zero-league-draft",
"score": 3
} |
#### File: alpha-zero-league-draft/draft/DraftGame.py
```python
from __future__ import print_function
import sys
sys.path.append('..')
from Game import Game
from .DraftLogic import Board
import numpy as np
class DraftGame(Game):
def __init__(self, n=138, nir=0):
self.n = n
def getInitBoard(self):
# return initial board (numpy board)
b = Board(self.n)
return np.array(b.pieces) #todo: print this nicely
def getBoardSize(self):
# (a,b) tuple
return (self.n,1)
def getActionSize(self):
# return number of actions
return self.n
def getNextState(self, board, player, action):
# if player takes action on board, return next (board,player)
# action must be a valid move
# passing action
#if action == self.n:
# return (board, -player)
b = Board(self.n)
b.pieces = np.copy(board)
move = action
b.execute_move(move, player)
return (b.pieces, -player)
# modified
def getValidMoves(self, board, player):
# return a fixed size binary vector
valids = [0] * self.getActionSize()
b = Board(self.n)
b.pieces = np.copy(board)
legalMoves = b.get_legal_moves(player)
if len(legalMoves) == 0:
#valids[-1] = 1 #allows passing
return np.array(valids)
for x in legalMoves:
valids[x] = 1
return np.array(valids)
# modified
def getGameEnded(self, board, player):
# return 0 if not ended, 1 if player 1 won, -1 if player 1 lost, small nonzero value for a draw
# player = 1
b = Board(self.n)
b.pieces = np.copy(board)
#print(np.sum(b.pieces))
if np.count_nonzero(b.pieces) >= 10:
#print('\nboard',b.pieces)
k=1
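# note: with k=1 the branch below always fires, so the winner is currently a
# coin flip (placeholder reward) and the score-based outcome further down is
# effectively unused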
if np.random.random_sample() < k:
if np.random.random_sample() < 0.5:
return -1
else:
return 1
score = np.sum([x*i for x,i in enumerate(b.pieces) if i!=0])
if score > 5:
return 1
elif score < -5:
return -1
if b.has_legal_moves():
return 0
return 1e-4
def getCanonicalForm(self, board, player):
# return state if player==1, else return -state if player==-1
#print('board1:', board[:self.n])
#print('board2:', board[self.n:])
#print('board3:', np.concatenate((board[self.n:],board[:self.n])))
'''if player == -1:
return np.concatenate((board[self.n:],board[:self.n]))
else:'''
return board*player
# modified
def getSymmetries(self, board, pi):
# mirror, rotational
return [(board,pi)]#[(board,None)]
'''assert(len(pi) == self.n**2 + 1) # 1 for pass
pi_board = np.reshape(pi[:-1], (self.n, self.n))
l = []
for i in range(1, 5):
for j in [True, False]:
newB = np.rot90(board, i)
newPi = np.rot90(pi_board, i)
if j:
newB = np.fliplr(newB)
newPi = np.fliplr(newPi)
l += [(newB, list(newPi.ravel()) + [pi[-1]])]'''
def stringRepresentation(self, board):
# 8x8 numpy array (canonical board)
return board.tostring()
def display(board):
n = board.shape[0]
print(" -----------------------")
print(board)  # board is already a NumPy array here, not a Board object
print(" -----------------------")
``` |
{
"source": "joelmiller/networkx",
"score": 4
} |
#### File: networkx/classes/graph.py
```python
from copy import deepcopy
import networkx as nx
from networkx.exception import NetworkXError
import networkx.convert as convert
__author__ = """\n""".join(['<NAME> (<EMAIL>)',
'<NAME> (<EMAIL>)',
'<NAME>(<EMAIL>)'])
class Graph(object):
"""
Base class for undirected graphs.
A Graph stores nodes and edges with optional data, or attributes.
Graphs hold undirected edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
DiGraph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.Graph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.Graph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 1, 4)
(2, 3, 8)
(3, 2, 8)
>>> [ (u,v,edata['weight']) for u,v,edata in G.edges(data=True) if 'weight' in edata ]
[(1, 2, 4), (2, 3, 8)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.graph = {} # dictionary for graph attributes
self.node = {} # empty node dict (created before convert)
self.adj = {} # empty adjacency dict
# attempt to load graph with data
if data is not None:
convert.to_networkx_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge = self.adj
@property
def name(self):
return self.graph.get('name','')
@name.setter
def name(self, s):
self.graph['name']=s
def __str__(self):
"""Return the graph name.
Returns
-------
name : string
The name of the graph.
Examples
--------
>>> G = nx.Graph(name='foo')
>>> str(G)
'foo'
"""
return self.name
def __iter__(self):
"""Iterate over the nodes. Use the expression 'for n in G'.
Returns
-------
niter : iterator
An iterator over all nodes in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
"""
return iter(self.node)
def __contains__(self,n):
"""Return True if n is a node, False otherwise. Use the expression
'n in G'.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> 1 in G
True
"""
try:
return n in self.node
except TypeError:
return False
def __len__(self):
"""Return the number of nodes. Use the expression 'len(G)'.
Returns
-------
nnodes : int
The number of nodes in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> len(G)
4
"""
return len(self.node)
def __getitem__(self, n):
"""Return a dict of neighbors of node n. Use the expression 'G[n]'.
Parameters
----------
n : node
A node in the graph.
Returns
-------
adj_dict : dictionary
The adjacency dictionary for nodes connected to n.
Notes
-----
G[n] is similar to G.neighbors(n) but the internal data dictionary
is returned instead of a list.
Assigning G[n] will corrupt the internal graph data structure.
Use G[n] for reading data only.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G[0]
{1: {}}
"""
return self.adj[n]
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.node:
self.adj[n] = {}
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
try:
if n not in self.node:
self.adj[n] = {}
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
except TypeError:
nn,ndict = n
if nn not in self.node:
self.adj[nn] = {}
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
def remove_node(self,n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.edges()
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> G.edges()
[]
"""
adj = self.adj
try:
nbrs = list(adj[n].keys()) # keys handles self-loops (allow mutation later)
del self.node[n]
except KeyError: # NetworkXError if n not in self
raise NetworkXError("The node %s is not in the graph."%(n,))
for u in nbrs:
del adj[u][n] # remove all edges n-u in graph
del adj[n] # now remove node
def remove_nodes_from(self, nodes):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> e = G.nodes()
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> G.nodes()
[]
"""
adj = self.adj
for n in nodes:
try:
del self.node[n]
for u in list(adj[n].keys()): # keys() handles self-loops
del adj[u][n] #(allows mutation of dict in loop)
del adj[n]
except KeyError:
pass
def nodes_iter(self, data=False):
"""Return an iterator over the nodes.
Parameters
----------
data : boolean, optional (default=False)
If False the iterator returns nodes. If True
return a two-tuple of node and node data dictionary
Returns
-------
niter : iterator
An iterator over nodes. If data=True the iterator gives
two-tuples containing (node, node data dictionary)
Notes
-----
If the node data is not required it is simpler and equivalent
to use the expression 'for n in G'.
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> [d for n,d in G.nodes_iter(data=True)]
[{}, {}, {}]
"""
if data:
return iter(self.node.items())
return iter(self.node)
def nodes(self, data=False):
"""Return a list of the nodes in the graph.
Parameters
----------
data : boolean, optional (default=False)
If False return a list of nodes. If True return a
two-tuple of node and node data dictionary
Returns
-------
nlist : list
A list of nodes. If data=True a list of two-tuples containing
(node, node data dictionary).
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.nodes()
[0, 1, 2]
>>> G.add_node(1, time='5pm')
>>> G.nodes(data=True)
[(0, {}), (1, {'time': '5pm'}), (2, {})]
"""
return list(self.nodes_iter(data=data))
def number_of_nodes(self):
"""Return the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
order, __len__ which are identical
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> len(G)
3
"""
return len(self.node)
def order(self):
"""Return the number of nodes in the graph.
Returns
-------
nnodes : int
The number of nodes in the graph.
See Also
--------
number_of_nodes, __len__ which are identical
"""
return len(self.node)
def has_node(self, n):
"""Return True if the graph contains the node n.
Parameters
----------
n : node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.has_node(0)
True
It is more readable and simpler to use
>>> 0 in G
True
"""
try:
return n in self.node
except TypeError:
return False
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to a keyword
which by default is 'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dictionary
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.node:
self.adj[u] = {}
self.node[u] = {}
if v not in self.node:
self.adj[v] = {}
self.node[v] = {}
# add the edge
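# the same attribute dict object is stored under both adj[u][v] and adj[v][u],
# so updating the edge data for one direction is automatically seen from the other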
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
self.adj[u][v] = datadict
self.adj[v][u] = datadict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing edge
data.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in edges as a tuple take precedence
over attributes specified generally.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
>>> e = zip(range(0,3),range(1,4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# process ebunch
for e in ebunch:
ne=len(e)
if ne==3:
u,v,dd = e
elif ne==2:
u,v = e
dd = {}
else:
raise NetworkXError(\
"Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
if u not in self.node:
self.adj[u] = {}
self.node[u] = {}
if v not in self.node:
self.adj[v] = {}
self.node[v] = {}
datadict=self.adj[u].get(v,{})
datadict.update(attr_dict)
datadict.update(dd)
self.adj[u][v] = datadict
self.adj[v][u] = datadict
def add_weighted_edges_from(self, ebunch, weight='weight', **attr):
"""Add all the edges in ebunch as weighted edges with specified
weights.
Parameters
----------
ebunch : container of edges
Each edge given in the list or container will be added
to the graph. The edges must be given as 3-tuples (u,v,w)
where w is a number.
weight : string, optional (default= 'weight')
The attribute name for the edge weights to be added.
attr : keyword arguments, optional (default= no attributes)
Edge attributes to add/update for all edges.
See Also
--------
add_edge : add a single edge
add_edges_from : add multiple edges
Notes
-----
Adding the same edge twice for Graph/DiGraph simply updates
the edge data. For MultiGraph/MultiDiGraph, duplicate edges
are stored.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
"""
self.add_edges_from(((u,v,{weight:d}) for u,v,d in ebunch),**attr)
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u,v: nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2,3,{'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self.adj[u][v]
if u != v: # self-loop needs only one entry removed
del self.adj[v][u]
except KeyError:
raise NetworkXError("The edge %s-%s is not in the graph"%(u,v))
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u,v) edge between u and v.
- 3-tuples (u,v,k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> ebunch=[(1,2),(2,3)]
>>> G.remove_edges_from(ebunch)
"""
adj=self.adj
for e in ebunch:
u,v = e[:2] # ignore edge data if present
if u in adj and v in adj[u]:
del adj[u][v]
if u != v: # self loop needs only one entry removed
del adj[v][u]
def has_edge(self, u, v):
"""Return True if the edge (u,v) is in the graph.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
Returns
-------
edge_ind : bool
True if edge is in the graph, False otherwise.
Examples
--------
Can be called either using two nodes u,v or edge tuple (u,v)
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.has_edge(0,1) # using two nodes
True
>>> e = (0,1)
>>> G.has_edge(*e) # e is a 2-tuple (u,v)
True
>>> e = (0,1,{'weight':7})
>>> G.has_edge(*e[:2]) # e is a 3-tuple (u,v,data_dictionary)
True
The following are all equivalent:
>>> G.has_edge(0,1)
True
>>> 1 in G[0] # though this gives KeyError if 0 not in G
True
"""
try:
return v in self.adj[u]
except KeyError:
return False
def neighbors(self, n):
"""Return a list of the nodes connected to the node n.
Parameters
----------
n : node
A node in the graph
Returns
-------
nlist : list
A list of nodes that are adjacent to n.
Raises
------
NetworkXError
If the node n is not in the graph.
Notes
-----
It is usually more convenient (and faster) to access the
adjacency dictionary as G[n]:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a','b',weight=7)
>>> G['a']
{'b': {'weight': 7}}
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.neighbors(0)
[1]
"""
try:
return list(self.adj[n])
except KeyError:
raise NetworkXError("The node %s is not in the graph."%(n,))
def neighbors_iter(self, n):
"""Return an iterator over all neighbors of node n.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [n for n in G.neighbors_iter(0)]
[1]
Notes
-----
It is faster to use the idiom "in G[0]", e.g.
>>> G = nx.path_graph(4)
>>> [n for n in G[0]]
[1]
"""
try:
return iter(self.adj[n])
except KeyError:
raise NetworkXError("The node %s is not in the graph."%(n,))
def edges(self, nbunch=None, data=False):
"""Return a list of edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
Return two-tuples (u,v) (False) or three-tuples (u,v,data) (True).
Returns
--------
edge_list: list of edge tuples
Edges that are adjacent to any node in nbunch, or a list
of all edges if nbunch is not specified.
See Also
--------
edges_iter : return an iterator over the edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.edges()
[(0, 1), (1, 2), (2, 3)]
>>> G.edges(data=True) # default edge data is {} (empty dictionary)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> G.edges([0,3])
[(0, 1), (3, 2)]
>>> G.edges(0)
[(0, 1)]
"""
return list(self.edges_iter(nbunch, data))
def edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {})]
>>> list(G.edges_iter([0,3]))
[(0, 1), (3, 2)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
seen={} # helper dict to keep track of multiply stored edges
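# each undirected edge is stored once in adj[u] and once in adj[v]; an edge is
# yielded only while its other endpoint has not yet been marked seen, so every
# edge is reported exactly once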
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
if nbr not in seen:
yield (n,nbr,data)
seen[n]=1
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
if nbr not in seen:
yield (n,nbr)
seen[n] = 1
del seen
def get_edge_data(self, u, v, default=None):
"""Return the attribute dictionary associated with edge (u,v).
Parameters
----------
u,v : nodes
default: any Python object (default=None)
Value to return if the edge (u,v) is not found.
Returns
-------
edge_dict : dictionary
The edge attribute dictionary.
Notes
-----
It is faster to use G[u][v].
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G[0][1]
{}
Warning: Assigning G[u][v] corrupts the graph data structure.
But it is safe to assign attributes to that dictionary,
>>> G[0][1]['weight'] = 7
>>> G[0][1]['weight']
7
>>> G[1][0]['weight']
7
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.get_edge_data(0,1) # default edge data is {}
{}
>>> e = (0,1)
>>> G.get_edge_data(*e) # tuple form
{}
>>> G.get_edge_data('a','b',default=0) # edge not in graph, return 0
0
"""
try:
return self.adj[u][v]
except KeyError:
return default
def adjacency_list(self):
"""Return an adjacency list representation of the graph.
The output adjacency list is in the order of G.nodes().
For directed graphs, only outgoing adjacencies are included.
Returns
-------
adj_list : lists of lists
The adjacency structure of the graph as a list of lists.
See Also
--------
adjacency_iter
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.adjacency_list() # in order given by G.nodes()
[[1], [0, 2], [1, 3], [2]]
"""
return list(map(list,iter(self.adj.values())))
def adjacency_iter(self):
"""Return an iterator of (node, adjacency dict) tuples for all nodes.
This is the fastest way to look at every edge.
For directed graphs, only outgoing adjacencies are included.
Returns
-------
adj_iter : iterator
An iterator of (node, adjacency dictionary) for all nodes in
the graph.
See Also
--------
adjacency_list
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> [(n,nbrdict) for n,nbrdict in G.adjacency_iter()]
[(0, {1: {}}), (1, {0: {}, 2: {}}), (2, {1: {}, 3: {}}), (3, {2: {}})]
"""
return iter(self.adj.items())
def degree(self, nbunch=None, weight=None):
"""Return the degree of a node or nodes.
The node degree is the number of edges adjacent to that node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and degree as values or
a number if a single node is specified.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.degree(0)
1
>>> G.degree([0,1])
{0: 1, 1: 2}
>>> list(G.degree([0,1]).values())
[1, 2]
"""
if nbunch in self: # return a single node
return next(self.degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.degree_iter(nbunch,weight))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs = self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs)+(n in nbrs)) # return tuple (n,degree)
else:
# edge weighted graph - degree is sum of nbr edge weights
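# a self-loop should count twice, so its weight is added once more below;
# `n in nbrs and ...` evaluates to False (0) when there is no self-loop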
for n,nbrs in nodes_nbrs:
yield (n, sum((nbrs[nbr].get(weight,1) for nbr in nbrs)) +
(n in nbrs and nbrs[n].get(weight,1)))
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.clear()
>>> G.nodes()
[]
>>> G.edges()
[]
"""
self.name = ''
self.adj.clear()
self.node.clear()
self.graph.clear()
def copy(self):
"""Return a copy of the graph.
Returns
-------
G : Graph
A copy of the graph.
See Also
--------
to_directed: return a directed copy of the graph.
Notes
-----
This makes a complete copy of the graph including all of the
node or edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.copy()
"""
return deepcopy(self)
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return False
def to_directed(self):
"""Return a directed representation of the graph.
Returns
-------
G : DiGraph
A directed graph with the same name, same nodes, and with
each edge (u,v,data) replaced by two directed edges
(u,v,data) and (v,u,data).
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
from networkx import DiGraph
G=DiGraph()
G.name=self.name
G.add_nodes_from(self)
G.add_edges_from( ((u,v,deepcopy(data))
for u,nbrs in self.adjacency_iter()
for v,data in nbrs.items()) )
G.graph=deepcopy(self.graph)
G.node=deepcopy(self.node)
return G
def to_undirected(self):
"""Return an undirected copy of the graph.
Returns
-------
G : Graph/MultiGraph
A deepcopy of the graph.
See Also
--------
copy, add_edge, add_edges_from
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
>>> G2 = H.to_undirected()
>>> G2.edges()
[(0, 1)]
"""
return deepcopy(self)
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
G.remove_nodes_from([n for n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch =self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n]=self.node[n]
# namespace shortcuts for speed
H_adj=H.adj
self_adj=self.adj
# add nodes and edges (undirected method)
for n in H.node:
Hnbrs={}
H_adj[n]=Hnbrs
for nbr,d in self_adj[n].items():
if nbr in H_adj:
# add both representations of edge: n-nbr and nbr-n
Hnbrs[nbr]=d
H_adj[nbr][n]=d
H.graph=self.graph
return H
def nodes_with_selfloops(self):
"""Return a list of nodes with self loops.
A node with a self loop has an edge with both ends adjacent
to that node.
Returns
-------
nodelist : list
A list of nodes with self loops.
See Also
--------
selfloop_edges, number_of_selfloops
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.nodes_with_selfloops()
[1]
"""
return [ n for n,nbrs in self.adj.items() if n in nbrs ]
def selfloop_edges(self, data=False):
"""Return a list of selfloop edges.
A selfloop edge has the same node at both ends.
Parameters
-----------
data : bool, optional (default=False)
Return selfloop edges as two-tuples (u,v) (data=False)
or three-tuples (u,v,data) (data=True)
Returns
-------
edgelist : list of edge tuples
A list of all selfloop edges.
See Also
--------
nodes_with_selfloops, number_of_selfloops
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.selfloop_edges()
[(1, 1)]
>>> G.selfloop_edges(data=True)
[(1, 1, {})]
"""
if data:
return [ (n,n,nbrs[n])
for n,nbrs in self.adj.items() if n in nbrs ]
else:
return [ (n,n)
for n,nbrs in self.adj.items() if n in nbrs ]
def number_of_selfloops(self):
"""Return the number of selfloop edges.
A selfloop edge has the same node at both ends.
Returns
-------
nloops : int
The number of selfloops.
See Also
--------
nodes_with_selfloops, selfloop_edges
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge(1,1)
>>> G.add_edge(1,2)
>>> G.number_of_selfloops()
1
"""
return len(self.selfloop_edges())
def size(self, weight=None):
"""Return the number of edges.
Parameters
----------
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
Returns
-------
nedges : int
The number of edges or sum of edge weights in the graph.
See Also
--------
number_of_edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.size()
3
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edge('a','b',weight=2)
>>> G.add_edge('b','c',weight=4)
>>> G.size()
2
>>> G.size(weight='weight')
6.0
"""
s=sum(self.degree(weight=weight).values())/2
if weight is None:
return int(s)
else:
return float(s)
def number_of_edges(self, u=None, v=None):
"""Return the number of edges between two nodes.
Parameters
----------
u,v : nodes, optional (default=all edges)
If u and v are specified, return the number of edges between
u and v. Otherwise return the total number of all edges.
Returns
-------
nedges : int
The number of edges in the graph. If nodes u and v are specified
return the number of edges between those nodes.
See Also
--------
size
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.number_of_edges()
3
>>> G.number_of_edges(0,1)
1
>>> e = (0,1)
>>> G.number_of_edges(*e)
1
"""
if u is None: return int(self.size())
if v in self.adj[u]:
return 1
else:
return 0
def add_star(self, nodes, **attr):
"""Add a star.
The first node in nodes is the middle of the star. It is connected
to all other nodes.
Parameters
----------
nodes : iterable container
A container of nodes.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in star.
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_star([0,1,2,3])
>>> G.add_star([10,11,12],weight=2)
"""
nlist = list(nodes)
v=nlist[0]
edges=((v,n) for n in nlist[1:])
self.add_edges_from(edges, **attr)
def add_path(self, nodes, **attr):
"""Add a path.
Parameters
----------
nodes : iterable container
A container of nodes. A path will be constructed from
the nodes (in order) and added to the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in path.
See Also
--------
add_star, add_cycle
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.add_path([10,11,12],weight=7)
"""
nlist = list(nodes)
edges=zip(nlist[:-1],nlist[1:])
self.add_edges_from(edges, **attr)
def add_cycle(self, nodes, **attr):
"""Add a cycle.
Parameters
----------
nodes: iterable container
A container of nodes. A cycle will be constructed from
the nodes (in order) and added to the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to every edge in cycle.
See Also
--------
add_path, add_star
Examples
--------
>>> G=nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_cycle([0,1,2,3])
>>> G.add_cycle([10,11,12],weight=7)
"""
nlist = list(nodes)
edges=zip(nlist,nlist[1:]+[nlist[0]])
self.add_edges_from(edges, **attr)
def nbunch_iter(self, nbunch=None):
"""Return an iterator of nodes contained in nbunch that are
also in the graph.
The nodes in nbunch are checked for membership in the graph
and if not are silently ignored.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
Returns
-------
niter : iterator
An iterator over nodes in nbunch that are also in the graph.
If nbunch is None, iterate over all nodes in the graph.
Raises
------
NetworkXError
If nbunch is not a node or a sequence of nodes.
If a node in nbunch is not hashable.
See Also
--------
Graph.__iter__
Notes
-----
When nbunch is an iterator, the returned iterator yields values
directly from nbunch, becoming exhausted when nbunch is exhausted.
To test whether nbunch is a single node, one can use
"if nbunch in self:", even after processing with this routine.
If nbunch is not a node or a (possibly empty) sequence/iterator
or None, a NetworkXError is raised. Also, if any object in
nbunch is not hashable, a NetworkXError is raised.
"""
if nbunch is None: # include all nodes via iterator
bunch=iter(self.adj.keys())
elif nbunch in self: # if nbunch is a single node
bunch=iter([nbunch])
else: # if nbunch is a sequence of nodes
def bunch_iter(nlist,adj):
try:
for n in nlist:
if n in adj:
yield n
except TypeError as e:
message=e.args[0]
import sys
sys.stdout.write(message)
# capture error for non-sequence/iterator nbunch.
if 'iter' in message:
raise NetworkXError(\
"nbunch is not a node or a sequence of nodes.")
# capture error for unhashable node.
elif 'hashable' in message:
raise NetworkXError(\
"Node %s in the sequence nbunch is not a valid node."%n)
else:
raise
bunch=bunch_iter(nbunch,self.adj)
return bunch
``` |