metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "jolares/ripe-banana-collector",
"score": 2
} |
#### File: ripe-banana-collector/p1_navigation/project_main.py
```python
from time import time
import numpy as np
import torch
from torch import save as torch_save
import torch.nn.functional as torch_func
import torch.optim as torch_optim
from pickle import dump as pkl_dump, load as pkl_load
from dotmap import DotMap
from python.unityagents import UnityEnvironment
from p1_navigation.DQN import DQN
from p1_navigation.EpsilonGreedyExploration import EpsilonGreedyExplorationStrategy
from p1_navigation.plots import plot_rewards
from p1_navigation.QNetwork import QNetwork
### Experiment Setup
TRAIN_AGENT = False
TEST_AGENT = True
TEST_MODE = False
RESULTS_CONFIG = DotMap({
'SAVE_REWARDS_DATA': True,
'SAVE_REWARDS_PLOT': True,
'SAVE_MODEL': True,
})
## Environment Config
RENDER_ENV = False
SEED = 51
ENV_PLATFORM = 'unity'
ENV_NAME = 'Banana'
## Training/Testing Config
NUM_TRAIN_EPISODES = 1000
NUM_TEST_EPISODES = 1000
MAX_NUM_STEPS = 10000
TRAIN_MODEL = True
PROGRESS_LOG_STEP_FREQUENCY = 1
MIN_PASSING_ACC_REWARD = 13.0
MIN_PASSING_NUM_EPISODES = 100
### Learning Algorithm's Hyper-params Config
ALPHA = 0.0001 # learning rate (used by optimizer)
GAMMA = 0.99 # discount rate
EPSILON_START = 0.5
EPSILON_MIN = 0.01
EPSILON_DECAY = 0.98
EXPLORATION_STRATEGY_FN = EpsilonGreedyExplorationStrategy
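# Note: the EpsilonGreedyExplorationStrategy class is not included in this file, so the
# sketch below is only an assumption about the usual epsilon-greedy rule it presumably
# implements: act randomly with probability epsilon, greedily otherwise, and decay
# epsilon each episode.
#
#     if np.random.rand() < epsilon:
#         action = np.random.randint(action_size)
#     else:
#         action = int(np.argmax(q_values))
#     epsilon = max(EPSILON_MIN, epsilon * EPSILON_DECAY)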
## Experience Replay
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 128 # minibatch size
MIN_NUM_BATCHES = 5
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
## Neural Network
HIDDEN_LAYERS_UNITS = (64, 64)
ACTIVATION_FNS = (torch_func.relu, torch_func.relu)
NETWORK_UPDATE_INTERVAL = 2 # time-step frequency for conducting Q-Network updates
TAU = 1e-3  # interpolation factor for the soft update of target network parameters
LOSS_FN = torch_func.mse_loss
OPTIMIZER_FN = torch_optim.Adam
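# Note: the DQN class itself is not shown in this file. TAU is conventionally the
# interpolation factor of a soft (Polyak) target-network update; the lines below are
# only a sketch of what is assumed to happen inside DQN every NETWORK_UPDATE_INTERVAL
# steps:
#
#     for target_p, online_p in zip(target_net.parameters(), online_net.parameters()):
#         target_p.data.copy_(TAU * online_p.data + (1.0 - TAU) * target_p.data)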
## Agents
if ENV_PLATFORM == 'unity':
env = UnityEnvironment(file_name=ENV_NAME + '.app', no_graphics=not RENDER_ENV)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=True)[brain_name] # reset the environment state
## MDP
state_space = env_info.vector_observations[0]
state_size = len(state_space)
action_size = brain.vector_action_space_size
### Run Experiments
agent = DQN(state_size=state_size,
action_size=action_size,
q_network_hidden_layers_dims=HIDDEN_LAYERS_UNITS,
q_network_activation_fns=ACTIVATION_FNS,
alpha=ALPHA,
gamma=GAMMA,
epsilon_start = EPSILON_START,
epsilon_min = EPSILON_MIN,
epsilon_decay=EPSILON_DECAY,
tau=TAU,
buffer_size=BUFFER_SIZE,
batch_size=BATCH_SIZE,
min_num_batches=MIN_NUM_BATCHES,
optimizer_fn=OPTIMIZER_FN,
loss_fn=LOSS_FN,
q_model_update_step_interval=NETWORK_UPDATE_INTERVAL,
exploration_strategy_fn=EXPLORATION_STRATEGY_FN,
device=DEVICE,
seed=SEED)
def train_agent(env_name: str,
env_platform: str,
num_episodes: int,
max_num_steps: int,
min_passing_acc_reward=MIN_PASSING_ACC_REWARD,
min_passing_num_episodes=MIN_PASSING_NUM_EPISODES,
progress_log_step_frequency=PROGRESS_LOG_STEP_FREQUENCY,
render=False):
    epoch_rewards = []  # list containing the accumulated reward from each episode
first_time_solved = False
start_time = time()
for episode_idx in range(num_episodes):
episode_acc_reward = 0
state = env.reset(train_mode=True)[brain_name].vector_observations[0] # reset the environment
for step in range(max_num_steps):
action = agent.choose_action(state=state)
step_feedback_info = env.step(vector_action=action)[brain_name]
next_state = step_feedback_info.vector_observations[0]
reward = step_feedback_info.rewards[0]
done = step_feedback_info.local_done[0]
agent.step(state, action, reward, next_state, done)
state = next_state
episode_acc_reward += reward
if done:
break
epoch_rewards.append(episode_acc_reward) # save most recent score
test_window_mean_acc_reward = np.mean(epoch_rewards[-min_passing_num_episodes:])
if progress_log_step_frequency and episode_idx % progress_log_step_frequency == 0:
print('\rEpisode {}\tMean Acc. Reward: {:.2f}\tEps: {:.3f}'.format(episode_idx,
test_window_mean_acc_reward,
agent.epsilon))
if not first_time_solved and test_window_mean_acc_reward >= min_passing_acc_reward:
first_time_solved = True
time_to_solve = '{:.3f}'.format(time() - start_time)
env_solved_log_msg = '\nEnvironment solved in {} episodes!\tAverage Score: {:.2f}'
            print(env_solved_log_msg.format(episode_idx - min_passing_num_episodes,
                                            test_window_mean_acc_reward))
stats = DotMap({
'time_to_solve': time_to_solve,
'epoch_train_time': '{:.3f}'.format(time() - start_time)
})
return epoch_rewards, stats
if TRAIN_AGENT:
acc_rewards, stats = train_agent(env_name='Banana',
num_episodes=NUM_TRAIN_EPISODES,
env_platform='unity',
max_num_steps=MAX_NUM_STEPS)
experiment_filename = '{epoch_train_time}-a_{alpha}-g_{gamma}-e_{epsilon}-edecay_{epsilon_decay}-emin_{epsilon_min}'\
.format(epoch_train_time=stats.epoch_train_time,
alpha=ALPHA,
gamma=GAMMA,
epsilon=EPSILON_START, epsilon_min=EPSILON_MIN, epsilon_decay=EPSILON_DECAY)
print("\n\nScore: {}".format(acc_rewards))
if RESULTS_CONFIG.SAVE_MODEL:
        torch_save(agent.online_q_network.state_dict(), experiment_filename + '.pth')
if RESULTS_CONFIG.SAVE_REWARDS_DATA:
pkl_dump(acc_rewards, open('./results/' + experiment_filename + ".p", 'wb'))
if TEST_AGENT:
    model = agent.target_q_network
    model.load_state_dict(torch.load('./models/checkpoint' + '.pth', map_location=DEVICE))
    model.eval()
    test_epoch_rewards = []  # list containing the accumulated reward from each episode
first_time_solved = False
test_start_time = time()
for episode_idx in range(NUM_TEST_EPISODES):
episode_acc_reward = 0
state = env.reset(train_mode=True)[brain_name].vector_observations[0] # reset the environment
for step in range(MAX_NUM_STEPS):
            state_tensor = torch.from_numpy(state).float().unsqueeze(0).to(DEVICE)
            with torch.no_grad():
                action_values = model(state_tensor)
            q_values = action_values.cpu().data.numpy().squeeze()
action = np.argmax(q_values)
step_feedback_info = env.step(vector_action=action)[brain_name]
next_state = step_feedback_info.vector_observations[0]
reward = step_feedback_info.rewards[0]
done = step_feedback_info.local_done[0]
agent.step(state, action, reward, next_state, done)
state = next_state
episode_acc_reward += reward
if done:
break
test_epoch_rewards.append(episode_acc_reward) # save most recent score
if PROGRESS_LOG_STEP_FREQUENCY and episode_idx % PROGRESS_LOG_STEP_FREQUENCY == 0:
            print('\rEpisode {}\tAcc. Reward: {:.2f}\tEps: {:.3f}'.format(episode_idx,
                                                                          episode_acc_reward,
                                                                          agent.epsilon))
test_stats = DotMap({
'test_epoch_time': '{:.3f}'.format(time() - test_start_time)
})
results_savetofilename = 'results/test'
plot_rewards(saveto_filename=results_savetofilename, data=test_epoch_rewards, ylim=(-5, 25), dpi=320)
if RESULTS_CONFIG.SAVE_REWARDS_PLOT:
results_savetofilename = 'acc_rewards_01'
acc_rewards = pkl_load(open('./results/' + results_savetofilename + '.p', 'rb'))
plot_rewards(saveto_filename=results_savetofilename, data=acc_rewards, ylim=(-5, 25), dpi=320)
env.close()
``` |
{
"source": "jolark/MaxPaint",
"score": 3
} |
#### File: src/gameObjects/enemy.py
```python
import random
import pygame
from pygame.locals import Rect
from pygame.color import THECOLORS
from bullet import Bullet
from utils import vect_sub,vect_mul,vect_norm, distance, interpolate
from fragment import Fragment
from gameObject_ import GameObject_
class Enemy(GameObject_):
def __init__(self, path, speed):
super(Enemy, self).__init__()
self.speed = speed
self.path = path
self.path_index = 0
self.rect = Rect(self.path[0],(64,32))
self.imgNormal = pygame.image.load("../img/enemies/enemy.png").convert_alpha()
self.imgRed = pygame.image.load("../img/enemies/enemyRed.png").convert_alpha()
self.img = self.imgNormal
# self.hitSound = pygame.mixer.Sound("../sounds/playerHit.wav")
self.bullets = []
self.bulletSpeed = 10
self.shootingDelay = 0
self.shootingDelayMax = 50
self.radius = 300
self.waitDelay = 0
self.lives = 1
self.hit = False
self.bulletFragments = []
self.shootSound = pygame.mixer.Sound('../sounds/enemyShoot.wav')
def shootAtTarget(self,targetPosition):
# path = [(self.rect.x+20,self.rect.y+20), vect_add(targetPosition,vect_mul(vect_sub(targetPosition, self.rect.topleft),10))]
direction_raw = vect_sub(targetPosition,self.rect.topleft)
direction = vect_mul(direction_raw,1.0/(vect_norm(direction_raw)))
b = Bullet((self.rect.x+20,self.rect.y+20),direction, 'grey')# random.choice(["blue","red","yellow"]))
self.bullets.append(b)
self.shootingDelay = self.shootingDelayMax
self.shootSound.play()
def updateBullets(self, player, blocks):
        for b in list(self.bullets):  # iterate over a copy; bullets may be removed below
b.update(self.bulletSpeed)
if b.outOfScreen:
self.bullets.remove(b)
else:
if b.rect.colliderect(player.rect):
if player.shieldDelay == 0:
# player.killed = True
player.hit = True
for _ in range(random.randint(3,15)):
self.bulletFragments.append(Fragment(b.rect.center,THECOLORS[b.color]))
self.bullets.remove(b)
else:
for block in blocks:
if b.rect.colliderect(block.rect):
# if b.nbCollide == 3:
for _ in range(random.randint(3,15)):
self.bulletFragments.append(Fragment(b.rect.center,THECOLORS[b.color]))
self.bullets.remove(b)
if block.selected:
self.hit = True
blocks.remove(block)
# b.nbCollide += 1
# b.direction = (b.direction[0],-b.direction[1])
break
def bulletFragmentsUpdate(self):
        for bf in list(self.bulletFragments):  # iterate over a copy; fragments may be removed below
bf.update()
if bf.kill:
self.bulletFragments.remove(bf)
def update(self, player, blocks):
if player.slomoDelay > 0:
self.bulletSpeed = 3
self.shootingDelayMax = 150
else:
self.bulletSpeed = 10
self.shootingDelayMax = 50
# shooting
if not player.killed:
if distance(player.rect.center,self.rect.center ) < self.radius:
if self.rect.y < player.rect.y:
self.img = self.imgRed
if self.shootingDelay == 0 \
and len(self.bullets) < 3:
self.shootAtTarget(player.rect.center)
else:
self.img = self.imgNormal
if self.shootingDelay > 0:
self.shootingDelay -= 1
self.updateBullets( player,blocks)
self.bulletFragmentsUpdate()
def renderBullets(self, displaySurface, camera):
for b in self.bullets:
b.render(displaySurface, camera)
def renderBulletFragments(self, displaySurface, camera):
for bf in self.bulletFragments:
bf.render(displaySurface,camera)
def render(self, displaySurface,camera):
displaySurface.blit(self.img, camera.apply(Rect(self.rect.x, self.rect.y, 0, 0)), (0, 64*0*(3-self.lives), 64, 64))
# displaySurface.fill(THECOLORS['grey'],self.rect)
self.renderBullets(displaySurface,camera)
self.renderBulletFragments(displaySurface, camera)
```
#### File: src/gameObjects/gameObject_.py
```python
class GameObject_(object):
def __init__(self):
pass
def render(self, screen, camera=None):
raise NotImplementedError
def update(self):
raise NotImplementedError
```
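`GameObject_` is a minimal abstract base: concrete objects such as `Enemy` and `Player` override `render` and `update`. As a hypothetical illustration (this class is not part of the repository), a trivial subclass following the same contract could look like this:
```python
# Hypothetical example only: a minimal GameObject_ subclass using the same
# render/update contract as Enemy and Player above.
import pygame
from pygame.locals import Rect
from gameObject_ import GameObject_


class StaticDecor(GameObject_):
    def __init__(self, position, image_path):
        super(StaticDecor, self).__init__()
        self.rect = Rect(position, (64, 64))
        self.img = pygame.image.load(image_path).convert_alpha()

    def update(self):
        pass  # purely decorative, nothing to simulate

    def render(self, screen, camera=None):
        # Draw at the camera-adjusted position, like the other game objects do
        target = camera.apply(self.rect) if camera is not None else self.rect
        screen.blit(self.img, target)
```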
#### File: src/gameObjects/player.py
```python
import sys
import random
import pygame
from pygame.locals import Rect
from pygame.color import THECOLORS
sys.path.append('../../lib/pyganim/')
import pyganim
from bullet import Bullet
from gameObject_ import GameObject_
from fragment import Fragment
import utils
class Player(GameObject_):
def __init__(self):
super(Player, self).__init__()
# physics properties
self.x_vel = self.y_vel = self.y_vel_i = 0
self.grav = 20
self.fall = False
self.time = 0 # None
self.speed = 10
self.jump_power = 10
self.rect = Rect(0, 0, 60, 60)
self.rect.topleft = (150,3000)
self.collide_ls = [] #what obstacles does the player collide with
self.direction = 1
self.direction_offset = 0
self.animation_offset = 0
self.animationTicks = 0
self.spidering = False
self.onBlock = None
self.finished = False
self.kup_pressed = False
# game properties
self.bullets = []
self.mines = 0
self.minesCount = 0
self.mining = False
self.slomoDelay = 0
self.shields = 0
self.shieldsCount = 0
self.shieldDelay = 0
self.killed = False
self.hit = False
self.hitDelay = 0
self.timePower = 0
self.timePowerCount = 0
# pics, anims and sound
self.shieldAnim = pyganim.loadAnim('../img/anims/shield',0.1,True)
self.timeAnim = pyganim.loadAnim('../img/anims/time',0.1,True)
self.shootSound = pygame.mixer.Sound("../sounds/playerShoot.wav")
self.shieldSound = pygame.mixer.Sound("../sounds/playerShield.wav")
self.hitSound = pygame.mixer.Sound("../sounds/playerHit.wav")
self.noteRed = pygame.mixer.Sound('../sounds/notes/plouit_red.wav')
self.noteBlue = pygame.mixer.Sound('../sounds/notes/plouit_blue.wav')
self.noteYellow = pygame.mixer.Sound('../sounds/notes/plouit_yellow.wav')
self.img = pygame.image.load("../img/player/kube_new_pix.png").convert_alpha()
def physicsUpdate(self):
if self.fall:
self.time += 2 # frame_number # pygame.time.get_ticks()
self.y_vel = self.grav*((self.time)/100.0) + self.y_vel_i
else:
self.time = 0
self.y_vel = 0
def positionUpdate(self,blocks):
"""Calculate where our player will end up this frame including collissions."""
#Has the player walked off an edge?
if not self.fall and not self.collide_with(blocks,[0,1]):
self.fall = True
#Has the player landed from a fall or jumped into an object above them?
elif self.fall and self.collide_with(blocks,[0,int(self.y_vel)]):
self.y_vel = self.adjust_pos(self.collide_ls,[0,int(self.y_vel)],1)
self.y_vel_i = 0
self.fall = False
self.rect.y += int(self.y_vel) #Update y position before testing x.
#Is the player running into a wall?.
if self.collide_with(blocks,(int(self.x_vel),0)):
self.x_vel = self.adjust_pos(self.collide_ls,[int(self.x_vel),0],0)
self.rect.x += int(self.x_vel)
if self.rect.x < 0:
self.rect.x = 0
elif self.rect.x > 740:
self.rect.x = 740
if self.rect.y < 40:
self.rect.y = 40
elif self.rect.y > 3160:
self.rect.y = 3200
# if not self.collide_ls:
# self.onBlock = None
def hitUpdate(self):
if self.hit:
if not self.fall:
self.fall = True
self.y_vel_i = 2
self.hit = False
def adjust_pos(self,blocks,offset,off_ind):
offset[off_ind] += (1 if offset[off_ind]<0 else -1)
while 1:
if any(self.collide_with(self.collide_ls,offset)):
offset[off_ind] += (1 if offset[off_ind]<0 else -1)
else:
return offset[off_ind]
def collide_with(self,blocks,offset):
test = ((self.rect.x+offset[0],self.rect.y+offset[1]),self.rect.size)
self.collide_ls = []
for block in blocks:
if pygame.Rect(test).colliderect(block.rect):
if self.rect.y > block.rect.y and block.rect.x-60 < self.rect.x < block.rect.x+100:
self.spidering = True
elif self.rect.y < block.rect.y:
self.spidering = False
self.collide_ls.append(block)
self.onBlock = block
if not block.active:
block.active = True
self.addPowerUp(block)
return self.collide_ls
def addPowerUp(self,block):
if block.color == "red":
self.minesCount += 1
if self.minesCount == 5:
self.minesCount = 0
self.mines += 1
self.noteRed.play()
block.addingPower = True
if block.color == "blue":
self.shieldsCount += 1
if self.shieldsCount == 5:
self.shieldsCount = 0
self.shields += 1
self.noteBlue.play()
block.addingPower = True
if block.color == "yellow":
self.timePowerCount += 1
if self.timePowerCount == 5:
self.timePowerCount = 0
self.timePower += 1
self.noteYellow.play()
block.addingPower = True
def shieldUpdate(self):
if self.shieldDelay > 0:
self.shieldDelay -= 1
def slomoUpdate(self):
if self.slomoDelay > 0:
self.slomoDelay -= 1
def animationUpdate(self,frame_number):
self.direction_offset = self.spidering*128 + (self.direction==-1) * 64
if abs(self.x_vel) > 1:
self.animation_offset = 64 *int(self.animationTicks / 8 % 4)
elif self.fall and not self.spidering:
self.animation_offset = 128
else:
self.animation_offset = 0
if self.animationTicks == 32:
self.animationTicks = 0
else:
self.animationTicks += 1
def controlsUpdate(self):
keys = pygame.key.get_pressed()
self.x_vel = 0
if not self.finished:
if keys[pygame.K_LEFT]:
self.x_vel -= self.speed
self.direction = -1
if keys[pygame.K_RIGHT]:
self.x_vel += self.speed
self.direction = 1
if keys[pygame.K_UP] and not self.kup_pressed:
self.kup_pressed = True
self.y_vel_i = -self.jump_power
self.fall = True
if not keys[pygame.K_UP]:
self.kup_pressed = False
if keys[pygame.K_x]:
if self.onBlock and self.onBlock.active and not self.onBlock.selected \
and not self.mining and self.mines > 0:
self.onBlock.selected = True
self.mines -= 1
self.mining = True
elif not keys[pygame.K_x]:
self.mining = False
if keys[pygame.K_w]:
if self.slomoDelay == 0 and self.timePower > 0 and self.shieldDelay == 0:
self.timePower -= 1
self.timeAnim.play()
self.slomoDelay = 100
if keys[pygame.K_c]:
if self.shieldDelay == 0 and self.shields > 0 and self.slomoDelay == 0:
self.shields -= 1
self.shieldAnim.play()
self.shieldDelay = 100
if self.rect.y < 100 and keys[pygame.K_UP]:
self.finished = True
def update(self, blocks, frame_number):
self.controlsUpdate()
self.positionUpdate(blocks)
self.physicsUpdate()
self.hitUpdate()
self.shieldUpdate()
self.slomoUpdate()
self.animationUpdate(frame_number)
def render(self, displaySurface,camera):
displaySurface.blit(self.img, camera.apply(self.rect) , (self.animation_offset, self.direction_offset, 64, 64))
if self.shieldDelay > 0:
self.shieldAnim.blit(displaySurface, camera.apply(Rect(self.rect.x -20, self.rect.y-15, 0, 0)))
if self.slomoDelay > 0:
self.timeAnim.blit(displaySurface, camera.apply(Rect(self.rect.x +16+16*(self.direction == 1), self.rect.y+16+16*self.spidering, 0, 0)))
```
#### File: src/screens/mainMenuScreen.py
```python
import sys
import pygame
from pygame.locals import *
from pygame.color import THECOLORS
from screen_ import Screen_
from utils import save
#import pgext
sys.path.append('../')
import utils
class MainMenuScreen(Screen_):
def __init__(self):
super(MainMenuScreen, self).__init__()
self.font = utils.getFont('VolterGoldfish', 44)
self.infofont = utils.getFont('VolterGoldfish', 18)
self.background = pygame.image.load("../img/backgrounds/mainMenu.png").convert()
self.menuEntries = ["Start Game","Options","Quit"] ## add continue -> levelMenuScreen
self.menuPositions = [(200,100),(200,300),(200,500)]
self.menuInfo = ["Start or continue your adventure","Change game and user settings","Exit the game. Goodbye!"]
self.menuChoice = 0
self.activeColor = THECOLORS["black"]
self.inactiveColor = THECOLORS["grey64"]
def render(self,backgroundScreen):
backgroundScreen.blit(self.background, (0,0))
backgroundScreen.blit(self.infofont.render(self.menuInfo[self.menuChoice], 1, THECOLORS["white"]),(200,605))
for i in range(len(self.menuEntries)):
if i == self.menuChoice:
backgroundScreen.blit(self.font.render(self.menuEntries[i], 1, self.activeColor), self.menuPositions[i])
else:
backgroundScreen.blit(self.font.render(self.menuEntries[i], 1, self.inactiveColor), self.menuPositions[i])
def handle_events(self, events):
for event in events:
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
self.manager.go_to('startScreen')
elif event.key == K_RETURN:
if self.menuChoice == 0:
# self.manager.go_to('levelSelectScreen')
save([('powers',[0,0,0])])
self.manager.go_to_game(0)
elif self.menuChoice == 1:
self.manager.go_to('optionsScreen')
elif self.menuChoice == 2:
                        sys.exit()
elif event.key == K_UP:
self.menuChoice = (self.menuChoice - 1) % len(self.menuEntries)
elif event.key == K_DOWN:
self.menuChoice = (self.menuChoice + 1) % len(self.menuEntries)
```
#### File: src/screens/optionsScreen.py
```python
import sys
import pygame
from pygame.locals import *
from pygame.color import *
from screen_ import Screen_
sys.path.append('../')
import utils
from utils import save,load,exist
class OptionsScreen(Screen_):
def __init__(self):
super(OptionsScreen, self).__init__()
self.font = utils.getFont('VolterGoldfish', 44)
self.infofont = utils.getFont('VolterGoldfish', 18)
self.keyfont = utils.getFont('VolterGoldfish', 24)
self.background = pygame.image.load("../img/backgrounds/options.png").convert()
self.soundBar = pygame.image.load("../img/backgrounds/soundBar.png").convert_alpha()
self.musicBar = pygame.image.load("../img/backgrounds/soundBar.png").convert_alpha()
self.keyImg = pygame.image.load("../img/backgrounds/key.png").convert_alpha()
self.volumeTest = pygame.mixer.Sound("../sounds/volumeTest.wav")
self.menuEntries = ["Music","Sound","Controls","Back"]
self.menuPositions = [(200,100),(200,200),(200,300),(200,500)]
self.menuChoice = 0
self.activeColor = THECOLORS["black"]
self.inactiveColor = THECOLORS["grey64"]
self.soundLevel = 0
if exist('soundLevel'):
self.soundLevel = load('soundLevel')
self.musicLevel = 0
if exist('musicLevel'):
self.musicLevel = load('musicLevel')
self.actionKeys = ['slow time','shield','trap','left','jump','right']
self.keys = ['w','x','c','<','^','>']
self.actionKeysPos = [(200,450),(330,450),(430,450),(530,450),(630,450),(730,450)]
def render(self, backgroundScreen):
backgroundScreen.blit(self.background, (0,0))
for i in range(len(self.menuEntries)):
if i == self.menuChoice:
backgroundScreen.blit(self.font.render(self.menuEntries[i], 1, self.activeColor), self.menuPositions[i])
else:
backgroundScreen.blit(self.font.render(self.menuEntries[i], 1, self.inactiveColor), self.menuPositions[i])
for i in range(len(self.actionKeys)):
backgroundScreen.blit(self.infofont.render(self.actionKeys[i],1,self.inactiveColor),(200+(i>0)*(i*100+30),450))
backgroundScreen.blit(self.keyImg,(200+(i>0)*(i*100+30),380))
backgroundScreen.blit(self.keyfont.render(self.keys[i],1,self.inactiveColor),(200+(i>0)*(i*100+30)+20,390))
for i in range(5):
backgroundScreen.blit(self.soundBar, (400+i*40,120), (0, (self.soundLevel > i)*30, 30, 30))
backgroundScreen.blit(self.musicBar, (400+i*40,220), (0, (self.musicLevel > i)*30, 30, 30))
def handle_events(self, events):
for event in events:
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
save([('soundLevel',self.soundLevel),('musicLevel',self.musicLevel)])
self.manager.go_to('mainMenuScreen')
if event.key == K_RETURN:
if self.menuChoice == 3:
save([('soundLevel',self.soundLevel),('musicLevel',self.musicLevel)])
self.manager.go_to('mainMenuScreen')
if event.key == K_UP:
self.menuChoice = (self.menuChoice - 1) % len(self.menuEntries)
if event.key == K_DOWN:
self.menuChoice = (self.menuChoice + 1) % len(self.menuEntries)
if event.key == K_LEFT:
if self.menuChoice == 0:
self.soundLevel = max(0,self.soundLevel-1)
self.volumeTest.set_volume(self.soundLevel*0.2)
self.volumeTest.play()
if self.menuChoice == 1:
self.musicLevel = max(0,self.musicLevel-1)
pygame.mixer.music.set_volume(self.musicLevel*0.2)
self.volumeTest.set_volume(self.musicLevel*0.2)
self.volumeTest.play()
if event.key == K_RIGHT:
if self.menuChoice == 0:
self.soundLevel = min(5,self.soundLevel+1)
self.volumeTest.set_volume(self.soundLevel*0.2)
self.volumeTest.play()
if self.menuChoice == 1:
self.musicLevel = min(5,self.musicLevel+1)
pygame.mixer.music.set_volume(self.musicLevel*0.2)
self.volumeTest.set_volume(self.musicLevel*0.2)
self.volumeTest.play()
if event.type == QUIT:
save([('soundLevel',self.soundLevel),('musicLevel',self.musicLevel)])
```
#### File: src/screens/screenManager.py
```python
from startScreen import StartScreen
from optionsScreen import OptionsScreen
from mainMenuScreen import MainMenuScreen
from levelSelectScreen import LevelSelectScreen
from gameScreen import GameScreen
class ScreenManager(object):
def __init__(self):
self.screen = StartScreen()
self.screen.manager = self
def go_to(self, screen):
if screen == 'levelSelectScreen':
self.screen = LevelSelectScreen()
elif screen == 'mainMenuScreen':
self.screen = MainMenuScreen()
elif screen == 'optionsScreen':
self.screen = OptionsScreen()
elif screen == 'startScreen':
self.screen = StartScreen()
self.screen.manager = self
def go_to_game(self,index):
self.screen = GameScreen(index)
self.screen.manager = self
```
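`go_to` dispatches on a screen name with an if/elif chain. A table-driven variant (a sketch only, not part of the repository) keeps the mapping in one place, so adding a screen means adding one dictionary entry instead of another branch:
```python
# Sketch of an alternative, dictionary-based dispatch for the screen manager.
from startScreen import StartScreen
from optionsScreen import OptionsScreen
from mainMenuScreen import MainMenuScreen
from levelSelectScreen import LevelSelectScreen


SCREEN_CLASSES = {
    'startScreen': StartScreen,
    'mainMenuScreen': MainMenuScreen,
    'optionsScreen': OptionsScreen,
    'levelSelectScreen': LevelSelectScreen,
}


class TableDrivenScreenManager(object):
    def __init__(self):
        self.go_to('startScreen')

    def go_to(self, screen):
        screen_cls = SCREEN_CLASSES.get(screen)
        if screen_cls is not None:
            self.screen = screen_cls()
            self.screen.manager = self
```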
#### File: src/screens/screen_.py
```python
class Screen_(object):
def __init__(self):
pass
def render(self, screen):
raise NotImplementedError
def update(self):
pass
def handle_events(self, events):
raise NotImplementedError
``` |
{
"source": "jolars/BlitzL1",
"score": 2
} |
#### File: python/blitzl1/_blitzl1.py
```python
import os
import numpy as np
import ctypes
from scipy import sparse
import pickle
_dir = os.path.abspath(os.path.dirname(__file__))
_lib = np.ctypeslib.load_library("libblitzl1", _dir)
_index_t = ctypes.c_int32
_value_t = ctypes.c_double
_size_t = ctypes.c_int32
_pointer = ctypes.POINTER(ctypes.c_void_p)
_value_t_p = ctypes.POINTER(_value_t)
_index_t_p = ctypes.POINTER(_index_t)
_size_t_p = ctypes.POINTER(_size_t)
_char_p = ctypes.c_char_p
_bool = ctypes.c_bool
_int = ctypes.c_int
_lib.BlitzL1_new_sparse_dataset.restype = _pointer
_lib.BlitzL1_new_sparse_dataset.argtypes = [
_index_t_p, _index_t_p, _value_t_p, _value_t_p, _size_t, _size_t, _size_t]
_lib.BlitzL1_new_dense_dataset.restype = _pointer
_lib.BlitzL1_new_dense_dataset.argtypes = [
_value_t_p, _value_t_p, _size_t, _size_t]
_lib.BlitzL1_get_column_norm.restype = _value_t
_lib.BlitzL1_get_column_norm.argtype = [_pointer, _index_t]
_lib.BlitzL1_get_label_i.restype = _value_t
_lib.BlitzL1_get_label_i.argtype = [_pointer, _index_t]
_lib.BlitzL1_new_solver.restype = _pointer
_lib.BlitzL1_new_solver.argtype = None
_lib.BlitzL1_solve_problem.restype = None
_lib.BlitzL1_solve_problem.argtype = [_pointer, _pointer, _value_t, _char_p,
_value_t_p, _value_t, _char_p, _value_t, _value_t, _int, _int, _char_p]
_lib.BlitzL1_set_tolerance.restype = None
_lib.BlitzL1_set_tolerance.argtype = [_pointer, _value_t]
_lib.BlitzL1_get_tolerance.restype = _value_t
_lib.BlitzL1_get_tolerance.argtype = None
_lib.BlitzL1_set_max_time.restype = None
_lib.BlitzL1_set_max_time.argtype = [_pointer, _value_t]
_lib.BlitzL1_get_max_time.restype = _value_t
_lib.BlitzL1_get_max_time.argtype = None
_lib.BlitzL1_set_use_intercept.restype = None
_lib.BlitzL1_set_use_intercept.argtype = [_pointer, _bool]
_lib.BlitzL1_get_use_intercept.restype = _bool
_lib.BlitzL1_get_use_intercept.argtype = None
_lib.BlitzL1_set_verbose.restype = None
_lib.BlitzL1_set_verbose.argtype = [_pointer, _bool]
_lib.BlitzL1_get_verbose.restype = _bool
_lib.BlitzL1_get_verbose.argtype = None
_lib.BlitzL1_compute_lambda_max.restype = _value_t
_lib.BlitzL1_compute_lambda_max.argtype = [_pointer, _pointer, _char_p]
_solver = _lib.BlitzL1_new_solver()
def set_tolerance(value):
_lib.BlitzL1_set_tolerance(_solver, _value_t(value))
def get_tolerance():
return _lib.BlitzL1_get_tolerance(_solver)
def set_max_time(value):
_lib.BlitzL1_set_max_time(_solver, _value_t(value))
def get_max_time():
return _lib.BlitzL1_get_max_time(_solver)
def set_use_intercept(value):
_lib.BlitzL1_set_use_intercept(_solver, _bool(value))
def get_use_intercept():
return _lib.BlitzL1_get_use_intercept(_solver)
def set_verbose(value):
_lib.BlitzL1_set_verbose(_solver, _bool(value))
def get_verbose():
return _lib.BlitzL1_get_verbose(_solver)
def data_as(obj, ctypes_type):
if obj.dtype != ctypes_type._type_:
obj = obj.astype(ctypes_type._type_)
return (obj, obj.ctypes.data_as(ctypes_type))
class _L1Problem(object):
def __init__(self, A, b):
self._loss_arg = _char_p(self._LOSS_TYPE.encode('utf-8'))
self._load_dataset(A, b)
def _load_dataset(self, A, b):
self._shape = A.shape
n = _size_t(A.shape[0])
d = _size_t(A.shape[1])
(self._b, labels_arg) = data_as(b, _value_t_p)
if sparse.issparse(A):
if not sparse.isspmatrix_csc(A):
A = A.tocsc()
(self._indices, indices_arg) = data_as(A.indices, _index_t_p)
(self._indptr, indptr_arg) = data_as(A.indptr, _index_t_p)
(self._data, data_arg) = data_as(A.data, _value_t_p)
nnz = _size_t(A.nnz)
self._dataset = _lib.BlitzL1_new_sparse_dataset(
indices_arg, indptr_arg, data_arg, labels_arg, n, d, nnz)
else:
if not A.flags.f_contiguous:
A = np.asfortranarray(A)
(self._data, data_arg) = data_as(A, _value_t_p)
self._dataset = _lib.BlitzL1_new_dense_dataset(
data_arg, labels_arg, n, d)
def _get_A_column_norm(self, j):
return _lib.BlitzL1_get_column_norm(self._dataset, _index_t(j))
def _get_label_i(self, i):
return _lib.BlitzL1_get_label_i(self._dataset, _index_t(i))
def compute_lambda_max(self):
return _lib.BlitzL1_compute_lambda_max(_solver, self._dataset, self._loss_arg)
def solve(self,
l1_penalty,
initial_x=None,
initial_intercept=None,
log_directory="",
max_iter=20):
(n, d) = self._shape
# Initial conditions:
if initial_x is not None:
x = initial_x
else:
x = np.zeros(d)
(x, x_arg) = data_as(x, _value_t_p)
if initial_intercept is not None:
intercept_arg = _value_t(initial_intercept)
else:
intercept_arg = _value_t(0.0)
# Regularization strength:
lambda_arg = _value_t(l1_penalty)
# Maximum number of iterations
max_iter_arg = _int(max_iter)
# Log directory:
if log_directory:
try:
os.mkdir(log_directory)
            except OSError:
                pass
log_dir_arg = _char_p(log_directory.encode('utf-8'))
# Misc solution variables:
obj_arg = _value_t()
duality_gap_arg = _value_t()
num_itr_arg = _int()
solution_status = " " * 64
solution_status_arg = _char_p(solution_status.encode('utf-8'))
# Solve problem:
_lib.BlitzL1_solve_problem(_solver,
self._dataset,
lambda_arg,
self._loss_arg,
x_arg,
ctypes.byref(intercept_arg),
solution_status_arg,
ctypes.byref(obj_arg),
ctypes.byref(duality_gap_arg),
ctypes.byref(num_itr_arg),
max_iter_arg,
log_dir_arg)
        solution_status = solution_status_arg.value.decode('utf-8').strip()
# Return solution object:
return self._SOLUTION_TYPE(x,
intercept_arg.value,
obj_arg.value,
duality_gap_arg.value,
num_itr_arg.value,
solution_status)
def load_solution(filepath):
in_file = open(filepath, 'rb')
sol = pickle.load(in_file)
in_file.close()
return sol
class _Solution(object):
def __init__(self, x, intercept, obj, duality_gap, num_itr, status):
self.x = x
self.intercept = intercept
self.objective_value = obj
self.duality_gap = duality_gap
self.status = status
self._num_iterations = num_itr
def _compute_Ax(self, A):
if sparse.issparse(A):
result = A * np.mat(self.x).T + self.intercept
return np.array(result).flatten()
else:
return np.dot(A, self.x) + self.intercept
def save(self, filepath):
out_file = open(filepath, "wb")
pickle.dump(self, out_file)
out_file.close()
class LassoSolution(_Solution):
def predict(self, A):
return self._compute_Ax(A)
def evaluate_loss(self, A, b):
predictions = self.predict(A)
return 0.5 * np.linalg.norm(b - predictions) ** 2
class LogRegSolution(_Solution):
def predict(self, A):
Ax = self._compute_Ax(A)
return 1 / (1 + np.exp(-Ax))
def evaluate_loss(self, A, b):
exp_mbAx = np.exp(-b * self._compute_Ax(A))
return sum(np.log1p(exp_mbAx))
class LassoProblem(_L1Problem):
_LOSS_TYPE = "squared"
_SOLUTION_TYPE = LassoSolution
class LogRegProblem(_L1Problem):
_LOSS_TYPE = "logistic"
_SOLUTION_TYPE = LogRegSolution
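# Example usage (illustrative sketch only; the data and penalty below are made up):
#
#     import numpy as np
#     A = np.random.randn(100, 20)
#     b = np.random.randn(100)
#     prob = LassoProblem(A, b)
#     lammax = prob.compute_lambda_max()
#     sol = prob.solve(0.1 * lammax)
#     print(sol.objective_value, sol.duality_gap, sol.status)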
``` |
{
"source": "jolarter/algoritmos",
"score": 4
} |
#### File: jolarter/algoritmos/com.py
```python
def comb(n,k):
if k==0:
r=1
elif n<k:
r=0
else:
r = comb(n-1,k-1)+comb(n-1,k)
return r
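# comb implements Pascal's rule directly: C(n, k) = C(n-1, k-1) + C(n-1, k), with
# base cases C(n, 0) = 1 and C(n, k) = 0 when n < k. The plain recursion recomputes
# the same subproblems exponentially often, which is what the memoized (combm) and
# bottom-up (dynamiccom, dynamiccomb2) versions below avoid.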
print comb(10,5)
print comb(4,2)
print "Recursivo"
#print comb(45,6)
def combma(n,k,mem):
    if mem[n][k] is None:
if k==0:
mem[n][k]=1
elif n<k:
mem[n][k]=0
else:
mem[n][k] = combma(n-1,k-1,mem)+combma(n-1,k,mem)
return mem[n][k]
def combm(n,k):
mem = [[None for j in range(0,k+1)] for i in range(0,n+1)]
r = combma(n,k,mem)
print(mem)
return r
print "Con memoria"
print combm(8,4)
def dynamiccom(n,k):
mem = [[None for j in range(0,k+1)] for i in range(0,n+1)];
for i in range(0,n+1):
for j in range(0,k+1):
if j == 0:
mem[i][j] = 1
elif i < j:
mem[i][j] = 0
else:
mem[i][j] = mem[i-1][j-1]+mem[i-1][j]
print(mem)
return mem[n][k]
print "Dinamico"
#print dynamiccom(45,6)
print dynamiccom(8,4)
def dynamiccomb2(n,k):
mem = [1] + [0 for j in range(1,k+1)]
for i in range(1, n+1):
for j in range(min(n,k),max(i-k-1,0),-1):
mem[j] = mem[j]+mem[j-1]
print(mem)
return mem[k]
print dynamiccomb2(8,4)
```
#### File: jolarter/algoritmos/fib.py
```python
def fib(n):
if n==0 or n==1:
r=1
s=1
else:
r1,s1=fib(n-1)
r2,s2=fib(n-2)
r=r1+r2
s=s1+s2+1
return r,s
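# fib returns a pair (value, calls): the n-th Fibonacci number and the number of
# recursive calls made to compute it, so the memoized and bottom-up versions below
# can be compared by the amount of work done as well as by the result.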
def fibma(n,mem):
    if mem[n] is None:
if n==0 or n==1:
mem[n]=1
s=1
else:
r1,s1=fibma(n-1,mem)
r2,s2=fibma(n-2,mem)
mem[n]=r1+r2
s=s1+s2+1
return mem[n],s
else:
return mem[n],1
def fibm(n):
mem = [None for i in range(0,n+1)]
return fibma(n,mem)
def dynamicfib(n):
mem = [None for i in range(0,n+1)]
mem[0] = 1
mem[1] = 1
for i in range(2,n+1):
mem[i] = mem[i-1]+mem[i-2]
return mem[n]
def dynamicfib2(n):
mem = [1,1]
for i in range(2,n+1):
mem[i & 1] = mem[(i-1) & 1] + mem[(i-2) & 1]
return mem[n & 1]
print fib(30)
print fibm(30)
print dynamicfib(30)
print dynamicfib2(30)
```
#### File: jolarter/algoritmos/morral.py
```python
def u(j,w,weights,utilities):
if j == 0:
result = 0
elif w < weights[j]:
result = u(j - 1, w, weights,utilities)
else:
result = max(u(j-1, w, weights, utilities), utilities[j] + u(j-1, w - weights[j], weights, utilities))
return result
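# u(j, w) is the classic 0/1 knapsack recurrence: the best utility achievable with
# items 1..j and capacity w is either u(j-1, w) (item j skipped) or
# utilities[j] + u(j-1, w - weights[j]) (item j taken), whichever is larger.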
def um(j,w,weights,utilities,m):
    if m[j][w] is None:
if j == 0:
result = 0
elif w < weights[j]:
            result = um(j - 1, w, weights,utilities,m)
else:
            result = max(um(j-1, w, weights, utilities,m), utilities[j] + um(j-1, w - weights[j], weights, utilities,m))
m[j][w] = result
else:
result = m[j][w]
return result
def umm(w,weights,utilities):
n = len(weights)-1
    m = [[None for i in range(0,w+1)] for j in range(0,n+1)]
    print(m)
    return um(n,w,weights,utilities,m)
def udyn0(w,weights,utilities):
    n = len(weights)-1
    m = [[0 for i in range(0,w+1)]]+[[None for i in range(0,w+1)] for j in range(1,n+1)]
    for j in range(1,n+1):  # bottom-up fill mirroring the recursive u() above
        for x in range(0,w+1):
            m[j][x] = m[j-1][x] if x < weights[j] else max(m[j-1][x], utilities[j]+m[j-1][x-weights[j]])
    print (m)
    return m[n][w]
w0=[1,2,3,4]
u0=[240,200,140,150]
umm(4,w0,u0)
udyn0(4,w0,u0)
```
#### File: jolarter/algoritmos/tablashash.py
```python
class HashTable:
def __init__(self,n):
self.n = n
self.arr = [-1 for i in range(0,n)]
def insertar(self, key, value):
self.arr[self.hash(key)]=value
def buscar(self, key):
return self.arr[self.hash(key)]
def hash(self, cadena):
sum = 0
for i in range(len(cadena)): sum += ord(cadena[i])
return sum % self.n
def hash2(self, cadena):
sum = 0
for i in range(len(cadena)): sum += ord(cadena[i])*(self.n**i)
return (sum/self.n) % self.n
def __str__(self):
return str(self.arr)
ht = HashTable(17)
ht.insertar("key1","Julian")
ht.insertar("key2","tttt")
ht.insertar("key3","Laura")
print(ht)
print(ht.buscar("key1"))
print(ht.buscar("key2"))
print(ht.buscar("key3"))
``` |
{
"source": "joldie/bioinformatics-tools",
"score": 3
} |
#### File: joldie/bioinformatics-tools/find_conserved_blocks_v2.2.py
```python
from Bio.Alphabet import IUPAC
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
import time
import os
# Measure execution time of script
start_time = time.time()
# Program settings
###########################
matchingFraction = 0.85 # 1 = identical (no mismatches)
minLength = 200 # minimum number of base pairs matching in sequence
minSequences = 7 # minimum number of sequences in a block with matching sequence
# Input file data
###########################
inputFileName = "file.fasta"
inputFileType = "fasta"
alphabetFormat = IUPAC.unambiguous_dna
gapCharacter = "-"
inputFilePath = os.path.dirname(
os.path.realpath(__file__)) + "/" + inputFileName
outputFilePath = inputFilePath + "_(" + str(matchingFraction) + "-" + \
str(minLength) + "-" + str(minSequences) + ").txt"
###########################
alignment = AlignIO.read(inputFilePath, inputFileType, alphabet=alphabetFormat)
sequenceLength = alignment.get_alignment_length()
numSequences = alignment.__len__()
# Error checking for user input
if matchingFraction < 0 or matchingFraction > 1:
print('Error: \'matchingFraction\' must be between 0 and 1')
exit()
if minLength >= sequenceLength or minLength <= 0:
print('Error: \'minLength\' must be a positive number less than the sequence length in input file')
exit()
if minSequences >= numSequences or minSequences <= 0:
print('Error: \'minSequences\' must be a positive number less than the number of sequences in input file')
exit()
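# Overall approach: starting from every (sequence, position) pair, a candidate block of
# at least minSequences rows and minLength columns is grown to the right and then
# downward for as long as isBlockConserved() holds (i.e. at most a 1 - matchingFraction
# share of mismatched columns), and each largest conserved block found is recorded.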
def isBlockConserved(blockSequence):
maxAllowedMismatchedColumns = blockSequence.get_alignment_length(
) - int(blockSequence.get_alignment_length() * matchingFraction)
mismatchedColumns = 0
for column in range(blockSequence.get_alignment_length()):
# First, check if entire first column is filled with gap characters
# If so, return False
if column == 0:
gapCharacterCount = 0
for row in range(blockSequence.__len__()):
if blockSequence[row][column] == gapCharacter:
gapCharacterCount += 1
if gapCharacterCount == blockSequence.__len__():
return False
# Find the first non-gap character in column
firstNonGapCharacter = ""
for row in range(blockSequence.__len__()):
if blockSequence[row][column] != gapCharacter and firstNonGapCharacter == "":
firstNonGapCharacter = blockSequence[row][column]
break
# Next, count number of columns with non-identical values or gap characters
# If count increases above threshold, return False
for row in range(blockSequence.__len__()):
if blockSequence[row][column] == gapCharacter or blockSequence[row][column] != firstNonGapCharacter:
mismatchedColumns += 1
if mismatchedColumns > maxAllowedMismatchedColumns:
return False
break
# If loop executes completely to end, return True
return True
def isBlockAlreadyCounted(startRow, startColumn, endRow, endColumn):
for block in conservedBlocksInfo:
if startRow >= block[0] and startColumn >= block[1] and endRow <= block[2] and endColumn <= block[3]:
return True
return False
def removeTrailingBlankColumns(blockSequence):
removedColumns = 0
blankColumn = True
while blankColumn:
gapCharacterCount = 0
for row in range(blockSequence.__len__()):
if blockSequence[row][-1] == gapCharacter:
gapCharacterCount += 1
if gapCharacterCount == blockSequence.__len__():
# Remove last column, as it contains only gap characters
blockSequence = blockSequence[:, :-1]
removedColumns += 1
else:
blankColumn = False
return blockSequence, removedColumns
def getNumberBlankColumns(blockSequence):
numberColumns = 0
for column in range(blockSequence.get_alignment_length()):
gapCharacterCount = 0
for row in range(blockSequence.__len__()):
if blockSequence[row][column] == gapCharacter:
gapCharacterCount += 1
if gapCharacterCount == blockSequence.__len__():
numberColumns += 1
return numberColumns
def getMismatchString(blockSequence):
mismatchString = ""
for column in range(blockSequence.get_alignment_length()):
mismatch = False
firstNonGapCharacter = ""
for row in range(blockSequence.__len__()):
if blockSequence[row][column] != gapCharacter and firstNonGapCharacter == "":
firstNonGapCharacter = blockSequence[row][column]
break
for row in range(blockSequence.__len__()):
if blockSequence[row][column] != firstNonGapCharacter:
mismatchString += "X"
mismatch = True
break
if not mismatch:
mismatchString += "-"
return mismatchString
def getNumberPositionString(length, start, multiple):
returnString = ""
i = start
while i < length + start:
if i % multiple == 0:
returnString += str(i)
# Move counter forward by number of extra characters used to print string
i += (len(str(i)) - 1)
else:
returnString += " "
i += 1
return returnString
print("Input file = " + inputFilePath)
print("\'matchingFraction\' = " + str(matchingFraction))
print("\'minLength\' = " + str(minLength))
print("\'minSequences\' = " + str(minSequences))
print("")
conservedBlocks = []
conservedBlocksInfo = []
for startSequence in range(numSequences - minSequences + 1):
current_time = time.time() - start_time
print("Start of sequence #" + str(startSequence + 1) +
" (current time = " + str(int(current_time)) + "s)")
for index in range(sequenceLength - minLength + 1):
length = minLength
sequences = minSequences
# Only test block if it (or larger version of it) not already added
if not isBlockAlreadyCounted(startSequence, index, startSequence + sequences, index + length):
largestConservedBlock = MultipleSeqAlignment([]) # Empty object
consensusRight = True
consensusDown = True
while consensusRight:
block = alignment[startSequence:(
startSequence + sequences), index:(index + length)]
if (index + length) > sequenceLength:
# Reached end of sequence
consensusRight = False
elif not isBlockConserved(block):
consensusRight = False
if length == minLength:
consensusDown = False # Don't bother searching down, as start block is not conserved
else:
length -= 1
else:
length += 1
largestConservedBlock = block
while consensusDown:
block = alignment[startSequence:(
startSequence + sequences), index:(index + length)]
if (startSequence + sequences) > numSequences:
# Reached end of sequence list
consensusDown = False
elif not isBlockConserved(block):
consensusDown = False
sequences -= 1
else:
sequences += 1
largestConservedBlock = block
if index != sequenceLength and length > minLength:
# Before saving data, remove any columns of gap characters at end of block
largestConservedBlock, removedColumns = removeTrailingBlankColumns(
largestConservedBlock)
# Save block data in list
conservedBlocks.append(largestConservedBlock)
conservedBlocksInfo.append(
[startSequence, index, startSequence + sequences, index + length])
# Print information about saved data
print("Block found: " + str(conservedBlocks[-1].__len__()) + " sequences, " + str(
conservedBlocks[-1].get_alignment_length()) + " long (from position " + str(index + 1) + " to " + str(index + length + 1) + ")")
# Print output to text file
textFile = open(outputFilePath, "w")
textFile.write("Python script = " + os.path.basename(__file__) + "\n")
textFile.write("Input file = " + inputFilePath + "\n")
textFile.write("\'matchingFraction\' = " + str(matchingFraction) + "\n")
textFile.write("\'minLength\' = " + str(minLength) + "\n")
textFile.write("\'minSequences\' = " + str(minSequences) + "\n\n")
i = 0
for block in conservedBlocks:
textFile.write("Number blank columns = " +
str(getNumberBlankColumns(block)) + "\n")
textFile.write("Length = " + str(block.get_alignment_length() - getNumberBlankColumns(block)) + " / Length with gaps = " + str(block.get_alignment_length()
) + " (position " + str(conservedBlocksInfo[i][1] + 1) + " to " + str(conservedBlocksInfo[i][1] + block.get_alignment_length() + 1) + ")\n")
textFile.write("Number of sequences = " + str(block.__len__()) + "\n")
for row in range(block.__len__()):
textFile.write("[" + str(conservedBlocksInfo[i][0] +
row + 1) + "] " + block[row].name + "\n")
textFile.write(getNumberPositionString(
block.get_alignment_length(), 1, 50) + "\n")
textFile.write(getMismatchString(block) + "\n")
for row in range(block.__len__()):
textFile.write(str(block[row, :].seq) + "\n")
textFile.write("\n")
i += 1
textFile.close()
# Print execution time of script
end_time = time.time()
total_time = end_time - start_time
print("Program execution time: " + str(int(total_time)) + "s")
``` |
{
"source": "jole6826/pyswarms",
"score": 4
} |
#### File: pyswarms/single/general_optimizer.py
```python
r"""
A general Particle Swarm Optimization (general PSO) algorithm.
It takes a set of candidate solutions, and tries to find the best
solution using a position-velocity update method. Uses a user specified
topology.
The position update can be defined as:
.. math::
x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)
Where the position at the current timestep :math:`t` is updated using
the computed velocity at :math:`t+1`. Furthermore, the velocity update
is defined as:
.. math::
    v_{ij}(t + 1) = w * v_{ij}(t) + c_{1}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]
+ c_{2}r_{2j}(t)[\hat{y}_{j}(t) − x_{ij}(t)]
Here, :math:`c1` and :math:`c2` are the cognitive and social parameters
respectively. They control the particle's behavior given two choices: (1) to
follow its *personal best* or (2) follow the swarm's *global best* position.
Overall, this dictates if the swarm is explorative or exploitative in nature.
In addition, a parameter :math:`w` controls the inertia of the swarm's
movement.
An example usage is as follows:
.. code-block:: python
import pyswarms as ps
from pyswarms.backend.topology import Pyramid
from pyswarms.utils.functions import single_obj as fx
# Set-up hyperparameters and topology
options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
my_topology = Pyramid(static=False)
# Call instance of GlobalBestPSO
optimizer = ps.single.GeneralOptimizerPSO(n_particles=10, dimensions=2,
options=options, topology=my_topology)
# Perform optimization
stats = optimizer.optimize(fx.sphere_func, iters=100)
This algorithm was adapted from the earlier works of <NAME> and
<NAME> in Particle Swarm Optimization [IJCNN1995]_.
.. [IJCNN1995] <NAME> and <NAME>, "Particle Swarm Optimization,"
Proceedings of the IEEE International Joint Conference on Neural
Networks, 1995, pp. 1942-1948.
"""
# Import from stdlib
import logging
# Import modules
import numpy as np
# Import from package
from ..base import SwarmOptimizer
from ..backend.operators import compute_pbest
from ..backend.topology import Topology, Ring, Random, VonNeumann
from ..utils.console_utils import cli_print, end_report
class GeneralOptimizerPSO(SwarmOptimizer):
def __init__(
self,
n_particles,
dimensions,
options,
topology,
bounds=None,
velocity_clamp=None,
center=1.00,
ftol=-np.inf,
init_pos=None,
):
"""Initialize the swarm
Attributes
----------
n_particles : int
number of particles in the swarm.
dimensions : int
number of dimensions in the space.
options : dict with keys :code:`{'c1', 'c2', 'w'}` or :code:`{'c1', 'c2', 'w', 'k', 'p'}`
a dictionary containing the parameters for the specific
optimization technique.
* c1 : float
cognitive parameter
* c2 : float
social parameter
* w : float
inertia parameter
if used with the :code:`Ring`, :code:`VonNeumann` or :code:`Random` topology the additional
parameter k must be included
* k : int
number of neighbors to be considered. Must be a
positive integer less than :code:`n_particles`
if used with the :code:`Ring` topology the additional
parameters k and p must be included
* p: int {1,2}
the Minkowski p-norm to use. 1 is the
sum-of-absolute values (or L1 distance) while 2 is
the Euclidean (or L2) distance.
if used with the :code:`VonNeumann` topology the additional
parameters p and r must be included
* r: int
the range of the VonNeumann topology.
This is used to determine the number of
neighbours in the topology.
topology : pyswarms.backend.topology.Topology
a :code:`Topology` object that defines the topology to use
in the optimization process. The currently available topologies
are:
* Star
All particles are connected
* Ring (static and dynamic)
Particles are connected to the k nearest neighbours
* VonNeumann
Particles are connected in a VonNeumann topology
* Pyramid (static and dynamic)
Particles are connected in N-dimensional simplices
* Random (static and dynamic)
Particles are connected to k random particles
Static variants of the topologies remain with the same neighbours
over the course of the optimization. Dynamic variants calculate
new neighbours every time step.
bounds : tuple of :code:`np.ndarray` (default is :code:`None`)
a tuple of size 2 where the first entry is the minimum bound
while the second entry is the maximum bound. Each array must
be of shape :code:`(dimensions,)`.
velocity_clamp : tuple (default is :code:`None`)
a tuple of size 2 where the first entry is the minimum velocity
and the second entry is the maximum velocity. It
sets the limits for velocity clamping.
        center : list (default is :code:`1.00`)
an array of size :code:`dimensions`
ftol : float
relative error in objective_func(best_pos) acceptable for
convergence
"""
super(GeneralOptimizerPSO, self).__init__(
n_particles,
dimensions=dimensions,
options=options,
bounds=bounds,
velocity_clamp=velocity_clamp,
center=center,
ftol=ftol,
init_pos=init_pos,
)
# Initialize logger
self.logger = logging.getLogger(__name__)
# Invoke assertions
self.assertions()
# Initialize the resettable attributes
self.reset()
# Initialize the topology and check for type
if not isinstance(topology, Topology):
raise TypeError("Parameter `topology` must be a Topology object")
else:
self.top = topology
        # Case for the Ring and VonNeumann topologies
        if isinstance(topology, (Ring, VonNeumann)):
            # Exceptions for the p value
            if "p" not in self.options:
                raise KeyError("Missing p in options")
            # Assign p-value as attribute
            self.p = options["p"]
            if self.p not in [1, 2]:
                raise ValueError(
                    "p-value should either be 1 (for L1/Minkowski) "
                    "or 2 (for L2/Euclidean)."
                )
        # Case for Random, VonNeumann and Ring topologies
        if isinstance(topology, (Random, Ring, VonNeumann)):
            if not isinstance(topology, VonNeumann):
                if "k" not in self.options:
                    raise KeyError("Missing k in options")
                self.k = options["k"]
                if not isinstance(self.k, int):
                    raise ValueError(
                        "No. of neighbors must be an integer between "
                        "0 and no. of particles."
                    )
                if not 0 <= self.k <= self.n_particles - 1:
                    raise ValueError(
                        "No. of neighbors must be between 0 and no. "
                        "of particles."
                    )
else:
# Assign range r as attribute
self.r = options["r"]
if not isinstance(self.r, int):
raise ValueError("The range must be a positive integer")
if (
self.r <= 0
or not 0
<= VonNeumann.delannoy(self.swarm.dimensions, self.r)
<= self.n_particles - 1
):
raise ValueError(
"The range must be set such that the computed"
"Delannoy number (number of neighbours) is"
"between 0 and the no. of particles."
)
def optimize(self, objective_func, iters, print_step=1, verbose=1, **kwargs):
"""Optimize the swarm for a number of iterations
Performs the optimization to evaluate the objective
function :code:`f` for a number of iterations :code:`iter.`
Parameters
----------
objective_func : function
objective function to be evaluated
iters : int
number of iterations
print_step : int (default is 1)
amount of steps for printing into console.
verbose : int (default is 1)
verbosity setting.
kwargs : dict
arguments for the objective function
Returns
-------
tuple
the global best cost and the global best position.
"""
cli_print("Arguments Passed to Objective Function: {}".format(kwargs),
verbose, 2, logger=self.logger)
for i in range(iters):
# Compute cost for current position and personal best
self.swarm.current_cost = objective_func(self.swarm.position, **kwargs)
self.swarm.pbest_cost = objective_func(self.swarm.pbest_pos, **kwargs)
self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(
self.swarm
)
best_cost_yet_found = self.swarm.best_cost
# If the topology is a ring topology just use the local minimum
if isinstance(self.top, Ring) and not isinstance(self.top, VonNeumann):
# Update gbest from neighborhood
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
self.swarm, self.p, self.k
)
# If the topology is a VonNeumann topology pass the neighbour and range attribute to compute_gbest()
            elif isinstance(self.top, VonNeumann):
# Update gbest from neighborhood
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
self.swarm, self.p, self.r
)
# If the topology is a random topology pass the neighbor attribute to compute_gbest()
elif isinstance(self.top, Random):
# Get minima of pbest and check if it's less than gbest
if np.min(self.swarm.pbest_cost) < self.swarm.best_cost:
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
self.swarm, self.k
)
else:
# Get minima of pbest and check if it's less than gbest
if np.min(self.swarm.pbest_cost) < self.swarm.best_cost:
self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
self.swarm
)
# Print to console
if i % print_step == 0:
cli_print(
"Iteration {}/{}, cost: {}".format(i + 1, iters, self.swarm.best_cost),
verbose,
2,
logger=self.logger
)
# Save to history
hist = self.ToHistory(
best_cost=self.swarm.best_cost,
mean_pbest_cost=np.mean(self.swarm.pbest_cost),
mean_neighbor_cost=self.swarm.best_cost,
position=self.swarm.position,
velocity=self.swarm.velocity
)
self._populate_history(hist)
# Verify stop criteria based on the relative acceptable cost ftol
relative_measure = self.ftol * (1 + np.abs(best_cost_yet_found))
if (
np.abs(self.swarm.best_cost - best_cost_yet_found)
< relative_measure
):
break
# Perform velocity and position updates
self.swarm.velocity = self.top.compute_velocity(
self.swarm, self.velocity_clamp
)
self.swarm.position = self.top.compute_position(
self.swarm, self.bounds
)
# Obtain the final best_cost and the final best_position
final_best_cost = self.swarm.best_cost.copy()
final_best_pos = self.swarm.best_pos.copy()
# Write report in log and return final cost and position
end_report(
final_best_cost, final_best_pos, verbose, logger=self.logger
)
return(final_best_cost, final_best_pos)
```
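The class docstring above notes that the `Ring`, `VonNeumann` and `Random` topologies need extra keys in `options`. As a small illustrative sketch (hyperparameter values are arbitrary and mirror the docstring's own example), a Ring-topology optimizer would be set up like this:
```python
# Sketch only: Ring needs both `k` (number of neighbours) and `p` (1 or 2, the norm)
# in the options dict, in addition to c1, c2 and w.
import pyswarms as ps
from pyswarms.backend.topology import Ring
from pyswarms.utils.functions import single_obj as fx

options = {"c1": 0.5, "c2": 0.3, "w": 0.9, "k": 3, "p": 2}
my_topology = Ring(static=False)

optimizer = ps.single.GeneralOptimizerPSO(
    n_particles=10, dimensions=2, options=options, topology=my_topology
)
best_cost, best_pos = optimizer.optimize(fx.sphere_func, iters=100)
```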
#### File: pyswarms/utils/console_utils.py
```python
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Import modules
def cli_print(message, verbosity, threshold, logger):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
logger : logging.getLogger
logger instance
"""
if verbosity >= threshold:
logger.info(message)
else:
pass
def end_report(cost, pos, verbosity, logger):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
logger : logging.getLogger
logger instance
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ("[ " + 3 * "{:3f} " + "...]").format(*list(pos))
else:
out = list(pos)
template = (
"================================\n"
"Optimization finished!\n"
"Final cost: {:06.4f}\n"
"Best value: {}\n"
).format(cost, out)
if verbosity >= 1:
logger.info(template)
```
#### File: utils/plotters/formatters.py
```python
import numpy as np
from attr import attrs, attrib
from attr.validators import instance_of
from matplotlib import cm, colors
@attrs
class Designer(object):
"""Designer class for specifying a plot's formatting and design
You can use this class for specifying design-related customizations to
your plot. This can be passed in various functions found in the
:mod:`pyswarms.utils.plotters` module.
.. code-block :: python
from pyswarms.utils.plotters import plot_cost_history
from pyswarms.utils.plotters.formatters import Designer
# Set title_fontsize into 20
my_designer = Designer(title_fontsize=20)
# Assuming we already had an optimizer ready
plot_cost_history(cost_history, designer=my_designer)
Attributes
----------
figsize : tuple (default is :code:`(10,8)`)
Overall figure size.
title_fontsize : str, int, or float (default is :code:`large`)
Size of the plot's title.
text_fontsize : str, int, or float (default is :code:`medium`)
Size of the plot's labels and legend.
legend : str (default is :code:`Cost`)
Label to show in the legend. For cost histories, it states
the label of the line plot.
label : str, list, or tuple (default is :code:`['x-axis', 'y-axis']`)
Label to show in the x, y, or z-axis. For a 3D plot, please pass
an iterable with three elements.
"""
# Overall plot design
figsize = attrib(type=tuple, validator=instance_of(tuple), default=(10, 8))
title_fontsize = attrib(
validator=instance_of((str, int, float)), default="large"
)
text_fontsize = attrib(
validator=instance_of((str, int, float)), default="medium"
)
legend = attrib(validator=instance_of(str), default="Cost")
label = attrib(
validator=instance_of((str, list, tuple)),
default=["x-axis", "y-axis", "z-axis"],
)
limits = attrib(
validator=instance_of((list, tuple)), default=[(-1, 1), (-1, 1), (-1, 1)]
)
colormap = attrib(
validator=instance_of(colors.Colormap),
default=cm.viridis,
)
@attrs
class Animator(object):
"""Animator class for specifying animation behavior
You can use this class to modify options on how the animation will be run
in the :func:`pyswarms.utils.plotters.plot_contour` and
:func:`pyswarms.utils.plotters.plot_surface` methods.
.. code-block :: python
from pyswarms.utils.plotters import plot_contour
from pyswarms.utils.plotters.formatters import Animator
# Do not repeat animation
my_animator = Animator(repeat=False)
# Assuming we already had an optimizer ready
plot_contour(pos_history, animator=my_animator)
Attributes
----------
interval : int (default is :code:`80`)
Sets the interval or speed into which the animation is played.
repeat_delay : int, float (default is :code:`None`)
Sets the delay before repeating the animation again.
repeat : bool (default is :code:`True`)
Pass :code:`False` if you don't want to repeat the animation.
"""
interval = attrib(type=int, validator=instance_of(int), default=80)
repeat_delay = attrib(default=None)
repeat = attrib(type=bool, validator=instance_of(bool), default=True)
@attrs
class Mesher(object):
"""Mesher class for plotting contours of objective functions
This class enables drawing a surface plot of a given objective function.
You can customize how this plot is drawn with this class. Pass an instance
of this class to enable meshing.
.. code-block :: python
from pyswarms.utils.plotters import plot_surface
from pyswarms.utils.plotters.formatters import Mesher
from pyswarms.utils.functions import single_obj as fx
# Use sphere function
my_mesher = Mesher(func=fx.sphere_func)
# Assuming we already had an optimizer ready
plot_surface(pos_history, mesher=my_mesher)
Attributes
----------
func : callable
Objective function to plot a surface of.
delta : float (default is :code:`0.001`)
        Mesh spacing (step size) used when generating the surface plot
limits : list, tuple (default is :code:`[(-1,1), (-1,1)]`)
The range, in each axis, where the mesh will be drawn.
levels : list (default is :code:`np.arange(-2.0, 2.0, 0.070)`)
Levels on which the contours are shown.
alpha : float (default is :code:`0.3`)
Transparency of the surface plot
"""
func = attrib()
# For mesh creation
delta = attrib(type=float, default=0.001)
limits = attrib(
validator=instance_of((list, tuple)), default=[(-1, 1), (-1, 1)]
)
levels = attrib(type=list, default=np.arange(-2.0, 2.0, 0.070))
# Surface transparency
alpha = attrib(type=float, validator=instance_of(float), default=0.3)
def compute_history_3d(self, pos_history):
"""Compute a 3D position matrix
The first two columns are the 2D position in the x and y axes
respectively, while the third column is the fitness on that given
position.
Parameters
----------
pos_history : numpy.ndarray
Two-dimensional position matrix history of shape
:code:`(iterations, n_particles, 2)`
Returns
-------
numpy.ndarray
3D position matrix of shape :code:`(iterations, n_particles, 3)`
"""
fitness = np.array(list(map(self.func, pos_history)))
return np.dstack((pos_history, fitness))
```
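A short usage sketch for `Mesher.compute_history_3d` above; the random position history is illustrative, and the `sphere_func` import path matches the test files in this dump.
```python
import numpy as np

from pyswarms.utils.plotters.formatters import Mesher
from pyswarms.utils.functions.single_obj import sphere_func

# Fake an (iterations, n_particles, 2) position history
pos_history = np.random.uniform(low=-1, high=1, size=(20, 10, 2))

# Attach the sphere objective and append the fitness as a third column
mesher = Mesher(func=sphere_func, delta=0.1)
pos_history_3d = mesher.compute_history_3d(pos_history)
print(pos_history_3d.shape)  # (20, 10, 3)
```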
#### File: tests/optimizers/conftest.py
```python
import pytest
import numpy as np
# Import from package
from pyswarms.single import GlobalBestPSO, LocalBestPSO, GeneralOptimizerPSO
from pyswarms.discrete import BinaryPSO
from pyswarms.utils.functions.single_obj import sphere_func
from pyswarms.backend.topology import Star, Ring, Pyramid, Random, VonNeumann
@pytest.fixture(scope="module")
def general_opt_history(topology):
"""Returns a GeneralOptimizerPSO instance run for 1000 iterations for checking
history"""
pso = GeneralOptimizerPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology)
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def general_opt_reset(topology):
"""Returns a GeneralOptimizerPSO instance that has been run and reset to check
default value"""
pso = GeneralOptimizerPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5}, topology=topology)
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def gbest_history():
"""Returns a GlobalBestPSO instance run for 1000 iterations for checking
history"""
pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def gbest_reset():
"""Returns a GlobalBestPSO instance that has been run and reset to check
default value"""
pso = GlobalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def lbest_history():
"""Returns a LocalBestPSO instance run for 1000 iterations for checking
history"""
pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def lbest_reset():
"""Returns a LocalBestPSO instance that has been run and reset to check
default value"""
pso = LocalBestPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture(scope="module")
def binary_history():
"""Returns a BinaryPSO instance run for 1000 iterations for checking
history"""
pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 1000, verbose=0)
return pso
@pytest.fixture(scope="module")
def binary_reset():
"""Returns a BinaryPSO instance that has been run and reset to check
default value"""
pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
pso.optimize(sphere_func, 10, verbose=0)
pso.reset()
return pso
@pytest.fixture
def options():
"""Default options dictionary for most PSO use-cases"""
options_ = {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2, "r": 1}
return options_
@pytest.fixture(params=[
Star(),
Ring(static=False), Ring(static=True),
Pyramid(static=False), Pyramid(static=True),
Random(static=False), Random(static=True),
VonNeumann()
])
def topology(request):
"""Parametrized topology parameter"""
topology_ = request.param
return topology_
```
#### File: utils/plotters/conftest.py
```python
import os
import pytest
import numpy as np
from mock import Mock
import matplotlib as mpl
if os.environ.get("DISPLAY", "") == "":
mpl.use("Agg")
# Import from package
from pyswarms.single import GlobalBestPSO
from pyswarms.utils.functions.single_obj import sphere_func
from pyswarms.utils.plotters.formatters import Mesher
@pytest.fixture
def trained_optimizer():
"""Returns a trained optimizer instance with 100 iterations"""
options = {"c1": 0.5, "c2": 0.3, "w": 0.9}
optimizer = GlobalBestPSO(n_particles=10, dimensions=2, options=options)
optimizer.optimize(sphere_func, iters=100)
return optimizer
@pytest.fixture
def pos_history():
"""Returns a list containing a swarms' position history"""
return np.random.uniform(size=(10, 5, 2))
@pytest.fixture
def mesher():
"""A Mesher instance with sphere function and delta=0.1"""
return Mesher(func=sphere_func, delta=0.1)
``` |
{
"source": "jolemon/defects4j-dataset",
"score": 2
} |
#### File: jolemon/defects4j-dataset/clear_d4j.py
```python
import os
import shutil
import re
projs = ['Time', 'Mockito', 'Lang', 'Math', 'Closure']
linked_dir = 'linked-bugMethods-iBug/'
linked_postfix = '_bugId_buggyMethodsName'
bugcode_dir = 'allMethods-iBug'
br_dir = 'bugReport4Vector'
def load_link_dic(path):
    dic = {}
    with open(path, 'r') as f:
        for line in f:
            line = line.strip()
            if len(line) > 0:
                parts = line.split('\t')
                br_id = parts[0]
                dic[br_id] = 1
    return dic
def clear_allMethods():
for proj in projs:
link_dic = load_link_dic(linked_dir + proj + linked_postfix)
for file in os.listdir(os.path.join(bugcode_dir, proj)):
if file not in link_dic:
print(os.path.join(bugcode_dir, proj, file))
file_path = os.path.join(bugcode_dir, proj, file)
if file == '.DS_Store':
os.remove(file_path)
else:
shutil.rmtree(file_path)
def clear_br():
for proj in projs:
link_dic = load_link_dic(linked_dir + proj + linked_postfix)
proj_dir = os.path.join(br_dir, proj)
for br in os.listdir(proj_dir):
if br not in link_dic:
print(os.path.join(proj_dir, br))
file_path = os.path.join(br_dir, proj, br)
os.remove(file_path)
if __name__ == '__main__':
clear_allMethods()
clear_br()
``` |
{
"source": "jolenechong/simplePythonProjects",
"score": 3
} |
#### File: jolenechong/simplePythonProjects/hangman.py
```python
from PIL import Image
'''
hangman
time taken: 30 mins for the base version (no hangman art, no game-won or game-over handling)
1h 45min for the whole project
todos:
- guess letter OR whole word
'''
hangman = {
1:[
"-------",
" ▓",
" ▓",
" ▓",
" ▓",
" ▓",
"-------",
],
2:[
"-------",
" | ▓",
" ▓",
" ▓",
" ▓",
" ▓",
"-------",
],
3:[
"-------",
" | ▓",
" ☺ ▓",
" ▓",
" ▓",
" ▓",
"-------",
],
4:[
"-------",
" | ▓",
" \☺ ▓",
" ▓",
" ▓",
" ▓",
"-------",
],
5:[
"-------",
" | ▓",
" \☺/ ▓",
" ▓",
" ▓",
" ▓",
"-------",
],
6:[
"-------",
" | ▓",
" \☺/ ▓",
" | ▓",
" ▓",
" ▓",
"-------",
],
7:[
"-------",
" | ▓",
" \☺/ ▓",
" | ▓",
" / ▓",
" ▓",
"-------",
],
8:[
"-------",
" | ▓",
" \☺/ ▓",
" | ▓",
" / \ ▓",
" ▓",
"-------",
],
}
def main():
word = str(input('Enter the word to guess: '))
word = word.lower()
start = input('Enter (E) to start: ')
if start == 'e' or start == 'E':
guess_the_word(word)
def guess_the_word(word):
letterList = []
currentWord = []
gameWon = False
hangmanCount = 1
# store word in a list by each letter
# append _ to current word depending on how many letters are in the word
for letter in word:
letterList.append(letter)
currentWord.append("_")
while True:
roundWon = False
if gameWon == True:
print('\nYAY! You won!')
im = Image.open("YAY.jpg")
im.show()
break
else:
letterGuessed = str(input('Guess a letter:'))
letterGuessed = letterGuessed.lower()
# if guessed correctly append currentWord
count = 0
for letter in letterList:
if letterGuessed == letter:
currentWord[count]=letterGuessed
roundWon = True
count +=1
if "_" not in currentWord:
gameWon = True
            if roundWon == False:
                hangmanCount += 1
            # draw the current hangman figure line by line
            for row in hangman.get(hangmanCount, hangman[8]):
                print(row)
            print("CURRENT WORD:\n", "".join(map(str, currentWord)))
            if hangmanCount >= 8:
                print('\nGame over! The word was:', word)
                break
            turnsleft = 8 - hangmanCount
            print(f"You have {turnsleft} turns left\n")
main()
``` |
{
"source": "JoleProject/Jole",
"score": 2
} |
#### File: examples/tf/ppo_memorize_digits.py
```python
import gym
from garage.envs import normalize
from garage.experiment import run_experiment
from garage.tf.algos import PPO
from garage.tf.baselines import GaussianCNNBaseline
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import CategoricalCNNPolicy
def run_task(snapshot_config, *_):
"""Run task."""
with LocalTFRunner(snapshot_config=snapshot_config) as runner:
env = TfEnv(normalize(gym.make('MemorizeDigits-v0')))
policy = CategoricalCNNPolicy(env_spec=env.spec,
conv_filters=(32, 64, 64),
conv_filter_sizes=(5, 3, 2),
conv_strides=(4, 2, 1),
conv_pad='VALID',
hidden_sizes=(256, ))
baseline = GaussianCNNBaseline(env_spec=env.spec,
regressor_args=dict(
conv_filters=(32, 64, 64),
conv_filter_sizes=(5, 3, 2),
conv_strides=(4, 2, 1),
conv_pads=('VALID', 'VALID'),
hidden_sizes=(256, ),
use_trust_region=True))
algo = PPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
max_kl_step=0.01)
runner.setup(algo, env)
runner.train(n_epochs=1000, batch_size=2048)
run_experiment(
run_task,
snapshot_mode='last',
seed=1,
)
```
#### File: envs/wrappers/double_action.py
```python
import gym
import numpy as np
import random
class DoubleAction(gym.Wrapper):
    """Randomly double the agent's action and clip the reward by its sign."""
    def step(self, ac):
        """gym.Env step function."""
        # With probability ~0.2, double the action before stepping the env.
        r = random.uniform(0, 1)
        if r > 0.8:
            ac = ac * 2
        obs, reward, done, info = self.env.step(ac)
        return obs, np.sign(reward), done, info
def reset(self):
"""gym.Env reset."""
return self.env.reset()
```
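A minimal usage sketch for the wrapper above; the environment id is only an example of a continuous-action gym environment and is not part of the original file.
```python
import gym

# Actions are doubled roughly 20% of the time; rewards are clipped to their sign.
env = DoubleAction(gym.make('MountainCarContinuous-v0'))
obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
```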
#### File: tf/env_functions/base2.py
```python
import abc
class EnvFunction2(abc.ABC):
"""
Q-function base class without Parameterized.
Args:
name (str): Name of the Q-function, also the variable scope.
"""
def __init__(self, name):
self.name = name or type(self).__name__
self._variable_scope = None
def get_fval_sym(self, *input_phs):
"""
Symbolic graph for q-network.
All derived classes should implement this function.
Args:
input_phs (list[tf.Tensor]): Recommended to be positional
arguments, e.g. def get_fval_sym(self, state_input,
action_input).
"""
pass
def clone(self, name):
"""
Return a clone of the Q-function.
It should only copy the configuration of the Q-function,
not the parameters.
Args:
name (str): Name of the newly created q-function.
"""
pass
def get_trainable_vars(self):
"""Get all trainable variables under the QFunction scope."""
return self._variable_scope.trainable_variables()
def get_global_vars(self):
"""Get all global variables under the QFunction scope."""
return self._variable_scope.global_variables()
def get_regularizable_vars(self):
"""Get all network weight variables under the QFunction scope."""
trainable = self._variable_scope.global_variables()
return [
var for var in trainable
if 'hidden' in var.name and 'kernel' in var.name
]
def log_diagnostics(self, paths):
"""Log extra information per iteration based on the collected paths."""
pass
```
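A toy subclass sketch showing how the abstract interface above is meant to be filled in; the class name and constant-output behavior are purely illustrative.
```python
import tensorflow as tf


class ConstantEnvFunction(EnvFunction2):
    """Illustrative env-function that returns a constant tensor for any input."""

    def __init__(self, name='ConstantEnvFunction', value=0.0):
        super().__init__(name)
        self._value = value

    def get_fval_sym(self, *input_phs):
        # Ignore the placeholders and return a constant value.
        return tf.constant(self._value, dtype=tf.float32)

    def clone(self, name):
        # Copy the configuration only, as documented in the base class.
        return self.__class__(name=name, value=self._value)
```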
#### File: tf/env_functions/mlp_terminal_function.py
```python
import tensorflow as tf
from garage.misc.overrides import overrides
from garage.tf.models import MLPModel
from garage.tf.env_functions.base2 import EnvFunction2
class MLPTerminalFunction(EnvFunction2):
"""
Continuous MLP QFunction.
This class implements a q value network to predict q based on the input
state and action. It uses an MLP to fit the function of Q(s, a).
Args:
env_spec (garage.envs.env_spec.EnvSpec): Environment specification.
name (str): Name of the q-function, also serves as the variable scope.
hidden_sizes (list[int]): Output dimension of dense layer(s).
For example, (32, 32) means the MLP of this q-function consists of
two hidden layers, each with 32 hidden units.
action_merge_layer (int): The index of layers at which to concatenate
action inputs with the network. The indexing works like standard
python list indexing. Index of 0 refers to the input layer
(observation input) while an index of -1 points to the last
hidden layer. Default parameter points to second layer from the
end.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a tf.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
tf.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
tf.Tensor.
output_nonlinearity (callable): Activation function for output dense
layer. It should return a tf.Tensor. Set it to None to
maintain a linear activation.
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
tf.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
tf.Tensor.
input_include_goal (bool): Whether input includes goal.
layer_normalization (bool): Bool for using layer normalization.
"""
def __init__(self,
env_spec,
name='MLPTerminalFunction',
hidden_sizes=(20, 20),
hidden_nonlinearity=tf.nn.relu,
hidden_w_init=tf.glorot_uniform_initializer(),
hidden_b_init=tf.zeros_initializer(),
output_nonlinearity=None,
output_w_init=tf.glorot_uniform_initializer(),
output_b_init=tf.zeros_initializer(),
input_include_goal=False,
layer_normalization=False):
super().__init__(name)
self._env_spec = env_spec
self._hidden_sizes = hidden_sizes
self._hidden_nonlinearity = hidden_nonlinearity
self._hidden_w_init = hidden_w_init
self._hidden_b_init = hidden_b_init
self._output_nonlinearity = output_nonlinearity
self._output_w_init = output_w_init
self._output_b_init = output_b_init
self._input_include_goal = input_include_goal
self._layer_normalization = layer_normalization
if self._input_include_goal:
self._obs_dim = env_spec.observation_space.flat_dim_with_keys(
['observation', 'desired_goal'])
else:
self._obs_dim = env_spec.observation_space.flat_dim
self._action_dim = env_spec.action_space.flat_dim
self.model = MLPModel(output_dim=2,
hidden_sizes=hidden_sizes,
hidden_nonlinearity=hidden_nonlinearity,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
output_nonlinearity=output_nonlinearity,
output_w_init=output_w_init,
output_b_init=output_b_init,
layer_normalization=layer_normalization)
self._initialize()
def _initialize(self):
obs_ph = tf.compat.v1.placeholder(tf.float32, (None, self._obs_dim),
name='obs')
with tf.compat.v1.variable_scope(self.name) as vs:
self._variable_scope = vs
self.model.build(obs_ph)
self._f_qval = tf.compat.v1.get_default_session().make_callable(
self.model.networks['default'].outputs,
feed_list=[obs_ph])
def get_obs_val(self, observation):
"""Q Value of the network."""
return self._f_qval(observation)
@property
def inputs(self):
"""Return the input tensor."""
return self.model.networks['default'].inputs
@overrides
def get_fval_sym(self, state_input, name):
"""
Symbolic graph for q-network.
Args:
state_input (tf.Tensor): The state input tf.Tensor to the network.
name (str): Network variable scope.
Return:
The tf.Tensor output of Discrete MLP QFunction.
"""
with tf.compat.v1.variable_scope(self._variable_scope):
return self.model.build(state_input, name=name)
def clone(self, name):
"""
Return a clone of the Q-function.
It only copies the configuration of the Q-function,
not the parameters.
Args:
name (str): Name of the newly created q-function.
"""
return self.__class__(name=name,
env_spec=self._env_spec,
hidden_sizes=self._hidden_sizes,
hidden_nonlinearity=self._hidden_nonlinearity,
hidden_w_init=self._hidden_w_init,
hidden_b_init=self._hidden_b_init,
output_nonlinearity=self._output_nonlinearity,
output_w_init=self._output_w_init,
output_b_init=self._output_b_init,
layer_normalization=self._layer_normalization)
def __getstate__(self):
"""Object.__getstate__."""
new_dict = self.__dict__.copy()
del new_dict['_f_qval']
return new_dict
def __setstate__(self, state):
"""Object.__setstate__."""
self.__dict__.update(state)
self._initialize()
``` |
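A construction sketch for the class above; the CartPole environment is only a stand-in, and a default TF session must be active because `_initialize()` builds a callable from it.
```python
import gym
import tensorflow as tf

from garage.tf.envs import TfEnv

with tf.compat.v1.Session().as_default():
    env = TfEnv(gym.make('CartPole-v1'))
    terminal_fn = MLPTerminalFunction(env_spec=env.spec,
                                      hidden_sizes=(32, 32))
    print(terminal_fn.inputs)
```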
{
"source": "joleroi/gammapy",
"score": 3
} |
#### File: gammapy/examples/horizon_coord_transformations.py
```python
import numpy as np
import ephem
from kapteyn.wcs import Projection
def approximate_nominal_to_altaz(nominal, horizon_center=(0, 0)):
"""Transform nominal coordinates to horizon coordinates.
nominal = (x, y) in meter
horizon_center = (az_center, alt_center) in deg
Returns: horizon = (az, alt) in deg
TODO: The following method of computing Alt / Az is only
an approximation. Implement and use a utility function
using the TAN FITS projection.
"""
x, y = np.asarray(nominal, dtype='float64')
az_center, alt_center = np.asarray(horizon_center, dtype='float64')
# @note: alt increases where x increases, az increases where y increases
az = az_center + np.degrees(np.tan(y)) / np.cos(np.radians(alt_center))
alt = alt_center + np.degrees(np.tan(x))
return az, alt
def nominal_to_altaz(nominal, horizon_center=(0, 0)):
"""Transform nominal coordinates to horizon coordinates.
nominal = (x, y) in meter
horizon_center = (az_center, alt_center) in deg
Returns: horizon = (az, alt) in deg
"""
x, y = np.asarray(nominal, dtype='float64')
az_center, alt_center = np.asarray(horizon_center, dtype='float64')
header = {'NAXIS': 2,
'NAXIS1': 100,
'NAXIS2': 100,
'CTYPE1': 'RA---TAN',
'CRVAL1': az_center,
'CRPIX1': 0,
'CUNIT1': 'deg',
'CDELT1': np.degrees(1),
'CTYPE2': 'DEC--TAN',
'CRVAL2': alt_center,
'CRPIX2': 0,
'CUNIT2': 'deg',
'CDELT2': np.degrees(1),
}
projection = Projection(header)
altaz = projection.toworld((y, x))
return altaz[0], altaz[1]
```
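A quick usage sketch of the approximate transform above; the nominal offsets and the pointing position are arbitrary example values.
```python
# Small nominal offsets around a pointing of (az, alt) = (180, 45) deg
az, alt = approximate_nominal_to_altaz((0.01, 0.02), horizon_center=(180.0, 45.0))
print(az, alt)
```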
#### File: gammapy/examples/plot_rings.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from gammapy.background import ring
def add_inner_title(ax, title, loc, size=None, **kwargs):
from matplotlib.offsetbox import AnchoredText
from matplotlib.patheffects import withStroke
if size is None:
size = dict(size=plt.rcParams['legend.fontsize'])
at = AnchoredText(title, loc=loc, prop=size,
pad=0., borderpad=0.5,
frameon=False, **kwargs)
ax.add_artist(at)
at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)])
return at
fov = 2.0
pixscale = 0.02
areafactor = 20
thetas = [0.1, 0.2, 0.4]
r_is = [0.5, 0.8, 1.1]
x = y = np.arange(-fov, fov + pixscale, pixscale)
X, Y = np.meshgrid(x, y)
d = np.sqrt(X ** 2 + Y ** 2)
fig = plt.figure(1, (10, 10))
title = ('Areafactor = {0} and Pixel Size {1}'
''.format(areafactor, pixscale))
grid = ImageGrid(fig, 111,
nrows_ncols=(len(thetas), len(r_is)),
axes_pad=0.1,
)
for i_theta, theta in enumerate(thetas):
for i_r_i, r_i in enumerate(r_is):
r_o = ring.r_o(theta, r_i, areafactor)
circle = d < theta
ring_mask = (r_i < d) & (d < r_o)  # renamed so the imported ring module is not shadowed
true_areafactor = ring_mask.sum() / circle.sum()
mask = circle + ring_mask
index = i_theta * len(r_is) + i_r_i
ax = grid[index]
print(index, theta, r_i, r_o, true_areafactor)
ax.imshow(-mask,
interpolation='nearest',
cmap='gray',
extent=[-fov, fov, -fov, fov],
)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_xlabel('r_inner = {0}'.format(r_i))
ax.set_ylabel('theta = {0}'.format(theta))
add_inner_title(ax, 'r_thick = {0:1.2f}'.format(r_o - r_i), 2)
# ax.text(0.05, 0.95, 'r_thick = {0:1.2f}'.format(r_o - r_i),
# ha='left', va='top', transform = ax.transAxes)
for extension in ['png', 'pdf']:
plt.savefig('ringbg_rings_areafactor_{0}_pixscale_{1}.{2}'
''.format(areafactor, pixscale, extension), dpi=300)
```
#### File: gammapy/background/fov.py
```python
from __future__ import print_function, division
import numpy as np
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
from ..image.utils import coordinates
__all__ = ['fill_acceptance_image',
]
def fill_acceptance_image(image, center, offset, acceptance):
"""Generate a 2D image of a radial acceptance curve.
The radial acceptance curve is given as an array of values
defined at the specified offsets.
Parameters
----------
image : `~astropy.io.fits.ImageHDU`
Empty image to fill.
center : `~astropy.coordinates.SkyCoord`
Coordinate of the center of the image.
offset : `~astropy.coordinates.Angle`
1D array of offset values where acceptance is defined.
acceptance : `~numpy.ndarray`
Array of acceptance values.
Returns
-------
image : `~astropy.io.fits.ImageHDU`
Image filled with radial acceptance.
"""
from scipy.interpolate import interp1d
# initialize WCS to the header of the image
w = WCS(image.header)
# define grids of pixel coordinates
xpix_coord_grid, ypix_coord_grid = coordinates(image, world=False)
# calculate pixel offset from center (in world coordinates)
coord = pixel_to_skycoord(xpix_coord_grid, ypix_coord_grid, w, origin=0)
pix_off = coord.separation(center)
# interpolate
f = interp1d(offset, acceptance, kind='cubic')
pix_acc = f(pix_off)
# fill value in image
image.data = pix_acc
return image
```
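A usage sketch for `fill_acceptance_image`; the WCS header, the pointing position and the toy Gaussian acceptance curve below are illustrative only.
```python
import numpy as np
from astropy.io import fits
from astropy.coordinates import SkyCoord, Angle

# Empty 100 x 100 pixel image with a simple Galactic CAR projection
header = fits.Header()
header['CTYPE1'], header['CTYPE2'] = 'GLON-CAR', 'GLAT-CAR'
header['CRPIX1'], header['CRPIX2'] = 50.5, 50.5
header['CRVAL1'], header['CRVAL2'] = 0.0, 0.0
header['CDELT1'], header['CDELT2'] = -0.1, 0.1
image = fits.ImageHDU(data=np.zeros((100, 100)), header=header)

# Radial acceptance curve defined out to 8 deg so it covers the image corners
center = SkyCoord(0, 0, unit='deg', frame='galactic')
offset = Angle(np.linspace(0, 8, 50), 'deg')
acceptance = np.exp(-0.5 * (offset.degree / 2.0) ** 2)  # toy Gaussian falloff

image = fill_acceptance_image(image, center, offset, acceptance)
```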
#### File: background/tests/test_kernel.py
```python
from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import os
import tempfile
from astropy.io import fits
from astropy.tests.helper import pytest
from astropy.units import Quantity
from astropy.coordinates.angles import Angle
from ...background import GammaImages, IterativeKernelBackgroundEstimator
from ...image import make_empty_image
from ...stats import significance
from ...datasets import FermiGalacticCenter
from ...irf import EnergyDependentTablePSF
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
def test_GammaImages():
"""Tests compute correlated maps in GammaImages.
This is the only method in GammaImages that actually calculates anything.
"""
# Set up test counts and background
counts_hdu = make_empty_image(nxpix=10, nypix=10, binsz=1, fill=42)
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
background_data = 42 * np.ones_like(counts, dtype=float)
# Single unit pixel kernel so should actually be no change.
background_kernel = np.ones((1, 1))
images = GammaImages(counts, background_data)
images.compute_correlated_maps(background_kernel)
# Test significance image against Li & Ma significance value
expected = significance(counts, background_data)
actual = images.significance
assert_allclose(actual, expected)
@pytest.mark.skipif('not HAS_SCIPY')
class TestIterativeKernelBackgroundEstimator(object):
"""Tests methods in the IterativeKernelBackgroundEstimator.
"""
def setup_class(self):
"""Prepares appropriate input and defines inputs for test cases.
"""
from scipy.ndimage import convolve
# Load/create example model images
counts_hdu = make_empty_image(nxpix=10, nypix=10, binsz=1, fill=42)
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
# Initial counts required by one of the tests.
self.counts = counts
psf = FermiGalacticCenter.psf()
psf = psf.table_psf_in_energy_band(Quantity([10, 500], 'GeV'))
kernel_array = psf.kernel(pixel_size=Angle(1, 'deg'),
offset_max=Angle(3, 'deg'), normalize=True)
counts_blob = convolve(counts, kernel_array, mode='constant')
self.counts_blob = counts_blob
# Start with flat background estimate
# Background must be provided as an ImageHDU
images = GammaImages(counts=counts, header=counts_hdu.header)
images_blob = GammaImages(counts=counts_blob, header=counts_hdu.header)
source_kernel = np.ones((1, 3))
background_kernel = np.ones((5, 3))
significance_threshold = 4
mask_dilation_radius = 1
# Loads prepared inputs into estimator
self.ibe = IterativeKernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.ibe2 = IterativeKernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.ibe_blob = IterativeKernelBackgroundEstimator(
images_blob,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
def test_run_iteration_point(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.ibe.run_iteration()
# Should be run twice to update the mask
self.ibe.run_iteration()
mask = self.ibe.mask_image_hdu.data
background = self.ibe.background_image_hdu.data
# Check mask matches expectations
expected_mask = np.ones_like(self.counts)
expected_mask[4][3] = 0
expected_mask[4][4] = 0
expected_mask[4][5] = 0
assert_allclose(mask.astype(int), expected_mask)
# Check background, should be 42 uniformly
assert_allclose(background.astype(float), 42 * np.ones((10, 10)))
def test_run_iteration_blob(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.ibe_blob.run_iteration()
# Should be run twice to update the mask
self.ibe_blob.run_iteration()
mask = self.ibe_blob.mask_image_hdu.data
background = self.ibe_blob.background_image_hdu.data
# Check background, should be 42 uniformly within 10%
assert_allclose(background, 42 * np.ones((10, 10)), rtol=0.15)
def test_run(self):
"""Tests run script."""
mask, background = self.ibe2.run()
assert_allclose(mask.sum(), 97)
assert_allclose(background, 42 * np.ones((10, 10)))
def test_save_files(self):
"""Tests that files are saves, and checks values within them."""
# Create temporary file to write output into
dir = tempfile.mkdtemp()
self.ibe.run_iteration(1)
self.ibe.save_files(filebase=dir, index=0)
mask_filename = dir + '00_mask.fits'
significance_filename = dir + '00_significance.fits'
background_filename = dir + '00_background.fits'
mask_data = fits.open(mask_filename)[1].data
significance_data = fits.open(significance_filename)[1].data
background_data = fits.open(background_filename)[1].data
# Checks values in files against known results for one iteration.
assert_allclose(mask_data.sum(), 97)
assert_allclose(significance_data.sum(), 157.316195729298)
assert_allclose(background_data.sum(), 4200)
os.removedirs(dir)
```
#### File: background/tests/test_reflected.py
```python
from __future__ import print_function, division
import unittest
from astropy.tests.helper import pytest
from ...background import Maps, ReflectedRegionMaker
@pytest.mark.xfail
class TestReflectedBgMaker(unittest.TestCase):
def test_analysis(self):
runs = 'TODO'
maps = Maps('maps.fits')
reflected_bg_maker = ReflectedRegionMaker(runs, maps, psi=2, theta=0.1)
total_maps = Maps('total_maps.fits')
for run in runs:
run_map = total_maps.cutout(run)
reflected_bg_maker.make_n_reflected_map(run, run_map)
total_maps.add(run_map)
total_maps.save('n_reflected.fits')
```
#### File: gammapy/extern/bunch.py
```python
__all__ = ['Bunch']
class Bunch(dict):
"""
Dictionary with attribute access for result objects.
Example is `~gammapy.detect.TSMapResult`
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in self.items()])
else:
return self.__class__.__name__ + "()"
```
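A minimal usage sketch for `Bunch`, showing both attribute and key access.
```python
result = Bunch(amplitude=1e-12, index=2.3)
print(result.amplitude)   # attribute access
print(result['index'])    # regular dict access
print(result)             # repr with right-aligned keys
```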
#### File: gammapy/extern/validator.py
```python
import numpy as np
from astropy import units as u
from astropy.extern import six
def validate_physical_type(name, value, physical_type):
if physical_type is not None:
if not isinstance(value, u.Quantity):
raise TypeError("{0} should be given as a Quantity object".format(name))
if isinstance(physical_type, six.string_types):
if value.unit.physical_type != physical_type:
raise TypeError("{0} should be given in units of {1}".format(name, physical_type))
else:
if value.unit.physical_type not in physical_type:
raise TypeError("{0} should be given in units of {1}".format(name, ', '.join(physical_type)))
def validate_scalar(name, value, domain=None, physical_type=None):
validate_physical_type(name, value, physical_type)
if not physical_type:
if not np.isscalar(value) or not np.isreal(value):
raise TypeError("{0} should be a scalar floating point value".format(name))
if domain == 'positive':
if value < 0.:
raise ValueError("{0} should be positive".format(name))
elif domain == 'strictly-positive':
if value <= 0.:
raise ValueError("{0} should be strictly positive".format(name))
elif domain == 'negative':
if value > 0.:
raise ValueError("{0} should be negative".format(name))
elif domain == 'strictly-negative':
if value >= 0.:
raise ValueError("{0} should be strictly negative".format(name))
elif type(domain) in [tuple, list] and len(domain) == 2:
if value < domain[0] or value > domain[-1]:
raise ValueError("{0} should be in the range [{1}:{2}]".format(name, domain[0], domain[-1]))
return value
def validate_array(name, value, domain=None, ndim=1, shape=None, physical_type=None):
validate_physical_type(name, value, physical_type)
# First convert to a Numpy array:
if type(value) in [list, tuple]:
value = np.array(value)
# Check the value is an array with the right number of dimensions
if not isinstance(value, np.ndarray) or value.ndim != ndim:
if ndim == 1:
raise TypeError("{0} should be a 1-d sequence".format(name))
else:
raise TypeError("{0} should be a {1:d}-d array".format(name, ndim))
# Check that the shape matches that expected
if shape is not None and value.shape != shape:
if ndim == 1:
raise ValueError("{0} has incorrect length (expected {1} but found {2})".format(name, shape[0], value.shape[0]))
else:
raise ValueError("{0} has incorrect shape (expected {1} but found {2})".format(name, shape, value.shape))
return value
```
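A usage sketch for the validators above; the parameter names and values are illustrative.
```python
from astropy import units as u

sigma = validate_scalar('sigma', 2.0, domain='strictly-positive')
distance = validate_scalar('distance', 8.5 * u.kpc, physical_type='length')
edges = validate_array('edges', [0.1, 1.0, 10.0], ndim=1, shape=(3,))
```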
#### File: gammapy/image/catalog.py
```python
import numpy as np
from astropy.coordinates import Angle
from astropy.wcs import WCS
from astropy.units import Quantity
from astropy.table import Table
from . import coordinates
__all__ = ['catalog_image', 'catalog_table']
def _extended_image(catalog, reference_cube):
"""Reprojects and adds extended source images to a larger survey image.
"""
# This import is here instead of at the top to avoid an ImportError
# due to circular dependencies
from ..datasets import fetch_fermi_extended_sources
from ..data import SpectralCube
# Note that the first extended source fits file is unreadable...
hdu_list = fetch_fermi_extended_sources(catalog)[1:]
for source in hdu_list:
source_wcs = WCS(source.header)
source_spec_cube = SpectralCube(data=Quantity(np.array([source.data]), ''),
wcs=source_wcs, energy=energy)
new_source_cube = source_spec_cube.reproject_to(reference_cube)
# TODO: Fix this hack
reference_cube.data = reference_cube.data + np.nan_to_num(new_source_cube.data * 1e-30)
return reference_cube.data[0]
def _source_image(catalog, reference_cube, sim_table=None, total_flux=True):
"""Adds point sources to a larger survey image.
"""
new_image = np.zeros_like(reference_cube.data, dtype=np.float64)
if sim_table is None:
source_table = catalog_table(catalog, energy_bands=False)
else:
source_table = sim_table
energies = source_table.meta['Energy Bins']
wcs_reference = reference_cube.wcs
footprint = wcs_reference.calc_footprint()
glon_max, glon_min = footprint[0][0], footprint[2][0] - 360
glat_min, glat_max = footprint[0][1], footprint[1][1]
for source in np.arange(len(source_table['flux'])):
lon = source_table['GLON'][source]
if lon >= 180:
lon = lon - 360
if (glon_min < lon) & (lon < glon_max):
lat = source_table['GLAT'][source]
if (glat_min < lat) & (lat < glat_max):
flux = source_table['flux'][source]
wcs = reference_cube.wcs
origin = 0 # convention for gammapy
x, y = wcs.wcs_world2pix(lon, lat, origin)
xi, yi = x.astype(int), y.astype(int)
new_image[yi, xi] = new_image[yi, xi] + flux
if total_flux:
factor = source_table['flux'].sum() / new_image.sum()
else:
factor = 1
return new_image * factor, energies
def catalog_image(reference, psf, catalog='1FHL', source_type='point',
total_flux=False, sim_table=None):
"""Creates an image from a simulated catalog, or from 1FHL or 2FGL sources.
Parameters
----------
reference : `~fits.ImageHDU`
Reference Image HDU. The output takes the shape and resolution of this.
psf : `~gammapy.irf.EnergyDependentTablePSF`
Energy dependent Table PSF object for image convolution.
catalog : {'1FHL', '2FGL', 'simulation'}
Flag which source catalog is to be used to create the image.
If 'simulation' is used, sim_table must also be provided.
source_type : {'point', 'extended', 'all'}
Specify whether point or extended sources should be included, or both.
TODO: Currently only 'point' is implemented.
total_flux : bool
Specify whether to conserve total flux.
sim_table : `~astropy.table.Table`
Table of simulated point sources. Only required if catalog = 'simulation'
Returns
-------
out_cube : `~gammapy.data.SpectralCube`
2D Spectral cube containing the image.
Notes
-----
This is currently only implemented for a single energy band.
"""
from scipy.ndimage import convolve
# This import is here instead of at the top to avoid an ImportError
# due to circular dependencies
from ..data import SpectralCube
lons, lats = coordinates(reference)
wcs = WCS(reference.header)
# Uses dummy energy for now to construct spectral cube
# TODO : Fix this hack
reference_cube = SpectralCube(data=Quantity(np.array(reference.data), ''),
wcs=wcs, energy=Quantity([0, 1], 'GeV'))
if source_type == 'extended':
raise NotImplementedError
# TODO: Currently fluxes are not correct for extended sources.
new_image = _extended_image(catalog, reference_cube)
elif source_type == 'point':
new_image, energy = _source_image(catalog, reference_cube,
sim_table, total_flux)
elif source_type == 'all':
raise NotImplementedError
# TODO: Currently Extended Sources do not work
extended = _extended_image(catalog, reference_cube)
point_source = _source_image(catalog, reference_cube, total_flux=True)[0]
new_image = extended + point_source
else:
raise ValueError
total_point_image = SpectralCube(data=new_image, wcs=wcs, energy=energy)
convolved_cube = new_image.copy()
psf = psf.table_psf_in_energy_band(Quantity([np.min(energy).value,
np.max(energy).value], energy.unit))
resolution = abs(reference.header['CDELT1'])
kernel_array = psf.kernel(pixel_size=Angle(resolution, 'deg'),
offset_max=Angle(5, 'deg'), normalize=True)
convolved_cube = convolve(new_image, kernel_array, mode='constant')
out_cube = SpectralCube(data=convolved_cube,
wcs=total_point_image.wcs,
energy=energy)
return out_cube
def catalog_table(catalog, energy_bands=False):
"""Creates catalog table from published source catalog.
This creates a table of catalog sources, positions and fluxes for an
indicated published source catalog - either 1FHL or 2FGL. This should
be used to in instances where a table is required, for instance as an
input for the `~gammapy.image.catalog_image` function.
Parameters
----------
catalog : {'1FHL', '2FGL'}
Catalog to load.
energy_bands : bool
Whether to return catalog in energy bands.
Returns
-------
table : `~astropy.table.Table`
Point source catalog table.
"""
# This import is here instead of at the top to avoid an ImportError
# due to circular dependencies
from ..datasets import fetch_fermi_catalog
data = []
cat_table = fetch_fermi_catalog(catalog, 'LAT_Point_Source_Catalog')
for source in np.arange(len(cat_table)):
glon = cat_table['GLON'][source]
glat = cat_table['GLAT'][source]
# Branch per catalog from here on, since the column names differ between catalogs
if catalog in ['1FHL', 'simulation']:
energy = Quantity([10, 30, 100, 500], 'GeV')
if energy_bands:
Flux_10_30 = cat_table['Flux10_30GeV'][source]
Flux_30_100 = cat_table['Flux30_100GeV'][source]
Flux_100_500 = cat_table['Flux100_500GeV'][source]
row = dict(Source_Type='PointSource',
GLON=glon, GLAT=glat, Flux10_30=Flux_10_30,
Flux30_100=Flux_30_100, Flux100_500=Flux_100_500)
else:
flux_bol = cat_table['Flux'][source]
row = dict(Source_Type='PointSource',
GLON=glon, GLAT=glat, flux=flux_bol)
elif catalog == '2FGL':
energy = Quantity([30, 100, 300, 1000, 3000, 10000, 100000], 'GeV')
if not energy_bands:
flux_bol = cat_table['Flux_Density'][source]
row = dict(Source_Type='PointSource',
GLON=glon,
GLAT=glat,
flux=flux_bol)
else:
Flux_30_100 = cat_table['Flux30_100'][source]
Flux_100_300 = cat_table['Flux100_300'][source]
Flux_300_1000 = cat_table['Flux300_1000'][source]
Flux_1000_3000 = cat_table['Flux1000_3000'][source]
Flux_3000_10000 = cat_table['Flux3000_10000'][source]
Flux_10000_100000 = cat_table['Flux10000_100000'][source]
source_name = cat_table['Source_Name'][source]  # assumes the catalog's 'Source_Name' column
row = dict(Source_Type='PointSource',
Source_Name=source_name,
GLON=glon,
GLAT=glat,
Flux_30_100=Flux_30_100,
Flux_100_300=Flux_100_300,
Flux_300_1000=Flux_300_1000,
Flux_1000_3000=Flux_1000_3000,
Flux_3000_10000=Flux_3000_10000,
Flux_10000_100000=Flux_10000_100000)
data.append(row)
table = Table(data)
table.meta['Energy Bins'] = energy
return table
```
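A usage sketch for `catalog_table`; fetching the Fermi catalog needs network access (or a cached copy via the gammapy dataset helpers).
```python
# Build the point-source table for the 1FHL catalog with bolometric fluxes
table = catalog_table('1FHL', energy_bands=False)
print(table.meta['Energy Bins'])
print(table[:5])
```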
#### File: gammapy/morphology/model.py
```python
from __future__ import print_function, division
import logging
import numpy as np
from astropy.io import fits
from ..utils.const import fwhm_to_sigma
__all__ = ['GaussCatalog',
'make_test_model',
'read_json',
'MorphModelImageCreator',
]
__doctest_skip__ = ['MorphModelImageCreator']
class MorphModelImageCreator(object):
"""Create model images from a HGPS pipeline source config file.
Uses astropy to evaluate the source model, with oversampling or integrating
over pixels.
Parameters
----------
cfg_file : str
Config file with all the sources listed.
exposure : str
Fits image file with the exposure.
psf_file : str (optional)
Json file with PSF information.
background : str (optional)
Fits image file with the background.
apply_psf : bool (default True)
Whether the psf should be applied.
compute_excess : bool (default True)
Whether to compute an excess image.
flux_factor : float (default 1E-12)
Flux conversion factor.
Examples
--------
Here is an example how to use `MorphModelImageCreator`:
>>> from gammapy.morphology import MorphModelImageCreator
>>> model_image_creator = MorphModelImageCreator(cfg_file='input_sherpa.cfg',
... exposure='exposure.fits',
... psf_file='psf.json')
>>> model_image_creator.evaluate_model(mode='center')
>>> model_image_creator.save('model_image.fits')
"""
def __init__(self, cfg_file, exposure, psf_file=None, apply_psf=True,
background=None, flux_factor=1E-12, compute_excess=True):
self.cfg_file = cfg_file
self.exposure = fits.getdata(exposure)
self.header = fits.getheader(exposure)
self._apply_psf = apply_psf
self._flux_factor = flux_factor
self._compute_excess = compute_excess
if psf_file is not None:
self.psf_file = psf_file
if background is not None:
if isinstance(background, str):
self.background = fits.getdata(background)
elif isinstance(background, (int, float)):
self.background = np.ones_like(self.exposure)
def _setup_model(self):
"""Setup a list of source models from an ``input_sherpa.cfg`` config file.
"""
self.source_models = []
# Read config file
from configobj import ConfigObj
cfg = ConfigObj(self.cfg_file, file_error=True)
# Set up model
from astropy.modeling.models import Gaussian2D
for source in cfg.keys():
# TODO: Add other source models
if cfg[source]['Type'] != 'NormGaussian':
logging.error('So far only normgauss2d models can be handled.')
sigma = fwhm_to_sigma * float(cfg[source]['fwhm'])
ampl = float(cfg[source]['ampl']) * 1 / (2 * np.pi * sigma ** 2)
xpos = float(cfg[source]['xpos']) - 1
ypos = float(cfg[source]['ypos']) - 1
source_model = Gaussian2D(ampl, xpos, ypos, x_stddev=sigma, y_stddev=sigma)
self.source_models.append(source_model)
def evaluate_model(self, **kwargs):
"""Evaluate model by oversampling or taking the value at the center of the pixel.
"""
self._setup_model()
self.model_image = np.zeros_like(self.exposure, dtype=np.float64)
from astropy.convolution import utils
height, width = self.exposure.shape
for source_model in self.source_models:
source_model_image = utils.discretize_model(source_model,
(0, width), (0, height), **kwargs)
self.model_image += source_model_image
if self._compute_excess:
self.model_image = self.model_image * self.exposure
if self._apply_psf:
psf = self._create_psf(**kwargs)
from astropy.convolution import convolve
self.model_image = convolve(self.model_image, psf)
self.model_image *= self._flux_factor
def _create_psf(self, **kwargs):
"""Set up psf model using `astropy.convolution`.
"""
# Read psf info
import json
psf_data = json.load(open(self.psf_file))
# Convert sigma and amplitude
sigma_1 = fwhm_to_sigma * psf_data['psf1']['fwhm']
sigma_2 = fwhm_to_sigma * psf_data['psf2']['fwhm']
sigma_3 = fwhm_to_sigma * psf_data['psf3']['fwhm']
ampl_1 = psf_data['psf1']['ampl'] * 2 * np.pi * sigma_1 ** 2
ampl_2 = psf_data['psf2']['ampl'] * 2 * np.pi * sigma_2 ** 2
ampl_3 = psf_data['psf3']['ampl'] * 2 * np.pi * sigma_3 ** 2
# Setup kernels
from astropy.convolution import Gaussian2DKernel
gauss_1 = Gaussian2DKernel(sigma_1, **kwargs)
gauss_2 = Gaussian2DKernel(sigma_2, **kwargs)
gauss_3 = Gaussian2DKernel(sigma_3, **kwargs)
psf = gauss_1 * ampl_1 + gauss_2 * ampl_2 + gauss_3 * ampl_3
psf.normalize()
return psf
def save(self, filename, **kwargs):
"""Save model image to file."""
hdu_list = []
prim_hdu = fits.PrimaryHDU(self.model_image, header=self.header)
hdu_list.append(prim_hdu)
fits_hdu_list = fits.HDUList(hdu_list)
fits_hdu_list.writeto(filename, **kwargs)
if hasattr(self, 'measurements'):
hdu_list = []
prim_hdu = fits.PrimaryHDU(self.measurements[0], header=self.header)
hdu_list.append(prim_hdu)
for image in self.measurements[1:]:
hdu = fits.ImageHDU(image)
hdu_list.append(hdu)
fits_hdu_list = fits.HDUList(hdu_list)
fits_hdu_list.writeto('counts_' + filename, **kwargs)
def fake_counts(self, N, **kwargs):
"""Fake measurement data by adding Poisson noise to the model image.
Parameters
----------
N : int
Number of measurements to fake.
"""
if not self._compute_excess:
self.model_image = self.model_image * self.exposure
if not self._apply_psf:
psf = self._create_psf(**kwargs)
from astropy.convolution import convolve
self.model_image = convolve(self.model_image, psf)
# Set random seed to get reproducible results
np.random.seed(0)
# Fake measurements
for _ in range(N):
self.measurements.append(np.random.poisson(self.model_image))
class GaussCatalog(dict):
"""Multi-Gauss catalog utils."""
def __init__(self, source):
import json
if isinstance(source, dict):
# Assume source is a dict with correct format
self.pars = source
elif isinstance(source, str):
# Assume it is a JSON filename
fh = open(source)
self.pars = json.load(fh)
fh.close()
else:
logging.error('Unknown source: {0}'.format(source))
def set(self):
' + '.join(['gauss2d.' + name for name in source_names])
pass
def make_test_model(nsources=100, npix=500, ampl=100, fwhm=30):
"""Create a model of several Gaussian sources.
"""
from numpy.random import random
from sherpa.astro.ui import set_source
from .utils import _set, _name
model = ' + '.join([_name(ii) for ii in range(nsources)])
set_source(model)
for ii in range(nsources):
_set(_name(ii), 'xpos', npix * random())
_set(_name(ii), 'ypos', npix * random())
_set(_name(ii), 'ampl', ampl * random())
_set(_name(ii), 'fwhm', fwhm * random())
def read_json(filename):
from sherpa.astro.ui import set_source
from . import utils
utils.read_json(filename, set_source)
```
#### File: morphology/tests/test_theta.py
```python
from __future__ import print_function, division
import unittest
from astropy.tests.helper import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from ...morphology import (Gauss2DPDF,
MultiGauss2D,
ThetaCalculator,
ThetaCalculatorScipy,
)
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
class TestThetaCalculator(unittest.TestCase):
"""We use a Gaussian, because it has known analytical
solutions for theta and containment."""
def setUp(self):
# Single Gauss
self.g = Gauss2DPDF(sigma=1)
self.g_tc = ThetaCalculator(self.g.dpdtheta2, theta_max=5, n_bins=1e6)
self.g_tcs = ThetaCalculatorScipy(self.g.dpdtheta2, theta_max=5)
# Multi Gauss
self.m = MultiGauss2D(sigmas=[1, 2])
self.m_tc = ThetaCalculator(self.m.dpdtheta2, theta_max=5, n_bins=1e6)
self.m_tcs = ThetaCalculatorScipy(self.m.dpdtheta2, theta_max=5)
# self.tc2 = mt.ThetaCalculator2D.from_source(self.g, theta_max=5, d)
def test_containment_Gauss2D(self):
for tc in [self.g_tc, self.g_tcs]:
for theta in np.linspace(0, 3, 4):
actual = tc.containment_fraction(theta)
desired = self.g.containment_fraction(theta)
assert_almost_equal(actual, desired, decimal=4)
def test_containment_MultiGauss2D(self):
for tc in [self.m_tc, self.m_tcs]:
for theta in np.linspace(0, 3, 4):
actual = tc.containment_fraction(theta)
desired = self.m.containment_fraction(theta)
assert_almost_equal(actual, desired, decimal=4)
def test_theta_Gauss2D(self):
for tc in [self.g_tc, self.g_tcs]:
for containment in np.arange(0, 1, 0.1):
actual = tc.containment_radius(containment)
desired = self.g.containment_radius(containment)
assert_almost_equal(actual, desired, decimal=4)
def test_theta_MultiGauss2D(self):
for tc in [self.m_tc, self.m_tcs]:
for containment in np.arange(0, 1, 0.1):
actual = tc.containment_radius(containment)
desired = self.m.containment_radius(containment)
assert_almost_equal(actual, desired, decimal=4)
# FIXME: This test is slow and fails with an IndexError.
def _test_ModelThetaCalculator():
"""Check that Gaussian widths add in quadrature
i.e. sigma_psf = 3, sigma_source = 4 ===> sigma_model = 5"""
source, psf = Gauss2DPDF(3), Gauss2DPDF(4)
# Correct analytical reference
ana = Gauss2DPDF(5)
ana_angle = ana.containment_radius(0.5)
ana_containment = ana.containment(ana_angle)
# Numerical method
fov, binsz = 20, 0.2
num = ModelThetaCalculator(source, psf, fov, binsz)
num_angle = num.containment_radius(0.5)
num_containment = num.containment(num_angle)
# Compare results
par_names = ['angle', 'containment']
par_refs = [ana_angle, ana_containment]
par_checks = [num_angle, num_containment]
# TODO: add asserts
```
#### File: gammapy/morphology/utils.py
```python
from __future__ import print_function, division
import json
__all__ = ['read_ascii', 'read_json', 'write_all', 'write_ascii', 'write_json']
def _name(ii):
"""Use this to make the model name for source number `ii`."""
return 'gauss2d.source_{0:02d}'.format(ii)
def _set(name, par, val):
"""Set a source parameter."""
import sherpa.astro.ui as sau
sau.set_par('{name}.{par}'.format(**locals()), val)
# try:
# exec(name + '.' + par + '=' + str(val))
# except Exception as e:
# print e
def _model(source_names):
"""Build additive model string for Gaussian sources."""
return ' + '.join(['gauss2d.' + name for name in source_names])
def read_json(source, setter):
"""Read from JSON file."""
if isinstance(source, dict):
# Assume source is a dict with correct format
d = source
else:
# Assume source is a filename with correct format
d = json.load(open(source))
source_names = d.keys()
model = _model(source_names)
setter(model)
for name, pars in d.items():
for par, val in pars.items():
_set(name, par, val)
def read_ascii(filename, setter):
"""Read from ASCII file."""
lines = open(filename).readlines()
tokens = [line.split() for line in lines]
names = set([token[0] for token in tokens])
pars = set([token[1] for token in tokens])
vals = set([token[2] for token in tokens])
model = _model(names)
setter(model)
for name, par, val in zip(names, pars, vals):
_set(name, par, val)
def write_json(pars, filename):
"""Write to JSON file."""
d = {}
for par in pars:
if par.modelname not in d:
d[par.modelname] = {}
d[par.modelname][par.name] = par.val
json.dump(d, open(filename, 'w'), sort_keys=True, indent=4)
def write_ascii(pars, filename):
"""Write to ASCII"""
fh = open(filename, 'w')
for par in pars:
fh.write('{0} {1} {2}\n'.format(par.modelname, par.name, par.val))
def write_all(filename='results.json'):
"""Dump source, fit results and conf results to a JSON file.
http://www.astropython.org/snippet/2010/7/Save-sherpa-fit-and-conf-results-to-a-JSON-file
"""
import sherpa.astro.ui as sau
out = dict()
if 0:
src = sau.get_source()
src_par_attrs = ('name', 'frozen', 'modelname', 'units', 'val', 'fullname')
out['src'] = dict(name=src.name,
pars=[dict((attr, getattr(par, attr)) for attr in src_par_attrs)
for par in src.pars])
try:
fit_attrs = ('methodname', 'statname', 'succeeded', 'statval', 'numpoints', 'dof',
'rstat', 'qval', 'nfev', 'message', 'parnames', 'parvals')
fit = sau.get_fit_results()
out['fit'] = dict((attr, getattr(fit, attr)) for attr in fit_attrs)
except Exception as err:
print(err)
try:
conf_attrs = ('datasets', 'methodname', 'fitname', 'statname', 'sigma', 'percent',
'parnames', 'parvals', 'parmins', 'parmaxes', 'nfits')
conf = sau.get_conf_results()
out['conf'] = dict((attr, getattr(conf, attr)) for attr in conf_attrs)
except Exception as err:
print(err)
try:
covar_attrs = ('datasets', 'methodname', 'fitname', 'statname', 'sigma', 'percent',
'parnames', 'parvals', 'parmins', 'parmaxes', 'nfits')
covar = sau.get_covar_results()
out['covar'] = dict((attr, getattr(covar, attr)) for attr in covar_attrs)
except Exception as err:
print(err)
if 0:
out['pars'] = []
for par in src.pars:
fullname = par.fullname
if any(fullname == x['name'] for x in out['pars']):
continue # Parameter was already processed
outpar = dict(name=fullname, kind=par.name)
# None implies no calculated confidence interval for Measurement
parmin = None
parmax = None
try:
if fullname in conf.parnames: # Confidence limits available from conf
i = conf.parnames.index(fullname)
parval = conf.parvals[i]
parmin = conf.parmins[i]
parmax = conf.parmaxes[i]
if parmin is None:
parmin = -float('inf') # None from conf means infinity, so set accordingly
if parmax is None:
parmax = float('inf')
elif fullname in fit.parnames: # Conf failed or par is uninteresting and wasn't sent to conf
i = fit.parnames.index(fullname)
parval = fit.parvals[i]
else: # No fit or conf value (maybe frozen)
parval = par.val
except Exception as err:
print(err)
out['pars'].append(outpar)
if filename is None:
return out
else:
json.dump(out, open(filename, 'w'), sort_keys=True, indent=4)
```
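A sketch of the dict/JSON structure consumed by `read_json`; the source names and parameter values are made up, and Sherpa must be installed for the setter.
```python
from sherpa.astro.ui import set_source

# Keys become 'gauss2d.<name>' model components; values set the model parameters.
sources = {
    "source_00": {"xpos": 101.2, "ypos": 45.8, "ampl": 3.0, "fwhm": 0.25},
    "source_01": {"xpos": 210.0, "ypos": 80.3, "ampl": 1.2, "fwhm": 0.4},
}
read_json(sources, set_source)
```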
#### File: gammapy/scripts/background_cube.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser
__all__ = ['background_cube']
def main(args=None):
parser = get_parser(background_cube)
parser.add_argument('run_list', type=str,
help='Input run list file name')
parser.add_argument('exclusion_list', type=str,
help='Input exclusion list file name')
parser.add_argument('reference_file', type=str,
help='Input FITS reference cube file name')
parser.add_argument('out_file', type=str,
help='Output FITS counts cube file name')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing output file?')
args = parser.parse_args(args)
background_cube(**vars(args))
def background_cube(run_list,
exclusion_list,
reference_file,
out_file,
overwrite):
"""Create background model cube from off runs.
TODO: explain a bit.
"""
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
# TODO: implement
raise NotImplementedError
```
#### File: gammapy/scripts/check.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser
__all__ = ['check']
def main(args=None):
parser = get_parser(check)
parser.add_argument('--package', type=str, default=None,
help='Package to test')
args = parser.parse_args(args)
check(**vars(args))
def check(package):
"""Run gammapy unit tests."""
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
import gammapy
gammapy.test(package, verbose=True)
```
#### File: gammapy/scripts/cwt.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser
__all__ = ['run_cwt']
def main(args=None):
parser = get_parser(run_cwt)
parser.add_argument('infile', action="store",
help='Input FITS file name')
parser.add_argument('outfile', action="store",
help='Output FITS file name')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing output file?')
# Wavelet scales to be used
parser.add_argument('--min_scale', default=6.0, type=float,
help='Minimum wavelet scale')
parser.add_argument('--nscales', default=6, type=int,
help='Number of wavelet scales')
parser.add_argument('--scale_step', default=1.3, type=float,
help='Geometric step between wavelet scales')
# Detection thresholds
parser.add_argument('--thresh', default=3.0, type=float,
help='Significance threshold for pixel detection')
parser.add_argument('--detect', default=5.0, type=float,
help='Significance threshold for source detection')
parser.add_argument('--niter', default=5, type=int,
help='Maximum number of iterations')
parser.add_argument('--convergence', default=1e-5, type=float,
help='Convergence parameter')
args = parser.parse_args(args)
run_cwt(**vars(args))
def run_cwt(infile,
outfile,
overwrite,
min_scale,
nscales,
scale_step,
thresh,
detect,
niter,
convergence):
"""Compute filtered image using Continuous Wavelet Transform (CWT).
TODO: add example and explain output.
"""
import os.path
import sys
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
if os.path.isfile(outfile) and not overwrite:
logging.error("Output file exists and overwrite is False")
sys.exit()
from ..detect.cwt import CWT
cwt = CWT(min_scale, nscales, scale_step)
cwt.set_file(infile)
cwt.iterative_filter_peak(thresh, detect, niter, convergence)
cwt.save_filter(outfile, overwrite=overwrite)
```
#### File: gammapy/scripts/residual_images.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser
__all__ = ['residual_images']
def main(args=None):
parser = get_parser(residual_images)
parser.add_argument('model_file', type=str,
help='Input excess model FITS file name')
parser.add_argument('data_file', type=str,
help='Input data FITS file name')
parser.add_argument('out_file', type=str,
help='Output FITS file name')
parser.add_argument('--thetas', type=str, default='0.1',
help='On-region correlation radii (deg, comma-separated)')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite existing output file?')
args = parser.parse_args(args)
args.thetas = [float(theta) for theta in args.thetas.split(',')]
residual_images(**vars(args))
def residual_images(model_file,
data_file,
out_file,
thetas,
overwrite):
"""Compute source model residual images.
The input `data_file` must contain the following HDU extensions:
* 'On' -- Counts image
* 'Background' -- Background image
* 'Diffuse' -- Diffuse model image
"""
import logging
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s - %(message)s')
import numpy as np
from astropy.io import fits
from gammapy.image import disk_correlate
from gammapy.stats import significance
logging.info('Reading {0}'.format(data_file))
hdu_list = fits.open(data_file)
header = hdu_list[0].header
counts = hdu_list['On'].data.astype(np.float64)
background = hdu_list['Background'].data.astype(np.float64)
diffuse = hdu_list['Diffuse'].data.astype(np.float64)
logging.info('Reading {0}'.format(model_file))
model = fits.getdata(model_file)
background_plus_model_diffuse = background + model + diffuse
out_hdu_list = fits.HDUList()
for theta in thetas:
logging.info('Processing theta = {0} deg'.format(theta))
theta_pix = theta / header['CDELT2']
counts_corr = disk_correlate(counts, theta_pix)
background_plus_model_corr = disk_correlate(background_plus_model_diffuse, theta_pix)
# excess_corr = counts_corr - background_plus_model_corr
significance_corr = significance(counts_corr, background_plus_model_corr)
name = 'RESIDUAL_SIGNIFICANCE_{0}'.format(theta)
logging.info('Appending HDU extension: {0}'.format(name))
hdu = fits.ImageHDU(significance_corr, header, name)
out_hdu_list.append(hdu)
logging.info('Writing {0}'.format(out_file))
out_hdu_list.writeto(out_file, clobber=overwrite)
```
#### File: gammapy/scripts/root_to_fits.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.scripts import get_parser
# TODO: implement object listing and selecting by key names in ROOT file
# TODO: implement histogram conversion
# TODO: implement ntuple conversion
__all__ = ['root_to_fits']
def main(args=None):
parser = get_parser(root_to_fits)
args = parser.parse_args(args)
root_to_fits(**vars(args))
def root_to_fits():
"""Convert ROOT files to FITS files (histograms and ntuples).
TODO: explain a bit.
"""
# TODO: implement
raise NotImplementedError
```
#### File: gammapy/spectrum/cosmic_ray.py
```python
from __future__ import print_function, division
import numpy as np
from astropy.units import Quantity
__all__ = ['cosmic_ray_flux']
def _power_law(E, N, k):
E = Quantity(E, 'TeV')
E0 = Quantity(1, 'TeV')
N = Quantity(N, 'm^-2 s^-1 TeV^-1 sr^-1')
flux = N * (E / E0) ** (-k)
return flux
def _log_normal(E, L, E_p, w):
E = Quantity(E, 'TeV')
E_p = Quantity(E_p, 'TeV')
L = Quantity(L, 'm^-2 s^-1 sr^-1')
term1 = L / (E * w * np.sqrt(2 * np.pi))
term2 = np.exp(-np.log(E / E_p) ** 2 / (2 * w ** 2))
return term1 * term2
def _electron_spectrum(E, N, k, L, E_p, w):
flux = _power_law(E, N, k)
flux += _log_normal(E, L, E_p, w)
return flux
def cosmic_ray_flux(energy, particle='proton'):
"""Cosmic ray flux at Earth.
These are the spectra assumed in this CTA study:
Table 3 in http://adsabs.harvard.edu/abs/2013APh....43..171B
The hadronic spectra are simple power-laws, the electron spectrum
is the sum of a power law and a log-normal component to model the
"Fermi shoulder".
Parameters
----------
energy : `~astropy.units.Quantity`
Particle energy
particle : {'electron', 'proton', 'He', 'N', 'Si', 'Fe'}
Particle type
Returns
-------
flux : `~astropy.units.Quantity`
Cosmic ray flux in unit ``m^-2 s^-1 TeV^-1 sr^-1``
"""
pars = dict()
pars['electron'] = dict(N=6.85e-5, k=3.21, L=3.19e-3, E_p=0.107, w=0.776)
pars['proton'] = dict(N=0.096, k=2.70)
pars['N'] = dict(N=0.0719, k=2.64)
pars['Si'] = dict(N=0.0284, k=2.66)
pars['Fe'] = dict(N=0.0134, k=2.63)
if particle == 'electron':
return _electron_spectrum(energy, **pars['electron'])
    # 'He' has no entry in `pars`, so requesting it falls through to the ValueError below.
    elif particle in pars:
        return _power_law(energy, **pars[particle])
else:
raise ValueError('Invalid argument for particle: {0}'.format(particle))
```
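For reference, a short illustrative call to `cosmic_ray_flux`; the energies are arbitrary and the import mirrors the test module below:
```python
from astropy.units import Quantity
from gammapy.spectrum import cosmic_ray_flux

energy = Quantity([0.1, 1.0, 10.0], 'TeV')
proton_flux = cosmic_ray_flux(energy, particle='proton')      # simple power law
electron_flux = cosmic_ray_flux(energy, particle='electron')  # power law + log-normal 'Fermi shoulder'
print(proton_flux)  # Quantity in m^-2 s^-1 TeV^-1 sr^-1
```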
#### File: gammapy/spectrum/flux_point.py
```python
from __future__ import print_function, division
import numpy as np
from astropy.table import Table
from ..spectrum.powerlaw import power_law_flux
__all__ = ['compute_differential_flux_points']
def compute_differential_flux_points(x_method='lafferty', y_method='power_law',
table=None, model=None,
spectral_index=None, energy_min=None,
energy_max=None, int_flux=None,
int_flux_err=None):
"""Creates differential flux points table from integral flux points table.
Parameters
----------
table : `~astropy.table.Table`
Integral flux data table in energy bins, including columns
'ENERGY_MIN', 'ENERGY_MAX', 'INT_FLUX', 'INT_FLUX_ERR'
energy_min : float, array_like
If table not defined, minimum energy of bin(s) may be input
directly as either a float or array.
energy_max : float, array_like
If table not defined, maximum energy of bin(s) input directly.
int_flux : float, array_like
If table not defined, integral flux in bin(s) input directly. If array,
energy_min, energy_max must be either arrays of the same shape
(for differing energy bins) or floats (for the same energy bin).
int_flux_err : float, array_like
Type must be the same as for int_flux
x_method : {'lafferty', 'log_center', 'table'}
Flux point energy computation method; either Lafferty & Wyatt
model-based positioning, log bin center positioning
or user-defined `~astropy.table.Table` positioning
using column heading ['ENERGY']
y_method : {'power_law', 'model'}
Flux computation method assuming PowerLaw or user defined model function
model : callable
User-defined model function
spectral_index : float, array_like
Spectral index if default power law model is used. Either a float
or array_like (in which case, energy_min, energy_max and int_flux
must be floats to avoid ambiguity)
Returns
-------
differential_flux_table : `~astropy.table.Table`
Input table with appended columns 'ENERGY', 'DIFF_FLUX', 'DIFF_FLUX_ERR'
Notes
-----
For usage, see this tutorial: :ref:`tutorials-flux_point`.
"""
# Use input values if not initially provided with a table
# and broadcast quantities to arrays if required
if table is None:
spectral_index = np.array(spectral_index).reshape(np.array(spectral_index).size,)
energy_min = np.array(energy_min).reshape(np.array(energy_min).size,)
energy_max = np.array(energy_max).reshape(np.array(energy_max).size,)
int_flux = np.array(int_flux).reshape(np.array(int_flux).size,)
try:
int_flux_err = np.array(int_flux_err).reshape(np.array(int_flux_err).size,)
        except Exception:
pass
# TODO: Can a better implementation be found here?
lengths = dict(SPECTRAL_INDEX=len(spectral_index),
ENERGY_MIN=len(energy_min),
ENERGY_MAX=len(energy_max),
FLUX=len(int_flux))
max_length = np.array(list(lengths.values())).max()
int_flux = np.array(int_flux) * np.ones(max_length)
spectral_index = np.array(spectral_index) * np.ones(max_length)
energy_min = np.array(energy_min) * np.ones(max_length)
energy_max = np.array(energy_max) * np.ones(max_length)
try:
int_flux_err = np.array(int_flux_err) * np.ones(max_length)
        except Exception:
pass
# Otherwise use the table provided
else:
energy_min = np.asanyarray(table['ENERGY_MIN'])
energy_max = np.asanyarray(table['ENERGY_MAX'])
int_flux = np.asanyarray(table['INT_FLUX'])
try:
int_flux_err = np.asanyarray(table['INT_FLUX_ERR'])
        except Exception:
pass
# Compute x point
if x_method == 'table':
# This is only called if the provided table includes energies
energy = np.array(table['ENERGY'])
elif x_method == 'log_center':
from scipy.stats import gmean
energy = np.array(gmean((energy_min, energy_max)))
elif x_method == 'lafferty':
if y_method == 'power_law':
# Uses analytical implementation available for the power law case
energy = _energy_lafferty_power_law(energy_min, energy_max,
spectral_index)
else:
energy = np.array(_x_lafferty(energy_min,
energy_max, model))
else:
raise ValueError('Invalid x_method: {0}'.format(x_method))
# Compute y point
if y_method == 'power_law':
g = -1 * np.abs(spectral_index)
diff_flux = power_law_flux(int_flux, g, energy, energy_min, energy_max)
elif y_method == 'model':
diff_flux = _ydiff_excess_equals_expected(int_flux, energy_min,
energy_max, energy, model)
else:
raise ValueError('Invalid y_method: {0}'.format(y_method))
# Add to table
table = Table()
table['ENERGY'] = energy
table['DIFF_FLUX'] = diff_flux
# Error processing if required
try:
# TODO: more rigorous implementation of error propagation should be implemented
# I.e. based on MC simulation rather than gaussian error assumption
err = int_flux_err / int_flux
diff_flux_err = err * diff_flux
table['DIFF_FLUX_ERR'] = diff_flux_err
    except Exception:
pass
table.meta['spectral_index'] = spectral_index
table.meta['spectral_index_description'] = "Spectral index assumed in the DIFF_FLUX computation"
return table
def _x_lafferty(xmin, xmax, function):
"""The Lafferty & Wyatt method to compute X.
Pass in a function and bin bounds x_min and x_max i.e. for energy
See: Lafferty & Wyatt, Nucl. Instr. and Meth. in Phys. Res. A 355(1995) 541-547
See: http://nbviewer.ipython.org/gist/cdeil/bdab5f236640ef52f736
"""
from scipy.optimize import brentq
from scipy import integrate
indices = np.arange(len(xmin))
x_points = []
for index in indices:
deltax = xmax[index] - xmin[index]
I = integrate.quad(function, xmin[index], xmax[index], args=())
F = (I[0] / deltax)
def g(x):
return function(x) - F
x_point = brentq(g, xmin[index], xmax[index])
x_points.append(x_point)
return x_points
def _ydiff_excess_equals_expected(yint, xmin, xmax, x, model):
"""The ExcessEqualsExpected method to compute Y (differential).
y / yint = y_model / yint_model"""
yint_model = _integrate(xmin, xmax, model)
y_model = model(x)
return y_model * (yint / yint_model)
def _integrate(xmin, xmax, function, segments=1e3):
"""Integrates method function using the trapezium rule between xmin and xmax.
"""
indices = np.arange(len(xmin))
y_values = []
for index in indices:
x_vals = np.arange(xmin[index], xmax[index], 1.0 / segments)
y_vals = function(x_vals)
# Division by number of segments required for correct normalization
y_values.append(np.trapz(y_vals) / segments)
return y_values
def _energy_lafferty_power_law(energy_min, energy_max, spectral_index):
"""Analytical case for determining lafferty x-position for power law case.
"""
# Cannot call into gammapy.powerlaw as implementation is different
# due to different reference energies
term0 = 1. - spectral_index
term1 = energy_max - energy_min
term2 = 1. / term0
flux_lw = term2 / term1 * (energy_max ** term0 - energy_min ** term0)
return np.exp(-np.log(flux_lw) / np.abs(spectral_index))
```
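A hedged example of building differential flux points directly from arrays; the energy bins and integral fluxes are made up:
```python
import numpy as np
from gammapy.spectrum.flux_point import compute_differential_flux_points  # module path as in this file

table = compute_differential_flux_points(
    x_method='lafferty',
    y_method='power_law',
    spectral_index=2.0,
    energy_min=np.array([1.0, 10.0]),       # illustrative energy bins (TeV)
    energy_max=np.array([10.0, 100.0]),
    int_flux=np.array([1e-11, 1e-12]),      # made-up integral fluxes
    int_flux_err=np.array([1e-12, 1e-13]),
)
print(table['ENERGY'], table['DIFF_FLUX'], table['DIFF_FLUX_ERR'])
```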
#### File: spectrum/tests/test_cosmic_ray.py
```python
from __future__ import print_function, division
from astropy.units import Quantity
from ...spectrum import cosmic_ray_flux
from ...utils.testing import assert_quantity
def test_cosmic_ray_flux():
energy = Quantity(1, 'TeV')
actual = cosmic_ray_flux(energy, 'proton')
desired = Quantity(0.096, '1 / (m2 s sr TeV)')
assert_quantity(actual, desired)
# TODO: test array quantities and other particles
```
#### File: spectrum/tests/test_flux_point.py
```python
from __future__ import print_function, division
import itertools
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from astropy.table import Table
from ..flux_point import (_x_lafferty, _integrate, _ydiff_excess_equals_expected,
compute_differential_flux_points,
_energy_lafferty_power_law)
from ...spectrum.powerlaw import power_law_evaluate, power_law_integral_flux
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
x_methods = ['table', 'lafferty', 'log_center']
y_methods = ['power_law', 'model']
indices = [0, 1, 2, 3]
@pytest.mark.skipif('not HAS_SCIPY')
def test_x_lafferty():
"""Tests Lafferty & Wyatt x-point method.
Using input function g(x) = 10^4 exp(-6x) against
check values from paper Lafferty & Wyatt. Nucl. Instr. and Meth. in Phys.
Res. A 355 (1995) 541-547, p. 542 Table 1
"""
# These are the results from the paper
desired = np.array([0.048, 0.190, 0.428, 0.762])
def f(x):
return (10 ** 4) * np.exp(-6 * x)
emins = np.array([0.0, 0.1, 0.3, 0.6])
emaxs = np.array([0.1, 0.3, 0.6, 1.0])
actual = _x_lafferty(xmin=emins, xmax=emaxs, function=f)
assert_allclose(actual, desired, atol=1e-3)
def test_integration():
def function(x):
return x ** 2
xmin = np.array([-2])
xmax = np.array([2])
def indef_int(x):
return (x ** 3) / 3
# Calculate analytical result
desired = indef_int(xmax) - indef_int(xmin)
# Get numerical result
actual = _integrate(xmin, xmax, function, segments=1e3)
# Compare, bounds suitable for number of segments
assert_allclose(actual, desired, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ydiff_excess_equals_expected():
"""Tests y-value normalization adjustment method.
"""
def model(x):
return x ** 2
xmin = np.array([10, 20, 30, 40])
xmax = np.array([20, 30, 40, 50])
yint = np.array([42, 52, 62, 72]) # 'True' integral flux in this test bin
# Get values
x_values = np.array(_x_lafferty(xmin, xmax, model))
y_values = _ydiff_excess_equals_expected(yint, xmin, xmax, x_values, model)
# Set up test case comparison
y_model = model(np.array(x_values))
# Test comparison result
desired = _integrate(xmin, xmax, model)
# Test output result
actual = y_model * (yint / y_values)
# Compare
assert_allclose(actual, desired, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('index, x_method, y_method',
itertools.product(indices, ['lafferty', 'log_center'],
y_methods))
def test_array_broadcasting(index, x_method, y_method):
"""Tests for array broadcasting in for likely input scenarios.
"""
# API for power_law case can differ from model case if table not used
# so both tested here
in_array = 0.9 * np.arange(6).reshape(3, 2)
values = dict(SPECTRAL_INDEX=[3 * in_array, 3., 3., 3.],
ENERGY_MIN=[1., 0.1 * in_array, 1., 1.],
ENERGY_MAX=[10., 10., 4 * in_array, 10.],
INT_FLUX=[30., 30., 30., 10. * in_array])
# Define parameters
spectral_index = values['SPECTRAL_INDEX'][index]
energy_min = values['ENERGY_MIN'][index]
energy_max = values['ENERGY_MAX'][index]
int_flux = values['INT_FLUX'][index]
int_flux_err = 0.1 * int_flux
if y_method == 'power_law':
model = None
else:
def model(x):
return x ** 2
table = compute_differential_flux_points(x_method, y_method, model=model,
spectral_index=spectral_index,
energy_min=energy_min,
energy_max=energy_max,
int_flux=int_flux,
int_flux_err=int_flux_err)
    # Check output size
energy = table['ENERGY']
actual = len(energy)
desired = 6
assert_allclose(actual, desired)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('x_method,y_method', itertools.product(x_methods,
y_methods))
def test_compute_differential_flux_points(x_method, y_method):
"""Iterates through the 6 different combinations of input options.
Tests against analytical result or result from gammapy.spectrum.powerlaw.
"""
# Define the test cases for all possible options
energy_min = np.array([1.0, 10.0])
energy_max = np.array([10.0, 100.0])
spectral_index = 2.0
table = Table()
table['ENERGY_MIN'] = energy_min
table['ENERGY_MAX'] = energy_max
table['ENERGY'] = np.array([2.0, 20.0])
if x_method == 'log_center':
energy = np.sqrt(energy_min * energy_max)
elif x_method == 'table':
energy = table['ENERGY'].data
# Arbitrary model (simple exponential case)
def diff_flux_model(x):
return np.exp(x)
# Integral of model
def int_flux_model(E_min, E_max):
return np.exp(E_max) - np.exp(E_min)
if y_method == 'power_law':
if x_method == 'lafferty':
energy = _energy_lafferty_power_law(energy_min, energy_max,
spectral_index)
# Test that this is equal to analytically expected
# log center result
desired_energy = np.sqrt(energy_min * energy_max)
assert_allclose(energy, desired_energy, rtol=1e-6)
desired = power_law_evaluate(energy, 1, spectral_index, energy)
int_flux = power_law_integral_flux(desired, spectral_index, energy,
energy_min, energy_max)
elif y_method == 'model':
if x_method == 'lafferty':
energy = _x_lafferty(energy_min, energy_max, diff_flux_model)
desired = diff_flux_model(energy)
int_flux = int_flux_model(energy_min, energy_max)
int_flux_err = 0.1 * int_flux
table['INT_FLUX'] = int_flux
table['INT_FLUX_ERR'] = int_flux_err
result_table = compute_differential_flux_points(x_method,
y_method,
table,
diff_flux_model,
spectral_index)
# Test energy
actual_energy = result_table['ENERGY'].data
desired_energy = energy
assert_allclose(actual_energy, desired_energy, rtol=1e-3)
# Test flux
actual = result_table['DIFF_FLUX'].data
assert_allclose(actual, desired, rtol=1e-2)
# Test error
actual = result_table['DIFF_FLUX_ERR'].data
desired = 0.1 * result_table['DIFF_FLUX'].data
assert_allclose(actual, desired, rtol=1e-3)
```
#### File: spectrum/tests/test_isrf.py
```python
from __future__ import print_function, division
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from ...spectrum import Schlickeiser, Galprop
@pytest.mark.xfail
def test_Schlickeiser_omega_g_over_b():
""" Check that CMB has the energy density it is
supposed to have according to its temperature """
actual = Schlickeiser()._omega_g_over_b('CMB')
    assert_allclose(actual, 1, atol=1e-2)
@pytest.mark.xfail
def test_Schlickeiser_call():
""" Check that we roughly get the same value
as in Fig. 3.9 of Hillert's diploma thesis.
TODO: The check should be made against a published
value instead """
actual = Schlickeiser()(1e-3)
    assert_allclose(actual / 189946, 1, atol=1e-5)
def test_Galprop_call():
Galprop()
# TODO
```
#### File: stats/tests/test_data.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy.testing import assert_allclose
from ...stats import Stats
def test_Stats():
n_on, n_off, a_on, a_off = 1, 2, 3, 4
stats = Stats(n_on=n_on, n_off=n_off, a_on=a_on, a_off=a_off)
assert_allclose(stats.alpha, a_on / a_off)
assert_allclose(stats.background, a_on / a_off * n_off)
assert_allclose(stats.excess, n_on - a_on / a_off * n_off)
def test_make_stats():
pass
def test_combine_stats():
pass
```
#### File: utils/tests/test_array.py
```python
from __future__ import print_function, division
import numpy as np
from ..array import array_stats_str
def test_array_stats_str():
actual = array_stats_str(np.pi, 'pi')
assert actual == 'pi : size = 1, min = 3.142, max = 3.142\n'
actual = array_stats_str([np.pi, 42])
assert actual == 'size = 2, min = 3.142, max = 42.000\n'
```
#### File: utils/tests/test_random.py
```python
from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_allclose
from ..random import sample_sphere, sample_powerlaw, sample_sphere_distance
def test_sample_sphere():
np.random.seed(0)
lon, lat = sample_sphere(size=2)
assert_allclose(lon, [3.44829694, 4.49366732])
assert_allclose(lat, [0.20700192, 0.08988736])
def test_sample_powerlaw():
np.random.seed(0)
x = sample_powerlaw(x_min=0.1, x_max=10, gamma=2, size=2)
assert_allclose(x, [0.21897428, 0.34250971])
def test_sample_sphere_distance():
np.random.seed(0)
x = sample_sphere_distance(distance_min=0.1, distance_max=42, size=2)
assert_allclose(x, [34.386731, 37.559774])
    x = sample_sphere_distance(distance_min=0.1, distance_max=42, size=int(1e3))
assert x.min() >= 0.1
assert x.max() <= 42
``` |
{
"source": "jolespin/compositional",
"score": 2
} |
#### File: lib/compositional/compositional.py
```python
from __future__ import print_function, division
# Built-ins
import sys,warnings,functools
try:
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping
from importlib import import_module
# Version specific
if sys.version_info.major == 2:
from StringIO import StringIO
if sys.version_info.major == 3:
from io import StringIO
# External
import numpy as np
import pandas as pd
from pandas._libs.algos import nancorr
# =========
# Utilities
# =========
# Check packages
def check_packages(packages, namespace=None, import_into_backend=True, verbose=False):
"""
Check if packages are available (and import into global namespace)
If package is a tuple then imports as follows: ("numpy", "np") where "numpy" is full package name and "np" is abbreviation
To import packages into current namespace: namespace = globals()
To import packages in backend, e.g. if this is used in a module/script, use `import_into_backend`
packages: str, non-tuple iterable
usage:
@check_packages(["sklearn", "scipy", ("numpy", "np")])
def f():
pass
Adapted from the following source:
soothsayer_utils (https://github.com/jolespin/soothsayer_utils)
"""
# Force packages into sorted non-redundant list
if isinstance(packages,(str, tuple)):
packages = [packages]
packages = set(packages)
# Set up decorator for package imports
# Wrapper
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
missing_packages = []
for pkg in packages:
if isinstance(pkg, tuple):
assert len(pkg) == 2, "If a package is tuple type then it must have 2 elements e.g. ('numpy', 'np')"
pkg_name, pkg_variable = pkg
else:
pkg_name = pkg_variable = pkg
try:
package = import_module(pkg_name)
if import_into_backend:
globals()[pkg_variable] = package
if namespace is not None:
namespace[pkg_variable] = package
if verbose:
print("Importing {} as {}".format(pkg_name, pkg_variable), True, file=sys.stderr)
except ImportError:
missing_packages.append(pkg_name)
if verbose:
print("Cannot import {}:".format(pkg_name), False, file=sys.stderr)
assert not missing_packages, "Please install the following packages to use this function:\n{}".format( ", ".join(missing_packages))
return func(*args, **kwargs)
return wrapper
return decorator
# ===========================
# Compositional data analysis
# ===========================
# Extension of CLR to use custom centroids, references, and zeros without pseudocounts
def transform_xlr(X, reference_components=None, centroid="mean", return_zeros_as_neginfinity=False, zeros_ok=True):
"""
# Description
Extension of CLR to incorporate custom centroids, reference components (iqlr), and handle missing values.
    This implementation is more versatile than skbio's implementation, but that makes it slower if it is done iteratively.
# Documentation on CLR:
http://scikit-bio.org/docs/latest/generated/skbio.stats.composition.clr.html#skbio.stats.composition.clr
# Parameters
* X:
- Compositional data
(1D): pd.Series or 1D np.array
(2D): pd.DataFrame or 2D np.array
* centroid:
- Can be precomputed or a function applied to the log-transformed composition(s)
(1/2)D: 'mean', 'median', callable function
(1D): numeric
(2D): pd.Series, dict
* reference_components:
- Custom group of components used during the centroid calculation
* return_zeros_as_neginfinity:
True: Returns zeros as -np.inf
False: Returns zeros as np.nan
* zeros_ok:
True: Mask zeros with np.nan with warning
False: Error
"""
n_dimensions = len(X.shape)
assert n_dimensions in {1,2}, "`X` must be 1D or 2D"
assert np.all(X >= 0), "`X` cannot contain negative values because of log-transformation step."
assert not isinstance(reference_components, tuple), "`reference_components` cannot be type tuple"
# 1-Dimensional
if n_dimensions == 1:
# Check for labels
components = None
if isinstance(X, pd.Series):
components = X.index
X = X.values
if reference_components is None:
reference_components = components
reference_components = list(map(lambda component: components.get_loc(component), reference_components))
X = X.astype(float)
# Check for zeros
X_contains_zeros = False
        n_zeros = (X == 0).sum()
if n_zeros:
if zeros_ok:
mask_zeros = X == 0
X[mask_zeros] = np.nan
X_contains_zeros = True
warnings.warn("N={} zeros detected in `X`. Masking zeros as NaN and will default to nan-robust functions if 'mean' or 'median' were provided for centroid".format(n_zeros))
else:
raise Exception("N={} zeros detected in `X`. Either preprocess, add pseudocounts, or `zeros_ok=True".format(n_zeros))
# Log transformation
X_log = np.log(X)
# Centroid
centroid_is_string = isinstance(centroid, str)
centroid_is_function = hasattr(centroid, "__call__")
centroid_is_precomputed = np.issubdtype(type(centroid), np.number)
if not centroid_is_precomputed:
# Get function associated with string for centroid
if centroid_is_string:
centroid = centroid.lower()
assert centroid in {"mean", "median"}, "Please use 'mean','median', or a precomputed centroid"
if X_contains_zeros:
centroid = {"mean":np.nanmean, "median":np.nanmedian}[centroid]
else:
centroid = {"mean":np.mean, "median":np.median}[centroid]
centroid_is_function = True
# Compute centroid using function
if centroid_is_function:
func = centroid
centroid = func(X_log[reference_components])
# Transform
X_transformed = X_log - centroid
# Output
if all([return_zeros_as_neginfinity, X_contains_zeros]):
X_transformed[mask_zeros] = -np.inf
if components is not None:
X_transformed = pd.Series(X_transformed, index=components)
return X_transformed
# 2-Dimensional
if n_dimensions == 2:
# Check for labels
index = None
components = None
if isinstance(X, pd.DataFrame):
index = X.index
components = X.columns
X = X.values
if reference_components is None:
reference_components = components
reference_components = list(map(lambda component: components.get_loc(component), reference_components))
X = X.astype(float)
# Check for zeros
X_contains_zeros = False
        n_zeros = (X == 0).sum()
if n_zeros:
if zeros_ok:
mask_zeros = X == 0
X[mask_zeros] = np.nan
X_contains_zeros = True
warnings.warn("N={} zeros detected in `X`. Masking zeros as NaN and will default to nan-robust functions if 'mean' or 'median' were provided for centroid".format(n_zeros))
else:
raise Exception("N={} zeros detected in `X`. Either preprocess, add pseudocounts, or `zeros_ok=True".format(n_zeros))
# Log transformation
X_log = np.log(X)
# Centroid
centroid_is_string = isinstance(centroid, str)
centroid_is_function = hasattr(centroid, "__call__")
centroid_is_precomputed = False
# Preprocess precomputed centroid
if np.all(np.logical_not([centroid_is_string, centroid_is_function])):
if index is not None:
if isinstance(centroid, Mapping):
centroid = pd.Series(centroid)
assert isinstance(centroid, pd.Series), "If `centroid` is dict-like/pd.Series then `X` must be a `pd.DataFrame`."
                assert set(centroid.index) >= set(index), "Not all indices from `X.index` are available in `centroid.index`."
centroid = centroid[index].values
assert len(centroid) == X_log.shape[0], "Dimensionality is not compatible: centroid.size != X.shape[0]."
centroid_is_precomputed = True
if not centroid_is_precomputed:
# Get function associated with string for centroid
if centroid_is_string:
centroid = centroid.lower()
assert centroid in {"mean", "median"}, "Please use 'mean','median', or a precomputed centroid"
if X_contains_zeros:
centroid = {"mean":np.nanmean, "median":np.nanmedian}[centroid]
else:
centroid = {"mean":np.mean, "median":np.median}[centroid]
centroid_is_function = True
# Compute centroid using function
if centroid_is_function:
func = centroid
# If function has "axis" argument
try:
centroid = func(X_log[:,reference_components], axis=-1)
# If function does not have "axis" argument
except TypeError:
centroid = list(map(func, X_log[:,reference_components]))
# Broadcast centroid
centroid = np.asarray(centroid)
if len(centroid.shape) == 1:
centroid = centroid[:,np.newaxis]
# Transform
X_transformed = X_log - centroid
# Output
if all([return_zeros_as_neginfinity, X_contains_zeros]):
X_transformed[mask_zeros] = -np.inf
if components is not None:
X_transformed = pd.DataFrame(X_transformed, index=index, columns=components)
return X_transformed
# CLR Normalization
def transform_clr(X, return_zeros_as_neginfinity=False, zeros_ok=True):
"""
Wrapper around `transform_xlr`
# Description
Extension of CLR to handle missing values.
    This implementation is more versatile than skbio's implementation, but that makes it slower if it is done iteratively.
# Documentation on CLR:
http://scikit-bio.org/docs/latest/generated/skbio.stats.composition.clr.html#skbio.stats.composition.clr
# Parameters
* X:
- Compositional data
(1D): pd.Series or 1D np.array
(2D): pd.DataFrame or 2D np.array
* return_zeros_as_neginfinity:
True: Returns zeros as -np.inf
False: Returns zeros as np.nan
* zeros_ok:
True: Mask zeros with np.nan with warning
False: Error
"""
return transform_xlr(X, reference_components=None, centroid="mean", return_zeros_as_neginfinity=return_zeros_as_neginfinity, zeros_ok=zeros_ok)
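# Illustrative usage sketch (made-up data; `pd` and `np` are already imported above):
#
#   X = pd.DataFrame([[10, 20, 30, 40],
#                     [40, 30, 20, 10]],
#                    index=["sample_1", "sample_2"],
#                    columns=["otu_1", "otu_2", "otu_3", "otu_4"])
#   X_clr = transform_clr(X)  # each row becomes log(x) minus that row's mean log(x)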
# Interquartile range log-ratio transform
def transform_iqlr(X, percentile_range=(25,75), centroid="mean", interval_type="open", return_zeros_as_neginfinity=False, zeros_ok=True, ddof=0):
"""
Wrapper around `transform_xlr`
# Description
Interquartile range log-ratio transform
# Parameters
* X: pd.DataFrame or 2D np.array
* percentile_range: A 2-element tuple of percentiles
    * interval_type: 'open' = (a,b) and 'closed' = [a,b]. 'open' is used by the `propr` R package.
* centroid, return_zeros_as_neginfinity, and zeros_ok: See `transform_xlr`
Adapted from the following source:
* https://github.com/tpq/propr/blob/2bd7c44bf59eaac6b4d329d38afd40ac83e2089a/R/2-proprCall.R#L31
"""
# Checks
n_dimensions = len(X.shape)
assert n_dimensions in {2}, "`X` must be 2D"
assert np.all(X >= 0), "`X` cannot contain negative values because of log-transformation step."
assert interval_type in {"closed", "open"}, "`interval_type` must be in the following: {closed, open}"
percentile_range = tuple(sorted(percentile_range))
assert len(percentile_range) == 2, "percentile_range must have 2 elements"
index=None
components=None
if isinstance(X, pd.DataFrame):
index = X.index
components = X.columns
X = X.values
# Compute the variance of the XLR transform
X_xlr = transform_xlr(X, centroid=centroid, return_zeros_as_neginfinity=False, zeros_ok=zeros_ok)
xlr_var = np.nanvar(X_xlr, axis=0, ddof=ddof)
# Calculate upper and lower bounds from percentiles
lower_bound, upper_bound = np.percentile(xlr_var, percentile_range)
# Get the reference components
if interval_type == "open":
reference_components = np.where((lower_bound < xlr_var) & (xlr_var < upper_bound))[0]
if interval_type == "closed":
reference_components = np.where((lower_bound <= xlr_var) & (xlr_var <= upper_bound))[0]
X_iqlr = transform_xlr(X, reference_components=reference_components, centroid=centroid, return_zeros_as_neginfinity=return_zeros_as_neginfinity, zeros_ok=zeros_ok)
if components is not None:
X_iqlr = pd.DataFrame(X_iqlr, index=index, columns=components)
return X_iqlr
# Pairwise variance log-ratio
def pairwise_vlr(X):
"""
# Description
Pairwise variance log-ratio
# Parameters
* X: pd.DataFrame or 2D np.array
Adapted from the following source:
* https://github.com/tpq/propr
ddof=1 for compatibility with propr package in R
To properly handle missing values and optimize speed, nancorr from pandas must be used which does not take ddof
"""
# Checks
n_dimensions = len(X.shape)
assert n_dimensions in {2}, "`X` must be 2D"
assert np.all(X >= 0), "`X` cannot contain negative values because of log-transformation step."
components = None
if isinstance(X, pd.DataFrame):
components = X.columns
X = X.values
X = X.astype("float64")
n,m = X.shape
# Check for zeros
    n_zeros = (X == 0).sum()
if n_zeros:
raise Exception("N={} zeros detected in `X`. Either preprocess or add pseudocounts.".format(n_zeros))
X_log = np.log(X)
covariance = nancorr(X_log, cov=True) # covariance = np.cov(X_log.T, ddof=ddof)
diagonal = np.diagonal(covariance)
vlr = -2*covariance + diagonal[:,np.newaxis] + diagonal
if components is not None:
vlr = pd.DataFrame(vlr, index=components, columns=components)
return vlr
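# Illustrative usage sketch (made-up, zero-free data):
#
#   X = pd.DataFrame(np.random.RandomState(0).randint(1, 100, size=(10, 4)),
#                    columns=["otu_1", "otu_2", "otu_3", "otu_4"])
#   vlr = pairwise_vlr(X)  # symmetric 4 x 4 DataFrame with zeros on the diagonal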
# Pairwise rho proportionality
def pairwise_rho(X=None, reference_components=None, centroid="mean", interval_type="open", xlr=None, vlr=None):
"""
# Description
Pairwise proportionality `rho` (Erb et al. 2016)
# Parameters
* X: pd.DataFrame or 2D np.array of compositional data (rows=samples, columns=components)
* reference_components: See `transform_xlr`. Can also be `percentiles` for `transform_iqlr` or 'iqlr' string.
    * interval_type: 'open' = (a,b) and 'closed' = [a,b]. 'open' is used by the `propr` R package.
* centroid: See `transform_xlr`
* xlr: pd.DataFrame or 2D np.array of transformed compositional data (e.g. clr, iqlr) (must be used with `vlr` and not `X`)
* vlr: pd.DataFrame or 2D np.array of variance log-ratios (must be used with `xlr` and not `X`)
Adapted from the following source:
* https://github.com/tpq/propr
Citation:
* https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004075
* https://link.springer.com/article/10.1007/s12064-015-0220-8
ddof=1 for compatibility with propr package in R
"""
components = None
# Compute xlr and vlr from X
if X is not None:
assert all(map(lambda x: x is None, [xlr, vlr])), "If `X` is not None then `xlr` and `vlr` cannot be provided."
if isinstance(X, pd.DataFrame):
components = X.columns
X = X.values
vlr = pairwise_vlr(X)
if isinstance(reference_components, str):
if reference_components.lower() == "iqlr":
reference_components = (25,75)
# Use percentiles
if isinstance(reference_components, tuple):
xlr = transform_iqlr(X,percentile_range=reference_components, centroid=centroid, interval_type=interval_type, zeros_ok=False)
# Use CLR
else:
xlr = transform_xlr(X, reference_components=reference_components, centroid=centroid, zeros_ok=False)
# Provide xlr and vlr
else:
assert all(map(lambda x: x is not None, [xlr,vlr])), "If `X` is None then `xlr` and `vlr` must be provided."
assert type(xlr) is type(vlr), "`xlr` and `vlr` should be same type (i.e. pd.DataFrame, np.ndarray)"
if isinstance(xlr, pd.DataFrame):
assert np.all(xlr.columns == vlr.columns) & np.all(xlr.columns == vlr.index), "`xlr.columns` need to be the same as `vlr.index` and `vlr.columns`"
components = xlr.columns
xlr = xlr.values
vlr = vlr.values
# rho (Erb et al. 2016)
n, m = xlr.shape
variances = np.var(xlr, axis=0) # variances = np.var(X_xlr, axis=0, ddof=ddof)
rhos = 1 - (vlr/np.add.outer(variances,variances))
if components is not None:
rhos = pd.DataFrame(rhos, index=components, columns=components)
return rhos
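# Illustrative usage sketch: rho from precomputed intermediates
# (X is a zero-free pd.DataFrame as in the examples above):
#
#   X_clr = transform_clr(X, zeros_ok=False)
#   vlr = pairwise_vlr(X)
#   rho = pairwise_rho(xlr=X_clr, vlr=vlr)  # values near 1 indicate proportional components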
# Pairwise phi proportionality
def pairwise_phi(X=None, symmetrize=True, triangle="lower", reference_components=None, centroid="mean", interval_type="open", xlr=None, vlr=None):
"""
# Description
Pairwise proportionality `phi` (Lovell et al. 2015)
# Parameters
* X: pd.DataFrame or 2D np.array of compositional data (rows=samples, columns=components)
* symmetrize: Force symmetric matrix
* triangle: Use lower or upper triangle for reference during symmetrization
* reference_components: See `transform_xlr`. Can also be `percentiles` for `transform_iqlr` or 'iqlr' string.
    * interval_type: 'open' = (a,b) and 'closed' = [a,b]. 'open' is used by the `propr` R package.
* centroid: See `transform_xlr`
* xlr: pd.DataFrame or 2D np.array of transformed compositional data (e.g. clr, iqlr) (must be used with `vlr` and not `X`)
* vlr: pd.DataFrame or 2D np.array of variance log-ratios (must be used with `xlr` and not `X`)
Adapted from the following source:
* https://github.com/tpq/propr
Citation:
* https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004075
ddof=1 for compatibility with propr package in R
"""
components = None
# Compute xlr and vlr from X
if X is not None:
assert all(map(lambda x: x is None, [xlr, vlr])), "If `X` is not None then `xlr` and `vlr` cannot be provided."
if isinstance(X, pd.DataFrame):
components = X.columns
X = X.values
vlr = pairwise_vlr(X)
if isinstance(reference_components, str):
if reference_components.lower() == "iqlr":
reference_components = (25,75)
# Use percentiles
if isinstance(reference_components, tuple):
xlr = transform_iqlr(X,percentile_range=reference_components, centroid=centroid, interval_type=interval_type, zeros_ok=False)
# Use CLR
else:
xlr = transform_xlr(X, reference_components=reference_components, centroid=centroid, zeros_ok=False)
# Provide xlr and vlr
else:
assert all(map(lambda x: x is not None, [xlr,vlr])), "If `X` is None then `xlr` and `vlr` must be provided."
assert type(xlr) is type(vlr), "`xlr` and `vlr` should be same type (i.e. pd.DataFrame, np.ndarray)"
if isinstance(xlr, pd.DataFrame):
assert np.all(xlr.columns == vlr.columns) & np.all(xlr.columns == vlr.index), "`xlr.columns` need to be the same as `vlr.index` and `vlr.columns`"
components = xlr.columns
xlr = xlr.values
vlr = vlr.values
# phi (Lovell et al. 2015)
n, m = xlr.shape
variances = np.var(xlr, axis=0)#[:,np.newaxis]
phis = vlr/variances
if symmetrize:
assert triangle in {"lower","upper"}, "`triangle` must be one of the following: {'lower','upper'}"
if triangle == "upper":
idx_triangle = np.tril_indices(m, -1)
if triangle == "lower":
idx_triangle = np.triu_indices(m, 1)
phis[idx_triangle] = phis.T[idx_triangle]
if components is not None:
phis = pd.DataFrame(phis, index=components, columns=components)
return phis
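# Illustrative usage sketch, mirroring the rho example above:
#
#   phi = pairwise_phi(xlr=X_clr, vlr=vlr)                            # symmetrized from the lower triangle
#   phi_asymmetric = pairwise_phi(xlr=X_clr, vlr=vlr, symmetrize=False)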
# ILR Transformation
@check_packages(["skbio"])
def transform_ilr(X:pd.DataFrame, tree=None, check_polytomy=True, verbose=True):
"""
    If `tree` is None, the orthonormal basis for the Aitchison simplex defaults to the J. J. Egozcue orthonormal basis.
"""
# Imports
from skbio import TreeNode
from skbio.stats.composition import ilr
assert isinstance(X, pd.DataFrame), "`X` must be a pd.DataFrame"
assert not np.any(X == 0), "`X` cannot contain zeros because of log-transforms. Preprocess or use a pseudocount e.g. (X+1) or (X/(1/X.shape[1]**2))"
# Determine tree module
def _infer_tree_type(tree):
tree_type = None
query_type = str(tree.__class__).split("'")[1].split(".")[0]
if query_type in {"skbio"}:
tree_type = "skbio"
if query_type in {"ete2","ete3"}:
tree_type = "ete"
        assert tree_type is not None, "Please use either skbio or ete[2/3] tree. Tree type determined as {}".format(query_type)
return tree_type
# Get leaves from tree
def _get_leaves(tree, tree_type):
if tree_type == "skbio":
leaves_in_tree = set(map(lambda leaf:leaf.name, tree.tips()))
if tree_type == "ete":
leaves_in_tree = set(tree.get_leaf_names())
return leaves_in_tree
def _check_polytomy(tree, tree_type):
if tree_type == "ete":
# Check bifurcation
n_internal_nodes = len(list(filter(lambda node:node.is_leaf() == False, tree.traverse())))
n_leaves = len(list(filter(lambda node:node.is_leaf(), tree.traverse())))
if n_internal_nodes < (n_leaves - 1):
raise Exception("Please resolve tree polytomy and force bifurcation: Use `tree.resolve_polytomy()` before naming nodes for `ete`")
if tree_type == "skbio":
# Check bifurcation
n_internal_nodes = len(list(filter(lambda node:node.is_tip() == False, tree.traverse())))
n_leaves = len(list(filter(lambda node:node.is_tip(), tree.traverse())))
if n_internal_nodes < (n_leaves - 1):
raise Exception("Please resolve tree polytomy and force bifurcation: Use `tree.bifurcate()` before naming nodes for `skbio`")
# ETE Tree
if sys.version_info.major == 2:
ete_info = ("ete2","ete")
if sys.version_info.major == 3:
ete_info = ("ete3","ete")
@check_packages([ete_info])
def _ete_to_skbio( tree):
# Convert ete to skbio
tree = TreeNode.read(StringIO(tree.write(format=1, format_root_node=True)), convert_underscores=False)
return tree
def _prune_tree(tree, tree_type, leaves):
if tree_type == "ete":
tree.prune(leaves)
if tree_type == "skbio":
tree = tree.shear(leaves)
tree.prune()
return tree
# ILR with tree
@check_packages(["gneiss"], import_into_backend=False)
def _ilr_with_tree(X, tree):
# Import ilr_transform from gneiss
from gneiss.composition import ilr_transform
# Check tree type
tree_type = _infer_tree_type(tree)
# Check leaves
components = set(X.columns)
leaves_in_tree = _get_leaves(tree=tree, tree_type=tree_type)
assert components <= leaves_in_tree, "Not all components (X.columns) are represented in tree"
# Prune tree
if components < leaves_in_tree:
tree = tree.copy()
n_leaves_before_pruning = len(leaves_in_tree)
tree = _prune_tree(tree=tree, tree_type=tree_type, leaves=components)
n_leaves_after_pruning = len(_get_leaves(tree=tree, tree_type=tree_type))
n_pruned = n_leaves_before_pruning - n_leaves_after_pruning
if verbose:
print("Pruned {} attributes to match components (X.columns)".format(n_pruned), file=sys.stderr)
# Polytomy
if check_polytomy:
_check_polytomy(tree=tree, tree_type=tree_type)
# ETE
if tree_type == "ete":
tree = _ete_to_skbio(tree=tree)
return ilr_transform(table=X, tree=tree)
# ILR without tree
def _ilr_without_tree(X):
return pd.DataFrame(ilr(X), index=X.index)
# Without tree
if tree is None:
return _ilr_without_tree(X=X)
# With tree
else:
return _ilr_with_tree(X=X, tree=tree)
``` |
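As a brief illustration of `transform_ilr` without a tree (this assumes `scikit-bio` is installed and that the function is importable from the package top level):
```python
import pandas as pd
from compositional import transform_ilr  # top-level import assumed

X = pd.DataFrame([[10, 20, 30, 40],
                  [40, 30, 20, 10]],
                 index=["sample_1", "sample_2"],
                 columns=["otu_1", "otu_2", "otu_3", "otu_4"])
X_ilr = transform_ilr(X)  # with tree=None, uses the default Egozcue orthonormal basis
```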
{
"source": "jolespin/genopype",
"score": 2
} |
#### File: genopype/genopype/genopype.py
```python
from __future__ import print_function, division
import os, sys, glob, time,subprocess, random
from collections import OrderedDict, Counter
# Pathlib
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
# Scandir
try:
from os import scandir
except ImportError:
from scandir import scandir
# Soothsayer utils
from soothsayer_utils import *
# Know Bugs:
# * Currently (2019.07.26), the "completed_message" parameter does not work.
# Check filename
def check_filename(filename, acceptable_characters={".","-","_"}):
status_ok = True
for character in str(filename).lower():
conditions = [
character.isalnum(),
character in acceptable_characters,
# character != " ",
]
if not any(conditions):
status_ok = False
break
return status_ok
# Format filename
def format_filename(name, replacement_character="_", acceptable_characters={".","-","_"}):
listed_string = list(name)
idx_nonalnum = list()
for i, character in enumerate(listed_string):
if not check_filename(character):
idx_nonalnum.append(i)
for i in idx_nonalnum:
listed_string[i] = replacement_character
return "".join(listed_string)
# Create directory
def create_directory(directory, sleep=(0.016180339887, 1.6180339887)):
# Sleep was added so files aren't created at the same time
if bool(sleep):
sleep_duration = random.uniform(sleep[0], sleep[1])
time.sleep(sleep_duration)
assert is_path_like(directory, path_must_exist=False)
# directory = os.path.realpath(directory)
if not os.path.exists(directory):
os.makedirs(directory)
return directory
# Validating file existence
def validate_file_existence(paths, prologue="Validating the following files:", minimum_filesize=2, f_verbose=None):
if is_file_like(f_verbose) or (f_verbose is None):
print(prologue, file=f_verbose)
paths_expanded = list()
for path in paths:
if "*" in path:
paths_expanded += glob.glob(path)
else:
# path = os.path.realpath(path)
paths_expanded.append(path)
for path in paths_expanded:
# Get the absolute path
# path = os.path.realpath(path)
# Get the path to the symlink target
if os.path.islink(path):
path = os.readlink(path)
assert os.path.exists(path), "The following path does not exist: {}".format(path)
# original_path = None
# symlink = None
if os.path.islink(path):
path = os.readlink(path)
if not os.path.isdir(path):
size_bytes = os.path.getsize(path)
assert size_bytes >= minimum_filesize, "The following file appears to be empty ({} bytes): {}".format(size_bytes, path)
if is_file_like(f_verbose) or (f_verbose is None):
size_mb = size_bytes >> 20
if size_mb < 1:
print("[=] File exists ({} bytes):".format(size_bytes), path, file=f_verbose)
else:
print("[=] File exists ({} MB):".format(size_mb), path, file=f_verbose)
else:
size_bytes = get_directory_size(path)
assert size_bytes >= minimum_filesize, "The following directory appears to be empty ({} bytes): {}".format(size_bytes, path)
if is_file_like(f_verbose) or (f_verbose is None):
size_mb = size_bytes >> 20
if size_mb < 1:
print("[=] Directory exists ({} bytes):".format(size_bytes), path, file=f_verbose)
else:
print("[=] Directory exists ({} MB):".format(size_mb), path, file=f_verbose)
# Get directory tree structure
def get_directory_tree(root, ascii=False):
if not ascii:
return DisplayablePath.view(root)
else:
return DisplayablePath.get_ascii(root)
# Directory size
def get_directory_size(path_directory='.'):
"""
Adapted from @Chris:
https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
"""
path_directory = format_path(path_directory)
total_size = 0
seen = {}
for dirpath, dirnames, filenames in os.walk(path_directory):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat = os.stat(fp)
except OSError:
continue
try:
seen[stat.st_ino]
except KeyError:
seen[stat.st_ino] = True
else:
continue
total_size += stat.st_size
return total_size
# ===============
# Shell utilities
# ===============
# # View directory structures
class DisplayablePath(object):
"""
Display the tree structure of a directory.
Implementation adapted from the following sources:
* Credits to @abstrus
https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
"""
display_filename_prefix_middle = '|__'
display_filename_prefix_last = '|__'
display_parent_prefix_middle = ' '
display_parent_prefix_last = '| '
def __init__(self, path, parent_path, is_last):
self.path = pathlib.Path(str(path))
self.parent = parent_path
self.is_last = is_last
if self.parent:
self.depth = self.parent.depth + 1
else:
self.depth = 0
@property
def displayname(self):
if self.path.is_dir():
return self.path.name + '/'
return self.path.name
@classmethod
def make_tree(cls, root, parent=None, is_last=False, criteria=None):
root = pathlib.Path(str(root))
criteria = criteria or cls._default_criteria
displayable_root = cls(root, parent, is_last)
yield displayable_root
children = sorted(list(path
for path in root.iterdir()
if criteria(path)),
key=lambda s: str(s).lower())
count = 1
for path in children:
is_last = count == len(children)
if path.is_dir():
for item in cls.make_tree(path, parent=displayable_root, is_last=is_last, criteria=criteria):
yield item
else:
yield cls(path, displayable_root, is_last)
count += 1
@classmethod
def _default_criteria(cls, path):
return True
@property
def displayname(self):
if self.path.is_dir():
return self.path.name + '/'
return self.path.name
def displayable(self):
if self.parent is None:
return self.displayname
_filename_prefix = (self.display_filename_prefix_last
if self.is_last
else self.display_filename_prefix_middle)
parts = ['{!s} {!s}'.format(_filename_prefix,
self.displayname)]
parent = self.parent
while parent and parent.parent is not None:
parts.append(self.display_parent_prefix_middle
if parent.is_last
else self.display_parent_prefix_last)
parent = parent.parent
return ''.join(reversed(parts))
# Additions by <NAME> for Soothsayer
@classmethod
def get_ascii(cls, root):
ascii_output = list()
paths = cls.make_tree(root)
for path in paths:
ascii_output.append(path.displayable())
return "\n".join(ascii_output)
@classmethod
def view(cls, root, file=sys.stdout):
print(cls.get_ascii(root), file=file)
# Bash commands
class Command(object):
"""
Run bash commands and stuff.
Recommended usage:
------------------
with open("test_commands.sh", "w") as f_cmds:
cmd = Command("echo ':)' > testing_output.txt", name="TEST", f_cmds=f_cmds)
cmd.run(epilogue="footer", prologue="header", checkpoint="testing_output.txt.checkpoint")
or
f_cmds = open("test_commands.sh", "w")
cmd = Command("echo ':)'' > testing_output.txt", name="TEST", f_cmds=f_cmds)
cmd.run(epilogue="footer", prologue="header", checkpoint="testing_output.txt.checkpoint")
f_cmds.close()
Just in case you need a quick one-liner [not recommended but works]:
-------------------------------------------------------------------
cmd = Command("echo ':)'' > testing_output.txt", name="TEST", f_cmds="test_commands.sh")
cmd.run(epilogue="footer", prologue="header", checkpoint="testing_output.txt.checkpoint").close()
or
cmd = Command("echo ':)'' > testing_output.txt", name="TEST", f_cmds="test_commands.sh")
cmd.run(epilogue="footer", prologue="header", checkpoint="testing_output.txt.checkpoint")
cmd.close()
Future:
-------
* Create an object called ExecutablePipeline that wraps Command objects together
* Something like this:
ep = ExecutablePipeline(name="RNA-seq mapping", description="Quality trim, remove contaminants, and map reads to reference")
# This method
ep.create_step(name="kneaddata", pos=1, checkpoint="path/to/checkpoint", write_stdout="path/to/stdout", write_stderr="path/to/stderr", write_returncode="path/to/returncode")
ep["kneaddata"].set_inputs(*?)
ep["kneaddata"].set_outputs(*?)
# or this method
ep.create_step(name="kneaddata", pos=1, checkpoint="path/to/checkpoint", write_stdout="path/to/stdout", write_stderr="path/to/stderr", write_returncode="path/to/returncode", inputs=*?, outputs=*?)
ep.execute(?*)
Here is an example for constructing pipelines:
-------------------------------------------------------------------
# =========
# Utility
# =========
def process_command(cmd, f_cmds, logfile_name, description, directories, io_filepaths):
start_time = time.time()
# Info
program = logfile_name.split("_")[-1]
print(description, file=sys.stdout)
print("Input: ", io_filepaths[0], "\n", "Output: ", io_filepaths[1], sep="", file=sys.stdout)
print("Command: ", " ".join(cmd), file=sys.stdout)
executable = Command(cmd, name=logfile_name, description=description, f_cmds=f_cmds)
executable.run(
prologue=format_header(program, "_"),
dry="infer",
errors_ok=False,
error_message="Check the following files: {}".format(os.path.join(directories["log"], "{}.*".format(logfile_name))),
checkpoint=os.path.join(directories["checkpoints"], "{}".format(logfile_name)),
write_stdout=os.path.join(directories["log"], "{}.o".format(logfile_name)),
write_stderr=os.path.join(directories["log"], "{}.e".format(logfile_name)),
write_returncode=os.path.join(directories["log"], "{}.returncode".format(logfile_name)),
f_verbose=sys.stdout,
)
print("Duration: {}".format(executable.duration_), file=sys.stdout)
return executable
# =========
# Kneaddata
# =========
program = "kneaddata"
# Add to directories
output_directory = directories[("intermediate", program)] = create_directory(os.path.join(directories["intermediate"], "{}_output".format(program)))
# Info
step = "1"
logfile_name = "{}_{}".format(step, program)
description = "{}. {} | Removing human associated reads and quality trimming".format(step, program)
# i/o
input_filepath = [opts.r1, opts.r2]
output_filename = ["kneaddata_repaired_1.fastq.gz", "kneaddata_repaired_2.fastq.gz"]
output_filepath = list(map(lambda filename: os.path.join(output_directory, filename), output_filename))
io_filepaths = [input_filepath, output_filepath]
# Parameters
params = {
"reads_r1":input_filepath[0],
"reads_r2":input_filepath[1],
"output_directory":output_directory,
"opts":opts,
"directories":directories,
}
cmd = get_kneaddata_cmd(**params)
process = process_command(cmd, f_cmds=f_cmds, logfile_name=logfile_name, description=description, directories=directories, io_filepaths=io_filepaths)
sys.stdout.flush()
"""
def __init__(self, cmd, name=None, description=None, f_cmds=sys.stdout ):
if isinstance(cmd, str):
cmd = [cmd]
cmd = " ".join(cmd)
self.cmd = cmd
if not is_file_like(f_cmds):
if is_path_like(f_cmds, path_must_exist=False):
f_cmds = open(f_cmds, "w")
self.f_cmds = f_cmds
self.name = name
self.description = description
def __repr__(self):
class_name = str(self.__class__)[17:-2]
return '{}(name={}, description={}, cmd="{}")'.format(class_name, self.name, self.description, self.cmd)
def close(self):
self.f_cmds.close()
return self
def _write_output(self, data, filepath):
if filepath is not None:
if not is_file_like(filepath):
filepath = format_path(filepath)
f_out = open(filepath, "w")
else:
f_out = filepath
print(data, file=f_out)
f_out.flush()
if self.f_cmds not in {sys.stdout, sys.stderr}:
os.fsync(self.f_cmds.fileno())
if f_out not in {sys.stdout, sys.stderr}:
f_out.close()
# Run command
def run(
self,
prologue=None,
epilogue=None,
errors_ok=False,
dry="infer",
checkpoint=None,
write_stdout=None,
write_stderr=None,
write_returncode=None,
close_file=False,
checkpoint_message_notexists="Running. .. ... .....",
checkpoint_message_exists="Loading. .. ... .....",
error_message=None,
completed_message=None,
f_verbose=None,
n_linebreaks=1,
acceptable_returncodes=[0],
popen_kws=dict(),
):
"""
        Should future versions have separate prologue and epilogue for f_cmds and f_verbose?
"""
# Verbose
if f_verbose is None:
f_verbose = sys.stdout
# Return codes
if isinstance(acceptable_returncodes, int):
acceptable_returncodes = [acceptable_returncodes]
assert all(map(lambda returncode:isinstance(returncode, int), acceptable_returncodes)), "Please use integer return codes"
# ----------
# Checkpoint
# ----------
if checkpoint is not None:
checkpoint = format_path(checkpoint)
if dry == "infer":
dry = False
if checkpoint is not None:
if os.path.exists(checkpoint):
dry = True
if checkpoint_message_exists is not None:
print(checkpoint_message_exists, file=f_verbose)
f_verbose.flush()
else:
if checkpoint_message_notexists is not None:
print(checkpoint_message_notexists, file=f_verbose)
f_verbose.flush()
# ----
# Info
# ----
if self.f_cmds is not None:
# Prologue
if prologue is not None:
self.prologue_ = prologue
print("#", prologue, file=self.f_cmds)
# Command
print(self.cmd, file=self.f_cmds)
# Epilogue
if epilogue is not None:
self.epilogue_ = epilogue
print("#", epilogue, file=self.f_cmds)
# Padding
assert n_linebreaks >= 0, "`n_linebreaks` must be an integer >= 0"
print("\n"*(n_linebreaks-1), file=self.f_cmds)
self.f_cmds.flush()
if self.f_cmds not in {sys.stdout, sys.stderr}:
os.fsync(self.f_cmds.fileno())
# Run
if not dry:
start_time = time.time()
f_stdout = subprocess.PIPE
if write_stdout is not None:
write_stdout = format_path(write_stdout)
f_stdout = open(write_stdout, "wb")
f_stderr = subprocess.PIPE
if write_stderr is not None:
write_stderr = format_path(write_stderr)
f_stderr = open(write_stderr, "wb")
# Execute the process
self.process_ = subprocess.Popen(self.cmd, shell=True, stdout=f_stdout, stderr=f_stderr, **popen_kws) #! Future: Use file objects instead here so it can be written in real time
# Wait until process is complete and return stdout/stderr
self.stdout_, self.stderr_ = self.process_.communicate() # Use this .communicate instead of .wait to avoid zombie process that hangs due to defunct. Removed timeout b/c it's not available in Python 2
# Set stdout and stderr to the contents of the file object created...or just the path then do a get_stdout() function
if hasattr(f_stdout, "close"):
f_stdout.close()
if hasattr(f_stderr, "close"):
f_stderr.close()
# Return code
self.returncode_ = self.process_.returncode
self.duration_ = format_duration(start_time)
# # stdout
# if isinstance(self.stdout_, bytes):
# self.stdout_ = self.stdout_.decode("utf-8")
# self._write_output(data=self.stdout_, filepath=write_stdout)
# # stderr
# if isinstance(self.stderr_, bytes):
# self.stderr_ = self.stderr_.decode("utf-8")
# self._write_output(data=self.stderr_, filepath=write_stderr)
# Return code
self._write_output(data=self.returncode_, filepath=write_returncode)
# Check
if not errors_ok:
if self.returncode_ not in acceptable_returncodes:
if error_message is not None:
print(error_message, file=f_verbose)
sys.exit(self.returncode_)
if self.returncode_ in acceptable_returncodes:
if completed_message is not None:
print(completed_message, file=f_verbose)
# Create checkpoint
if checkpoint is not None:
if self.returncode_ in acceptable_returncodes:
duration = format_duration(start_time)
with open(checkpoint, "w") as f_checkpoint:
print(get_timestamp(), duration, file=f_checkpoint)
# Close file object
if self.f_cmds not in {None, sys.stdout, sys.stderr}:
if close_file:
self.close()
return self
# Executable
class ExecutablePipeline(object):
def __init__(self,
name=None,
description=None,
checkpoint_directory=None,
log_directory=None,
checkpoint_message_notexists="Running. .. ... .....",
checkpoint_message_exists="Loading. .. ... .....",
f_cmds=None,
f_verbose=None,
bypass_io_validation_if_checkpoints_exist=False,
**metadata
):
self.name = name
self.description = description
self.metadata = metadata
self.executables = dict()
self.f_cmds = f_cmds
self.f_verbose = f_verbose
self.checkpoint_message_notexists = checkpoint_message_notexists
self.checkpoint_message_exists = checkpoint_message_exists
# Log directory
self.log_directory = log_directory
if log_directory is not None:
assert is_path_like(log_directory, path_must_exist=False), "`{}` is not path-like".format(log_directory)
log_directory = format_path(log_directory)
self.log_directory = create_directory(log_directory)
# Checkpoint directory
self.checkpoint_directory = checkpoint_directory
if checkpoint_directory is not None:
assert is_path_like(checkpoint_directory, path_must_exist=False), "`{}` is not path-like".format(checkpoint_directory)
checkpoint_directory = format_path(checkpoint_directory)
self.checkpoint_directory = create_directory(checkpoint_directory)
self.bypass_io_validation_if_checkpoints_exist = bypass_io_validation_if_checkpoints_exist
# Compiled
self.compiled = False
# Add step in pipeline
def add_step(self,
# Required
id,
cmd,
step="infer",
log_prefix="infer",
# Descriptions
description=None,
# I/O
input_filepaths=None,
output_filepaths=None,
# Utility
dry="infer",
errors_ok=False,
validate_inputs=True,
validate_outputs=True,
acceptable_returncodes=[0],
):
# Step
if step == "infer":
step = len(self.executables) + 1
assert isinstance(step, int), "Please specify an integer step."
# if bypass_io_validation_if_checkpoint_exists:
            # HOW TO DETERMINE WHAT THE CHECKPOINT FILE IS AT THIS STAGE
# if os.path.exists(checkpoint):
#
attrs = dict()
attrs["step"] = step
# Command
attrs["executable"] = Command(cmd,
name=id,
description=description,
f_cmds=self.f_cmds,
)
attrs["description"] = description
attrs["log_prefix"] = log_prefix
#I/O
if isinstance(input_filepaths, str):
input_filepaths = [input_filepaths]
if input_filepaths is None:
input_filepaths = list()
attrs["input_filepaths"] = input_filepaths
if isinstance(output_filepaths, str):
output_filepaths = [output_filepaths]
if output_filepaths is None:
output_filepaths = list()
attrs["output_filepaths"] = output_filepaths
# Checkpoints
attrs["checkpoint"] = None
attrs["write_stdout"] = None
attrs["write_stderr"] = None
attrs["write_returncode"] = None
attrs["error_message"] = None
attrs["completed_message"] = None
attrs["acceptable_returncodes"] = acceptable_returncodes
# Checkpoint
attrs["dry"] = dry
# Validation
attrs["errors_ok"] = errors_ok
attrs["validate_inputs"] = validate_inputs
attrs["validate_outputs"] = validate_outputs
self.executables[id] = attrs
return self
# Set attribute
def set_attribute(self, id, attribute, value):
assert id in self.executables, "`{}` not in `executables`".format(id)
self.executables[id][attribute] = value
return self
# Set the order of operations
def set_order_of_executables(self, ordering):
for id, step in ordering.items():
            self.executables[id]["step"] = step
return self
# Compile everything and get ready for execution
def compile(self):
# if func_steps is None:
# func_steps = lambda step:step
# Check duplicate steps
steps = list()
for id, attrs in self.executables.items():
steps.append(attrs["step"])
        assert all(map(lambda x: x == 1, Counter(steps).values())), "Please check for duplicate steps."
# Check missing steps
assert set(steps) == set(range(min(steps), max(steps)+1)), "Please check for missing steps."
# Check boolean attrs
for id, attrs in self.executables.items():
for attribute in ["errors_ok", "validate_inputs", "validate_outputs"]:
assert isinstance(attrs[attribute], bool), "`{}` must be a boolean type".format(attribute)
# Compiled steps
self.compiled_steps_ = OrderedDict()
print(format_header(". .. ... Compiling ... .. .", "="), file=self.f_verbose)
for id, attrs in sorted(self.executables.items(), key=lambda x:x[1]["step"]):
# Logfile name
if attrs["log_prefix"] in {"infer", None}:
attrs["log_prefix"] = "__".join([str(attrs["step"]).strip(), format_filename(id, replacement_character="-").strip()])
            assert check_filename(attrs["log_prefix"]), "Please format the filename `{}` so it only includes alphanumeric characters, '.', '_', and '-'.".format(attrs["log_prefix"])
# assert all(x)
# Checkpoint
if self.checkpoint_directory is not None:
attrs["checkpoint"] = os.path.join(self.checkpoint_directory, attrs["log_prefix"] )
# Log files
if self.log_directory is not None:
attrs["write_stdout"] = os.path.join(self.log_directory, "{}.o".format(attrs["log_prefix"] ))
attrs["write_stderr"] = os.path.join(self.log_directory, "{}.e".format(attrs["log_prefix"] ))
attrs["write_returncode"] = os.path.join(self.log_directory, "{}.returncode".format(attrs["log_prefix"] ))
attrs["error_message"] = "Check log files to diagnose error:\ncat {}.*".format(os.path.join(self.log_directory, attrs["log_prefix"] ))
attrs["completed_message"] = "\nLog files:\n{}.*".format(os.path.join(self.log_directory, attrs["log_prefix"] )) # Use glob here
# Add step order
self.compiled_steps_[attrs["step"]] = id
# Update attributes
self.executables[id].update(attrs)
print("Step: {}, {} | log_prefix = {} | {}".format(attrs["step"], id, attrs["log_prefix"], attrs["description"]), file=self.f_verbose)
# Compiled
self.compiled = True
return self
# Execute pipeline
def execute(self, steps=None, description="Executing pipeline", restart_from_checkpoint=None):
start_time = time.time()
assert self.compiled, "Please compile before continuing."
if steps is None:
steps = list(self.compiled_steps_.keys())
if self.name is not None:
if self.description is None:
print(format_header(". .. ... {} ... .. .".format(self.name), "_"), file=self.f_verbose)
else:
print(format_header(". .. ... {} || {} ... .. .".format(self.name, self.description), "_"), file=self.f_verbose)
print("", file=self.f_verbose)
        if restart_from_checkpoint is not None:
            # Resolve the "preprocessing" alias before coercing to an integer step
            if restart_from_checkpoint == "preprocessing":
                restart_from_checkpoint = 1
            restart_from_checkpoint = int(restart_from_checkpoint)
            assert restart_from_checkpoint in steps, "Cannot restart from checkpoint `{}` because it does not exist".format(restart_from_checkpoint)
            if self.checkpoint_directory is not None:
                target_checkpoint = restart_from_checkpoint
print("Restarting pipeline from checkpoint:", target_checkpoint, file=self.f_verbose)
for file in scandir(self.checkpoint_directory):
if "_" in file.name:
query_checkpoint = int(file.name.split("_")[0].split(".")[0])
if query_checkpoint >= target_checkpoint:
print("...[-] {}".format(file.path), file=self.f_verbose)
os.remove(file.path)
# Intermediate files
for intermediate_filepath in self.executables[self.compiled_steps_[query_checkpoint]]["output_filepaths"]:
if os.path.exists(os.path.realpath(intermediate_filepath)):
try:
os.remove(intermediate_filepath)
print("...[-] {}".format(intermediate_filepath), file=self.f_verbose)
except OSError: # Changed from PermissionError 2020.01.15
pass
else:
print("...[=] {}".format(file.path), file=self.f_verbose)
print("", file=self.f_verbose)
for step, id in pv(self.compiled_steps_.items(), description=description):
if step in steps:
attrs = self.executables[id]
executable = attrs["executable"]
# Headers
print(format_header(". {} .".format(id), "="), sep="", file=self.f_verbose)
print("Input: ", attrs["input_filepaths"], "\n", "Output: ", attrs["output_filepaths"], "\n", sep="", file=self.f_verbose)
print("Command:", file=self.f_verbose)
print(attrs["executable"].cmd, "\n", file=self.f_verbose)
# ===============
# Execute command
# ===============
# Bypass io validation
if self.bypass_io_validation_if_checkpoints_exist:
if os.path.exists(attrs["checkpoint"]):
# if_file_isnt_empty?
attrs["validate_inputs"] = False
attrs["validate_outputs"] = False
print("! Bypassing I/O validation: {}\n".format(attrs["checkpoint"]), file=self.f_verbose)
# Check inputs
if attrs["validate_inputs"]:
input_filepaths = attrs["input_filepaths"]
if bool(input_filepaths):
assert is_nonstring_iterable(input_filepaths), "`input_filepaths` must be a non-string iterable"
# paths_expanded = list()
# for path in input_filepaths:
# if "*" in path:
# paths_expanded += glob.glob(path)
# else:
# # path = os.path.realpath(path)
# paths_expanded.append(path)
# self.executables[id]["input_filepaths"] = paths_expanded
validate_file_existence(input_filepaths, prologue="Validating the following input files:", f_verbose=self.f_verbose)
print("", file=self.f_verbose)
# Execute
executable.run(
prologue=id,
dry=attrs["dry"],
errors_ok=attrs["errors_ok"],
error_message=attrs["error_message"],
completed_message=attrs["completed_message"],
checkpoint=attrs["checkpoint"],
checkpoint_message_notexists=self.checkpoint_message_notexists,
checkpoint_message_exists=self.checkpoint_message_exists,
write_stdout=attrs["write_stdout"],
write_stderr=attrs["write_stderr"],
write_returncode=attrs["write_returncode"],
acceptable_returncodes=attrs["acceptable_returncodes"],
f_verbose=self.f_verbose,
)
# Check outputs
if attrs["validate_outputs"]:
output_filepaths = attrs["output_filepaths"]
if bool(output_filepaths):
assert is_nonstring_iterable(output_filepaths), "`output_filepaths` must be a non-string iterable"
validate_file_existence(output_filepaths, prologue="\nValidating the following output files:", f_verbose=self.f_verbose)
# print("", file=self.f_verbose)
print("\nDuration: {}\n".format(format_duration(start_time)), file=self.f_verbose)
self.duration_ = format_duration(start_time)
print("\n", format_header("Total duration: {}".format(self.duration_), "."), sep="", file=self.f_verbose)
return self
# # Save object
# def to_file(self, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL, *args):
# # Cannot serialize file objects while open so we need make placeholders
# f_cmds = self.f_cmds
# f_verbose = self.f_verbose
# self.f_verbose = None
# self.f_cmds = None
# # for id in self.executables.keys():
# # self.executables[id]["executable"].f_verbose = None
# # self.executables[id]["executable"].f_cmds = None
# # Write object
# write_object(self, path=path, compression=compression, protocol=protocol, *args)
# # Set file objects
# self.f_verbose = f_verbose
# self.f_cmds = f_cmds
# return self
# Load object
@classmethod
def from_file(cls, path, compression="infer", f_verbose=None, f_cmds=None):
cls = read_object(path=path, compression=compression)
cls.f_verbose = f_verbose
cls.f_cmds = f_cmds
return cls
def __getitem__(self, id):
assert id in self.executables, "`{}` not in `executables`".format(id)
return self.executables[id]
def main():
directories = dict()
directories["output"] = create_directory("pipeline_testing")
directories["checkpoints"] = create_directory(os.path.join(directories["output"], "checkpoints"))
directories["logs"] = create_directory(os.path.join(directories["output"], "logs"))
with open(os.path.join(directories["output"], "commands.sh"), "w") as f_cmds:
ep = ExecutablePipeline(name="Sith", description="The rule of two", f_cmds=f_cmds, checkpoint_directory=directories["checkpoints"], log_directory=directories["logs"], bypass_io_validation_if_checkpoints_exist=True)
# Step 1
output_filepaths = [os.path.join(directories["output"], "holocron.txt")]
message = "Two there should be; no more, no less. One to embody the power, the other to crave it.\n"
ep.add_step(id="<NAME>",
cmd="echo '{}' > {}".format(message, output_filepaths[0]),
output_filepaths=output_filepaths,
description = "Begin the rule of two",
errors_ok=False,
validate_outputs=True,
)
# Step 2
input_filepaths = [os.path.join(directories["output"], "holocron.txt")]
output_filepaths = [os.path.join(directories["output"], "*.txt")]
ep.add_step(id="<NAME>",
cmd="(cat {} && echo 'jedi') > {} ".format(input_filepaths[0], os.path.join(directories["output"], "wisdom.txt")),
input_filepaths=input_filepaths,
output_filepaths=output_filepaths,
description = "Read the holocron",
errors_ok=False,
validate_inputs=True,
validate_outputs=True,
)
# Step 3
input_filepaths = [os.path.join(directories["output"], "holocron.txt"), os.path.join(directories["output"], "wisdom.txt")]
output_directory = create_directory(os.path.join(directories["output"], "temple"))
output_filepaths = [os.path.join(output_directory, "data-crystal.txt"), output_directory]
cmds = [
"(",
"mkdir -p {}".format(output_directory),
"&&",
"cat {} {} > {}".format(input_filepaths[0], input_filepaths[1], output_filepaths[0]),
")",
]
ep.add_step(id="<NAME>",
cmd=cmds,
input_filepaths=input_filepaths,
output_filepaths=output_filepaths,
description = "Move the data",
)
ep.compile()
ep.execute(restart_from_checkpoint=3)
# Directory structure
print("\n", format_header("Directory structure:","_"), "\n", get_directory_tree(directories["output"], ascii=True), sep="", file=sys.stdout)
if __name__ == "__main__":
main()
``` |
{
"source": "jolespin/hive_networkx",
"score": 2
} |
#### File: hive_networkx/hive_networkx/hive_networkx.py
```python
from __future__ import print_function, division
# Built-ins
from collections import OrderedDict, defaultdict
import sys, datetime, copy, warnings
# External
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.stats import entropy, mannwhitneyu
from scipy.spatial.distance import squareform, pdist
from itertools import combinations
# soothsayer_utils
from soothsayer_utils import assert_acceptable_arguments, is_symmetrical, is_graph, is_nonstring_iterable, dict_build, dict_filter, is_dict, is_dict_like, is_color, is_number, write_object, format_memory, format_header, check_packages
try:
from . import __version__
except ImportError:
__version__ = "ImportError: attempted relative import with no known parent package"
# ensemble_networkx
from ensemble_networkx import Symmetric, condensed_to_dense
# ==========
# Conversion
# ==========
# Polar to cartesian coordinates
def polar_to_cartesian(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return(x, y)
# Cartesian to polar coordinates
def cartesian_to_polar(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return(r, theta)
# =============
# Normalization
# =============
# Normalize MinMax
def normalize_minmax(x, feature_range=(0,1)):
"""
Adapted from the following source:
* https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
"""
x_std = (x - x.min())/(x.max() - x.min())
return x_std * (feature_range[1] - feature_range[0]) + feature_range[0]
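# Hedged worked example (values assumed for illustration):
# normalize_minmax(pd.Series([2.0, 4.0, 6.0]), feature_range=(0, 10)) maps the minimum
# (2.0) to 0, the maximum (6.0) to 10, and the midpoint (4.0) to 5.0 -- i.e. the standard
# (x - min)/(max - min) rescaling stretched onto the requested feature_range.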
# =======================================================
# Hive
# =======================================================
class Hive(object):
def __init__(self, data, name=None, node_type=None, edge_type=None, axis_type=None, description=None, tol=1e-10):
"""
Hive plots for undirected networks
Hive plots:
            Should only be used with 2-3 axes unless intelligently ordered, because the arcs will overlap.
Notes:
            * Does not store the networkx graph, to avoid overusing memory; use .to_networkx() to generate it in real time.
Usage:
import soothsayer_utils as syu
        import ensemble_networkx as enx
import hive_networkx as hx
# Load data
X, y, colors = syu.get_iris_data(["X", "y", "colors"])
n, m = X.shape
# Get association matrix (n,n)
method = "pearson"
df_sim = X.T.corr(method=method)
ratio = 0.382
number_of_edges = int((n**2 - n)/2)
number_of_edges_negative = int(ratio*number_of_edges)
        # Make a fraction (ratio) of the edges negative to showcase edge coloring (not statistically meaningful at all)
for a, b in zip(np.random.RandomState(0).randint(low=0, high=149, size=number_of_edges_negative), np.random.RandomState(1).randint(low=0, high=149, size=number_of_edges_negative)):
if a != b:
df_sim.values[a,b] = df_sim.values[b,a] = df_sim.values[a,b]*-1
# Create a Symmetric object from the association matrix
sym_iris = enx.Symmetric(data=df_sim, node_type="<NAME>", edge_type=method, name="iris", association="network")
# ====================================
# Symmetric(Name:iris, dtype: float64)
# ====================================
# * Number of nodes (iris sample): 150
# * Number of edges (correlation): 11175
# * Association: network
# * Memory: 174.609 KB
# --------------------------------
# | Weights
# --------------------------------
# (iris_1, iris_0) 0.995999
# (iris_0, iris_2) 0.999974
# (iris_3, iris_0) 0.998168
# (iris_0, iris_4) 0.999347
# (iris_0, iris_5) 0.999586
# ...
# (iris_148, iris_146) 0.988469
# (iris_149, iris_146) 0.986481
# (iris_147, iris_148) 0.995708
# (iris_149, iris_147) 0.994460
# (iris_149, iris_148) 0.999916
# Create NetworkX graph from the Symmetric object
graph_iris = sym_iris.to_networkx()
# # Create Hive
hive = hx.Hive(graph_iris, axis_type="species")
# Organize nodes by species for each axis
number_of_query_nodes = 3
axis_nodes = OrderedDict()
for species, _y in y.groupby(y):
axis_nodes[species] = _y.index[:number_of_query_nodes]
        # Make sure each node is specific to an axis (not the fastest way, but the easiest to understand)
nodelist = list()
for name_axis, nodes in axis_nodes.items():
nodelist += nodes.tolist()
assert pd.Index(nodelist).value_counts().max() == 1, "Each node must be on only one axis"
# Add axis for each species
node_styles = dict(zip(['setosa', 'versicolor', 'virginica'], ["o", "p", "D"]))
for name_axis, nodes in axis_nodes.items():
hive.add_axis(name_axis, nodes, sizes=150, colors=colors[nodes], split_axis=True, node_style=node_styles[name_axis])
hive.compile()
# ===============================
# Hive(Name:iris, dtype: float64)
# ===============================
# * Number of nodes (iris sample): 150
# * Number of edges (pearson): 11175
# * Axes (species): ['setosa', 'versicolor', 'virginica']
# * Memory: 174.609 KB
# * Compiled: True
# ---------------------------
# | Axes
# ---------------------------
# 0. setosa (3) [iris_0, iris_1, iris_2]
# 1. versicolor (3) [iris_50, iris_51, iris_52]
# 2. virginica (3) [iris_100, iris_101, iris_102]
# Plot Hive
color_negative, color_positive = ('#278198', '#dc3a23')
edge_colors = hive.weights.map(lambda w: {True:color_negative, False:color_positive}[w < 0])
legend = dict(zip(["Positive", "Negative"], [color_positive, color_negative]))
fig, axes = hive.plot(func_edgeweight=lambda w: (w**10), edge_colors=edge_colors, style="light", show_node_labels=True, title="Iris", legend=legend)
"""
# Placeholders
self.nodes_in_hive = None
self.edges_in_hive = None
self.weights = None
# self.graph = None
self.name = name
self.node_type = node_type
self.edge_type = edge_type
        # Propagate
if isinstance(data, pd.DataFrame):
data = self._from_pandas_adjacency(data, name, node_type, edge_type, tol) # -> Symmetric
if isinstance(data, Symmetric):
self._from_symmetric(data, name, node_type, edge_type)
if all([
(self.nodes_in_hive is None),
(self.edges_in_hive is None),
(self.weights is None),
]):
assert is_graph(data), "`data` must be either a pd.DataFrame adjacency, a Symmetric, or a networkx graph object" # Last resort, use this if Symmetric isn't provided
self._from_networkx(data)
# Initialize
self.axes = OrderedDict()
self.node_mapping_ = OrderedDict()
self.compiled = False
self.axis_type = axis_type
self.description = description
self.version = __version__
self.number_of_nodes_ = None
self.memory = self.weights.memory_usage()
self.__synthesized__ = datetime.datetime.utcnow()
def _from_pandas_adjacency(self, data, name, node_type, edge_type, tol):
# Convert pd.DataFrame into a Symmetric object
assert isinstance(data, pd.DataFrame), "Must be a 2-dimensional pandas DataFrame object"
assert is_symmetrical(data, tol=tol), "DataFrame must be symmetrical. Please force symmetry with (X + X.T)/2"
return Symmetric(data=data, name=name, node_type=node_type, edge_type=edge_type, association="network", nans_ok=False, tol=tol)
def _from_symmetric(self, data, name, node_type, edge_type):
        # Propagate information from Symmetric
if name is None:
self.name = data.name
if node_type is None:
self.node_type = data.node_type
if edge_type is None:
self.edge_type = data.edge_type
self.nodes_in_hive = data.nodes
self.edges_in_hive = data.edges
self.weights = data.weights
# return data.to_networkx()
def _from_networkx(self, graph):
        # Propagate information from graph
for attr in ["name", "node_type", "edge_type"]:
if getattr(self, attr) is None:
if attr in graph.graph:
value =graph.graph[attr]
if bool(value):
setattr(self, attr, value)
# if self.graph is None:
# self.graph = graph
if self.nodes_in_hive is None:
self.nodes_in_hive = pd.Index(sorted(graph.nodes()))
if (self.edges_in_hive is None) or (self.weights is None):
self.weights = dict()
for edge_data in graph.edges(data=True):
edge = frozenset(edge_data[:-1])
weight = edge_data[-1]["weight"]
self.weights[edge] = weight
self.weights = pd.Series(self.weights, name="Weights")#.sort_index()
self.edges_in_hive = pd.Index(self.weights.index, name="Edges")
# Built-ins
def __repr__(self):
pad = 4
header = format_header("Hive(Name:{}, dtype: {})".format(self.name, self.weights.dtype),line_character="=")
n = len(header.split("\n")[0])
fields = [
header,
pad*" " + "* Number of nodes ({}): {}".format(self.node_type, len(self.nodes_in_hive)),
pad*" " + "* Number of edges ({}): {}".format(self.edge_type, len(self.edges_in_hive)),
pad*" " + "* Axes ({}): {}".format(self.axis_type, list(self.axes.keys())),
pad*" " + "* Memory: {}".format(format_memory(self.memory)),
pad*" " + "* Compiled: {}".format(self.compiled),
]
if self.compiled:
for field in map(lambda line:pad*" " + line, format_header("| Axes", "-", n=n-pad).split("\n")):
fields.append(field)
for field in map(lambda line: pad*" " + str(line), repr(self.axes_preview_).split("\n")[:-1]):
fields.append(field)
return "\n".join(fields)
def __call__(self, name_axis=None):
return self.get_axis_data(name_axis=name_axis)
# def __getitem__(self, key):
# return self.weights[key]
# Add axis to HivePlot
def add_axis(self, name_axis, nodes, sizes=None, colors=None, split_axis:bool=False, node_style="o", scatter_kws=dict()):
"""
Add or update axis
nodes: Can be either an iterable of nodes or a dict-like with node positions {node:position}
"""
# Initialize axis container
self.axes[name_axis] = defaultdict(dict)
self.axes[name_axis]["colors"] = None
self.axes[name_axis]["sizes"] = None
self.axes[name_axis]["split_axis"] = split_axis
self.axes[name_axis]["node_style"] = node_style
self.axes[name_axis]["scatter_kws"] = scatter_kws
# Assign (preliminary) node positions
if is_nonstring_iterable(nodes) and not isinstance(nodes, pd.Series):
nodes = pd.Series(np.arange(len(nodes)), index=nodes)
if is_dict(nodes):
nodes = pd.Series(nodes)
nodes = nodes.sort_values()
        assert set(nodes.index) <= set(self.nodes_in_hive), "All nodes in the axis must be present in the Hive"
# Set values
self.axes[name_axis]["node_positions"] = pd.Series(nodes, name=(name_axis, "node_positions"))
self.axes[name_axis]["nodes"] = pd.Index(nodes.index, name=(name_axis, "nodes"))
self.axes[name_axis]["number_of_nodes"] = nodes.size
# Group node with axis
self.node_mapping_.update(dict_build([(name_axis, self.axes[name_axis]["nodes"])]))
# Assign component colors
if colors is None:
colors = "white"
if is_color(colors):
colors = dict_build([(colors, self.axes[name_axis]["nodes"])])
if is_dict(colors):
colors = pd.Series(colors)
if not is_color(colors):
if is_nonstring_iterable(colors) and not isinstance(colors, pd.Series):
colors = pd.Series(colors, index=self.axes[name_axis]["nodes"])
self.axes[name_axis]["colors"] = pd.Series(colors[self.axes[name_axis]["nodes"]], name=(name_axis, "node_colors"))
# Assign component sizes
if sizes is None:
sizes = 100
if is_number(sizes):
sizes = dict_build([(sizes, self.axes[name_axis]["nodes"])])
if is_dict(sizes):
sizes = pd.Series(sizes)
self.axes[name_axis]["sizes"] = pd.Series(sizes[nodes.index], name=(name_axis, "node_sizes"))
# Compile the data for plotting
def compile(self, axes_theta_degrees=None, split_theta_degree=None, inner_radius=None, theta_center=90, axis_normalize=True, axis_maximum=1000):
"""
inner_radius should be similar units to axis_maximum
"""
number_of_axes = len(self.axes)
if split_theta_degree is None:
split_theta_degree = (360/number_of_axes)*0.16180339887
self.split_theta_degree = split_theta_degree
self.axis_maximum = axis_maximum
if inner_radius is None:
if axis_normalize:
inner_radius = (1/5)*self.axis_maximum
else:
inner_radius = 3
self.inner_radius = inner_radius
self.outer_radius = self.axis_maximum - self.inner_radius
self.theta_center = theta_center
# Adjust all of the node_positions
for i, query_axis in enumerate(self.axes):
# If the axis is normalized, force everything between the minimum position and the `outer_radius` (that is, the axis_maximum - inner_radius. This ensures the axis_maximum is actually what is defined)
if axis_normalize:
node_positions = self.axes[query_axis]["node_positions"]
self.axes[query_axis]["node_positions_normalized"] = normalize_minmax(node_positions, feature_range=(min(node_positions), self.outer_radius) )
else:
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions"].copy()
# Offset the node positions by the inner radius
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions_normalized"] + self.inner_radius
# Axis thetas
if axes_theta_degrees is not None:
assert hasattr(axes_theta_degrees, "__iter__"), "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
assert len(axes_theta_degrees) == number_of_axes, "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
if axes_theta_degrees is None:
axes_theta_degrees = list()
for i in range(number_of_axes):
theta_add = (360/number_of_axes)*i
axes_theta_degrees.append(theta_add)
# Adjust all of the axes angles
for i, query_axis in enumerate(self.axes):
# If the axis is in single mode
theta_add = axes_theta_degrees[i] #(360/number_of_axes)*i
if not self.axes[query_axis]["split_axis"]:
# If the query axis is the first then the `theta_add` will be 0
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add])
else:
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add - split_theta_degree,
self.theta_center + theta_add + split_theta_degree])
self.axes[query_axis]["theta"] = np.deg2rad(self.axes[query_axis]["theta"])
self.axes_theta_degrees_ = dict(zip(self.axes.keys(), axes_theta_degrees))
# Nodes
self.nodes_ = list()
for axes_data in self.axes.values():
self.nodes_ += list(axes_data["nodes"])
assert len(self.nodes_) == len(set(self.nodes_)), "Axes cannot contain duplicate nodes"
self.number_of_nodes_ = len(self.nodes_)
# Edges
self.edges_ = list(map(frozenset, combinations(self.nodes_, r=2)))
self.number_of_edges_ = len(self.edges_)
# Axes
self.number_of_axes_ = number_of_axes
self.axes_preview_ = pd.Series(dict(zip(self.axes.keys(), map(lambda data:list(data["nodes"]), self.axes.values()))), name="Axes preview")
self.axes_preview_.index = self.axes_preview_.index.map(lambda name_axis: "{}. {} ({})".format(self.axes_preview_.index.get_loc(name_axis), name_axis, len(self.axes_preview_[name_axis])))
# Compile
self.compiled = True
def _get_quadrant_info(self, theta_representative):
# 0/360
if theta_representative in np.deg2rad([0,360]):
horizontalalignment = "left"
verticalalignment = "center"
quadrant = 0
# 90
if theta_representative == np.deg2rad(90):
horizontalalignment = "center"
verticalalignment = "bottom"
quadrant = 90
# 180
if theta_representative == np.deg2rad(180):
horizontalalignment = "right"
verticalalignment = "center"
quadrant = 180
# 270
if theta_representative == np.deg2rad(270):
horizontalalignment = "center"
verticalalignment = "top"
quadrant = 270
# Quadrant 1
if np.deg2rad(0) < theta_representative < np.deg2rad(90):
horizontalalignment = "left"
verticalalignment = "bottom"
quadrant = 1
# Quadrant 2
if np.deg2rad(90) < theta_representative < np.deg2rad(180):
horizontalalignment = "right"
verticalalignment = "bottom"
quadrant = 2
# Quadrant 3
if np.deg2rad(180) < theta_representative < np.deg2rad(270):
horizontalalignment = "right"
verticalalignment = "top"
quadrant = 3
# Quadrant 4
if np.deg2rad(270) < theta_representative < np.deg2rad(360):
horizontalalignment = "left"
verticalalignment = "top"
quadrant = 4
return quadrant, horizontalalignment, verticalalignment
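    # Hedged example of the mapping above (angle assumed for illustration): an axis whose
    # representative theta is np.deg2rad(135) lies strictly between 90 and 180 degrees, so it
    # falls in quadrant 2 and its label is drawn with horizontalalignment="right" and
    # verticalalignment="bottom".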
def plot(self,
title=None,
# Arc style
arc_style="curved",
# Show components
show_axis=True,
show_nodes=True,
show_edges=True,
show_border = False,
show_axis_labels=True,
show_node_labels=False,
show_polar_grid=False,
show_cartesian_grid=False,
node_label_mapping=None,
# Colors
axis_color=None,
edge_colors=None,
background_color=None,
# Alphas
edge_alpha=0.5,
node_alpha=0.8,
axis_alpha=0.618,
# Keywords
title_kws=dict(),
axis_kws=dict(),
axis_label_kws=dict(),
node_label_kws=dict(),
node_label_line_kws=dict(),
node_kws=dict(),
edge_kws=dict(),
legend_kws=dict(),
legend_label_kws=dict(),
# Figure
style="dark",
edge_linestyle="-",
axis_linestyle="-",
node_label_linestyle=":",
legend_markerstyle="s",
legend=None,
# polar=True,
ax_polar=None,
ax_cartesian=None,
clip_edgeweight=5,
granularity=100,
func_edgeweight=None,
figsize=(10,10),
# Padding
pad_axis_label = "infer",
pad_node_label = 5,
# pad_node_label_line = 0,
# node_label_position_vertical_axis="right",
):
if node_label_mapping is None:
node_label_mapping = dict()
polar = True #! Address this in future versions
assert self.compiled == True, "Please `compile` before plotting"
accepted_arc_styles = {"curved", "linear"}
assert_acceptable_arguments(arc_style, accepted_arc_styles)
if arc_style == "linear":
granularity = 2
if style in ["dark", "black", "night", "sith"]:
style = "dark_background"
if style in ["light", "white", "day", "jedi"] :
style = "seaborn-white"
with plt.style.context(style):
# Create figure
if ax_polar is not None:
fig = plt.gcf()
figsize = fig.get_size_inches()
# Polar canvas
if ax_polar is None:
fig = plt.figure(figsize=figsize)
ax_polar = plt.subplot(111, polar=polar)
# Cartesian canvas
if ax_cartesian is None:
ax_cartesian = fig.add_axes(ax_polar.get_position(), frameon=False, polar=False)
if polar == True:
y = 0.95
if polar == False:
y = 1.1
# Remove clutter from plot
ax_polar.grid(show_polar_grid)
ax_polar.set_xticklabels([])
ax_polar.set_yticklabels([])
ax_cartesian.grid(show_cartesian_grid)
ax_cartesian.set_xticklabels([])
ax_cartesian.set_yticklabels([])
            if not show_border: # Not using ax.axis('off') because it removes facecolor
for spine in ax_polar.spines.values():
spine.set_visible(False)
for spine in ax_cartesian.spines.values():
spine.set_visible(False)
node_padding = " "*pad_node_label
# Default colors
if axis_color is None:
if style == "dark_background":
axis_color = "white"
axis_label_color = "white"
else:
axis_color = "darkslategray"
axis_label_color = "black"
if background_color is not None:
ax_polar.set_facecolor(background_color)
ax_cartesian.set_facecolor(background_color)
# Title
_title_kws = {"fontweight":"bold", "y":y}
_title_kws.update(title_kws)
if "fontsize" not in _title_kws:
_title_kws["fontsize"] = figsize[0] * np.sqrt(figsize[0])/2 + 2
# Axis labels
_axis_label_kws = {"fontweight":None, "color":axis_label_color}
_axis_label_kws.update(axis_label_kws)
if "fontsize" not in _axis_label_kws:
_axis_label_kws["fontsize"] = figsize[0] * np.sqrt(figsize[0])/2
# Node labels
_node_label_kws = {"fontsize":12}
_node_label_kws.update(node_label_kws)
_node_label_line_kws = {"linestyle":node_label_linestyle, "color":axis_color}
_node_label_line_kws.update(node_label_line_kws)
# Axis plotting
_axis_kws = {"linewidth":3.382, "alpha":axis_alpha, "color":axis_color, "linestyle":axis_linestyle, "zorder":0}
_axis_kws.update(axis_kws)
# Edge plotting
_edge_kws = {"alpha":edge_alpha, "linestyle":edge_linestyle} # "zorder", _node_kws["zorder"]+1}
_edge_kws.update(edge_kws)
# Node plotting
_node_kws = {"linewidth":1.618, "edgecolor":axis_color, "alpha":node_alpha,"zorder":2}
_node_kws.update(node_kws)
# Legend plotting
_legend_label_kws = {"marker":legend_markerstyle, "markeredgecolor":axis_color, "markeredgewidth":1, "linewidth":0}
_legend_label_kws.update(legend_label_kws)
_legend_kws = {'fontsize': 15, 'frameon': True, 'facecolor': background_color, 'edgecolor': axis_color, 'loc': 'center left', 'bbox_to_anchor': (1.1, 0.5), "markerscale":1.6180339887}
_legend_kws.update(legend_kws)
# Edge info
edges = self.weights[self.edges_].abs()
if func_edgeweight is not None:
edges = func_edgeweight(edges)
if clip_edgeweight is not None:
edges = np.clip(edges, a_min=None, a_max=clip_edgeweight)
if edge_colors is None:
edge_colors = axis_color
if is_color(edge_colors):
edge_colors = dict_build([(edge_colors, edges.index)])
if is_dict(edge_colors):
edge_colors = pd.Series(edge_colors)
if not is_color(edge_colors):
if is_nonstring_iterable(edge_colors) and not isinstance(edge_colors, pd.Series):
edge_colors = pd.Series(edge_colors, index=edges.index)
edge_colors = pd.Series(edge_colors[edges.index], name="edge_colors").to_dict()
# Axes label pads
if pad_axis_label is None:
pad_axis_label = 0
if pad_axis_label == "infer":
pad_axis_label = list()
for i, (name_axis, axes_data) in enumerate(self.axes.items()):
node_positions = axes_data["node_positions_normalized"]
pad_axis_label.append(0.06180339887*(node_positions.max() - node_positions.min()))
if isinstance(pad_axis_label, (int,float)):
pad_axis_label = [pad_axis_label]*self.number_of_axes_
assert hasattr(pad_axis_label, "__iter__"), "`pad_axis_label` must be either None, 'infer', a scalar, or an iterable of {} pads".format(self.number_of_axes_)
assert len(pad_axis_label) == self.number_of_axes_, "`pad_axis_label` must be either None, 'infer', a scalar, or an iterable of {} pads".format(self.number_of_axes_)
# ================
# Plot edges
# ================
# Draw edges
if show_edges:
for (edge, weight) in edges.iteritems():
if abs(weight) > 0:
node_A, node_B = edge
name_axis_A = self.node_mapping_[node_A]
name_axis_B = self.node_mapping_[node_B]
# Check axis
intraaxis_edge = (name_axis_A == name_axis_B)
# Within axis edges
if intraaxis_edge:
name_consensus_axis = name_axis_A
# Plot edges on split axis
if self.axes[name_consensus_axis]["split_axis"]:
# print(type(edge), edge, edge in edge_colors)
color = edge_colors[edge]
# Draw edges between same axis
# Node A -> B
ax_polar.plot([*self.axes[name_consensus_axis]["theta"]], # Unpack
[self.axes[name_consensus_axis]["node_positions_normalized"][node_A], self.axes[name_consensus_axis]["node_positions_normalized"][node_B]],
c=color,
linewidth=weight,
**_edge_kws,
)
# Node B -> A
ax_polar.plot([*self.axes[name_consensus_axis]["theta"]], # Unpack
[self.axes[name_consensus_axis]["node_positions_normalized"][node_B], self.axes[name_consensus_axis]["node_positions_normalized"][node_A]],
c=color,
linewidth=weight,
**_edge_kws,
)
# Between axis
if not intraaxis_edge:
axes_ordered = list(self.axes.keys())
terminal_axis_edge = False
# Last connected to the first
if (name_axis_A == axes_ordered[-1]):
if (name_axis_B == axes_ordered[0]):
thetas = [self.axes[name_axis_A]["theta"].max(), self.axes[name_axis_B]["theta"].min()]
radii = [self.axes[name_axis_A]["node_positions_normalized"][node_A], self.axes[name_axis_B]["node_positions_normalized"][node_B]]
terminal_axis_edge = True
# First connected to the last
if (name_axis_A == axes_ordered[0]):
if (name_axis_B == axes_ordered[-1]):
thetas = [self.axes[name_axis_B]["theta"].max(), self.axes[name_axis_A]["theta"].min()]
radii = [self.axes[name_axis_B]["node_positions_normalized"][node_B], self.axes[name_axis_A]["node_positions_normalized"][node_A]]
terminal_axis_edge = True
if not terminal_axis_edge:
if axes_ordered.index(name_axis_A) < axes_ordered.index(name_axis_B):
thetas = [self.axes[name_axis_A]["theta"].max(), self.axes[name_axis_B]["theta"].min()]
if axes_ordered.index(name_axis_A) > axes_ordered.index(name_axis_B):
thetas = [self.axes[name_axis_A]["theta"].min(), self.axes[name_axis_B]["theta"].max()]
radii = [self.axes[name_axis_A]["node_positions_normalized"][node_A], self.axes[name_axis_B]["node_positions_normalized"][node_B]]
# Radii node positions
#
# Necessary to account for directionality of edge.
# If this doesn't happen then there is a long arc
                            # going counterclockwise instead of clockwise.
# If straight lines were plotted then it would be thetas and radii before adjusting for the curve below
if terminal_axis_edge:
theta_end_rotation = thetas[0]
theta_next_rotation = thetas[1] + np.deg2rad(360)
thetas = [theta_end_rotation, theta_next_rotation]
# Create grid for thetas
t = np.linspace(start=thetas[0], stop=thetas[1], num=granularity)
# Get radii for thetas
radii = interp1d(thetas, radii)(t)
thetas = t
ax_polar.plot(thetas,
radii,
c=edge_colors[edge],
linewidth=weight,
**_edge_kws,
)
# ===================
# Plot axis and nodes
# ===================
for i, (name_axis, axes_data) in enumerate(self.axes.items()):
# Retrieve
node_positions = axes_data["node_positions_normalized"]
colors = axes_data["colors"].tolist() # Needs `.tolist()` for Matplotlib version < 2.0.0
sizes = axes_data["sizes"].tolist()
# Positions
# =========
# Get a theta value for each node on the axis
if not axes_data["split_axis"]:
theta_single = np.repeat(axes_data["theta"][0], repeats=node_positions.size)
theta_vectors = [theta_single]
# Split the axis so within axis interactions can be visualized
if axes_data["split_axis"]:
theta_split_A = np.repeat(axes_data["theta"][0], repeats=node_positions.size)
theta_split_B = np.repeat(axes_data["theta"][1], repeats=node_positions.size)
theta_vectors = [theta_split_A, theta_split_B]
theta_representative = np.mean(axes_data["theta"])
# Quadrant
# =======
quadrant, horizontalalignment, verticalalignment = self._get_quadrant_info(theta_representative)
# Plot axis
# =========
if show_axis:
for theta in axes_data["theta"]:
ax_polar.plot(
2*[theta],
[min(node_positions), max(node_positions)],
**_axis_kws,
)
# Plot axis labels
# ================
if show_axis_labels:
ax_polar.text(
s = name_axis,
x = theta_representative,
y = node_positions.size + node_positions.max() + pad_axis_label[i],
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
**_axis_label_kws,
)
# Plot nodes
# ========
if show_nodes:
for theta in theta_vectors:
# Filled
ax_polar.scatter(
theta,
node_positions,
c=axes_data["colors"],
s=axes_data["sizes"],
marker=axes_data["node_style"],
**_node_kws,
)
# Empty
ax_polar.scatter(
theta,
node_positions,
facecolors='none',
s=axes_data["sizes"],
marker=axes_data["node_style"],
alpha=1,
zorder=_node_kws["zorder"]+1,
# zorder=-1,
edgecolor=_node_kws["edgecolor"],
linewidth=_node_kws["linewidth"],
)
# Plot node labels
# ================
index_labels = node_positions.index
if is_nonstring_iterable(show_node_labels):
index_labels = pd.Index(show_node_labels) & index_labels
show_node_labels = True
if show_node_labels:
if not polar:
warnings.warn("`show_node_labels` is not available in version: {}".format(__version__))
else:
horizontalalignment_nodelabels = None
for name_node, r in node_positions[index_labels].iteritems():
#! Address this in future version
# # Vertical axis case
# vertical_axis_left = (quadrant in {90,270}) and (node_label_position_vertical_axis == "left")
# vertical_axis_right = (quadrant in {90,270}) and (node_label_position_vertical_axis == "right")
# if vertical_axis_left:
# horizontalalignment_nodelabels = "right" # These are opposite b/c nodes should be on the left which means padding on the right
# if vertical_axis_right:
# horizontalalignment_nodelabels = "left" # Vice versa
# Pad on the right and push label to left
# if (quadrant == 3) or vertical_axis_left:
# node_label = "{}{}".format(name_node,node_padding)
# theta_anchor_padding = max(axes_data["theta"])
# # Pad on left and push label to the right
# if (quadrant == 4) or vertical_axis_right:
# node_label = "{}{}".format(node_padding,name_node)
# theta_anchor_padding = min(axes_data["theta"])
# theta_anchor is where the padding ends up
# Relabel node
name_node = node_label_mapping.get(name_node, name_node)
# Pad on the right and push label to left
if quadrant in {2,3, 180} :
node_label = "{}{}".format(name_node,node_padding)
theta_anchor_padding = max(axes_data["theta"])
x, y = polar_to_cartesian(r, theta_anchor_padding)
xs_line = [-self.axis_maximum, x]
x_text = -self.axis_maximum
horizontalalignment_nodelabels = "right"
                            # Pad on the left and push label to the right
if quadrant in {0, 1,4, 90, 270} :
node_label = "{}{}".format(node_padding,name_node)
theta_anchor_padding = min(axes_data["theta"])
x, y = polar_to_cartesian(r, theta_anchor_padding)
xs_line = [x, self.axis_maximum]
x_text = self.axis_maximum
horizontalalignment_nodelabels = "left"
# Node label line
ax_cartesian.plot(
xs_line,
[y, y],
**_node_label_line_kws,
)
if all([
not axes_data["split_axis"],
quadrant in {0,180},
]):
warnings.warn("Cannot plot node labels when axis is not split for angles 0 or 180 in version: {}".format(__version__))
else:
# Node label text
ax_cartesian.text(
x=x_text,
y=y,
s=node_label,
horizontalalignment=horizontalalignment_nodelabels,
verticalalignment="center",
**_node_label_kws,
)
# Adjust limits
# ===========
r_max = max(ax_polar.get_ylim())
if title is not None:
fig.suptitle(title, **_title_kws)
ax_cartesian.set_xlim(-r_max, r_max)
ax_cartesian.set_ylim(-r_max, r_max)
# Plot Legend
# ===========
if legend is not None:
assert is_dict_like(legend), "`legend` must be dict-like"
handles = list()
for label, color in legend.items():
handle = plt.Line2D([0,0],[0,0], color=color, **_legend_label_kws)
handles.append(handle)
ax_cartesian.legend(handles, legend.keys(), **_legend_kws)
return fig, [ax_polar, ax_cartesian]
# Axis data
def get_axis_data(self, name_axis=None, field=None):
if name_axis is None:
print("Available axes:", set(self.axes.keys()), file=sys.stderr)
else:
assert name_axis in self.axes, "{} is not in the axes".format(name_axis)
df = pd.DataFrame(dict_filter(self.axes[name_axis], ["colors", "sizes", "node_positions", "node_positions_normalized"]))
if self.compiled:
df["theta"] = [self.axes[name_axis]["theta"]]*df.shape[0]
df.index.name = name_axis
if field is not None:
return df[field]
else:
return df
# Connections
def get_axis_connections(self, name_axis=None, sort_by=None, ascending=False, return_multiindex=False):
assert self.compiled == True, "Please `compile` before getting connections"
if name_axis is not None:
assert name_axis in self.axes, "{} is not in the available axes for `name_axis`. Please add and recompile or choose one of the available axes:\n{}".format(name_axis, list(self.axes.keys()))
df_dense = condensed_to_dense(self.weights, index=self.nodes_)
df_connections = df_dense.groupby(self.node_mapping_, axis=1).sum()
if name_axis is not None:
idx_axis_nodes = self.axes[name_axis]["nodes"]
df_connections = df_connections.loc[idx_axis_nodes,:]
df_connections.index.name = name_axis
if sort_by is not None:
assert sort_by in self.axes, f"{sort_by} is not in the available axes for `sort_by`. Please add and recompile or choose one of the available axes:\n{self.axes.keys()}"
df_connections = df_connections.sort_values(by=sort_by, axis=0, ascending=ascending)
if return_multiindex:
df_connections.index = pd.MultiIndex.from_tuples(df_connections.index.map(lambda id_node: (self.node_mapping_[id_node], id_node)))
return df_connections
# Stats
# =====
def compare(self, data, func_stats=mannwhitneyu, name_stat=None, tol=1e-10):
"""
Compare the connections between 2 Hives or adjacencies using the specified axes assignments.
"""
assert self.compiled == True, "Please `compile` before comparing adjacencies"
assert_acceptable_arguments(type(data), {pd.DataFrame, Symmetric, Hive})
if isinstance(data, (Hive, Symmetric)):
df_dense__query = condensed_to_dense(data.weights)
if isinstance(data, pd.DataFrame):
            assert is_symmetrical(data, tol=tol)
df_dense__query = data
assert set(self.nodes_) <= set(df_dense__query.index), "`data` must contain all nodes from reference Hive"
df_dense__reference = self.to_dense()
        d_statsdata = OrderedDict()
        # Get nodes
for id_node in df_dense__reference.index:
# Get axis groups
stats_axes_data = list()
for name_axis in self.axes:
idx_axis_nodes = self.axes[name_axis]["nodes"]
n = self.axes[name_axis]["number_of_nodes"]
# Get comparison data
u = df_dense__reference.loc[id_node,idx_axis_nodes]
v = df_dense__query.loc[id_node,idx_axis_nodes]
# Get stats
stat, p = func_stats(u,v)
if name_stat is None:
if hasattr(func_stats, "__name__"):
name_stat = func_stats.__name__
else:
name_stat = str(func_stats)
# Store data
row = pd.Series(OrderedDict([
((name_axis, "number_of_nodes"), n),
((name_axis, "∑(reference)"), u.sum()),
((name_axis, "∑(query)"), v.sum()),
((name_axis, name_stat), stat),
((name_axis, "p_value"), p)
]))
stats_axes_data.append(row)
# Build pd.DataFrame
d_statsdata[id_node] = pd.concat(stats_axes_data)
return pd.DataFrame(d_statsdata).T
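    # Hedged usage sketch (query adjacency assumed): hive.compare(df_other_adjacency) returns a
    # DataFrame indexed by node whose columns are (axis, metric) pairs -- number_of_nodes, the
    # summed reference and query connections, the test statistic (Mann-Whitney U by default),
    # and p_value -- computed per axis for each node's connection profile.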
# Exports
# =======
def to_networkx(self, into=None, **attrs):
if into is None:
into = nx.Graph
metadata = { "node_type":self.node_type, "edge_type":self.edge_type}
metadata.update(attrs)
graph = into(name=self.name, **metadata)
for (node_A, node_B), weight in self.weights.iteritems():
graph.add_edge(node_A, node_B, weight=weight)
return graph
def to_symmetric(self, nodes=None, **symmetric_kws):
_symmetric_kws = dict(node_type=self.node_type, edge_type=self.edge_type, association="network", name=self.name)
_symmetric_kws.update(symmetric_kws)
if nodes is not None:
assert set(nodes) <= set(self.nodes_in_hive), "Not all `nodes` available in Hive"
edges = list(combinations(nodes, r=2))
weights = self.weights[edges]
else:
weights = self.weights
return Symmetric(weights, **_symmetric_kws)
def to_file(self, path:str, compression="infer"):
write_object(self, path=path, compression=compression)
return self
def to_dense(self, nodes=None, fill_diagonal=np.nan):
if nodes is not None:
assert set(nodes) <= set(self.nodes_in_hive), "Not all `nodes` available in Hive"
else:
nodes = self.nodes_in_hive
return condensed_to_dense(self.weights, index=nodes, fill_diagonal=fill_diagonal)
def copy(self):
return copy.deepcopy(self)
``` |
{
"source": "jolespin/scikit-bio",
"score": 3
} |
#### File: stats/distance/_permanova.py
```python
from functools import partial
import numpy as np
from ._base import (_preprocess_input_sng, _run_monte_carlo_stats,
_build_results, DistanceMatrix)
from skbio.util._decorator import experimental
from ._cutils import permanova_f_stat_sW_cy
@experimental(as_of="0.4.0")
def permanova(distance_matrix, grouping, column=None, permutations=999):
"""Test for significant differences between groups using PERMANOVA.
Permutational Multivariate Analysis of Variance (PERMANOVA) is a
non-parametric method that tests whether two or more groups of objects
(e.g., samples) are significantly different based on a categorical factor.
It is conceptually similar to ANOVA except that it operates on a distance
matrix, which allows for multivariate analysis. PERMANOVA computes a
pseudo-F statistic.
Statistical significance is assessed via a permutation test. The assignment
of objects to groups (`grouping`) is randomly permuted a number of times
(controlled via `permutations`). A pseudo-F statistic is computed for each
permutation and the p-value is the proportion of permuted pseudo-F
    statistics that are equal to or greater than the original (unpermuted)
pseudo-F statistic.
Parameters
----------
distance_matrix : DistanceMatrix
Distance matrix containing distances between objects (e.g., distances
between samples of microbial communities).
grouping : 1-D array_like or pandas.DataFrame
Vector indicating the assignment of objects to groups. For example,
these could be strings or integers denoting which group an object
belongs to. If `grouping` is 1-D ``array_like``, it must be the same
length and in the same order as the objects in `distance_matrix`. If
`grouping` is a ``DataFrame``, the column specified by `column` will be
used as the grouping vector. The ``DataFrame`` must be indexed by the
IDs in `distance_matrix` (i.e., the row labels must be distance matrix
IDs), but the order of IDs between `distance_matrix` and the
``DataFrame`` need not be the same. All IDs in the distance matrix must
be present in the ``DataFrame``. Extra IDs in the ``DataFrame`` are
allowed (they are ignored in the calculations).
column : str, optional
Column name to use as the grouping vector if `grouping` is a
``DataFrame``. Must be provided if `grouping` is a ``DataFrame``.
Cannot be provided if `grouping` is 1-D ``array_like``.
permutations : int, optional
Number of permutations to use when assessing statistical
significance. Must be greater than or equal to zero. If zero,
statistical significance calculations will be skipped and the p-value
will be ``np.nan``.
Returns
-------
pandas.Series
Results of the statistical test, including ``test statistic`` and
``p-value``.
See Also
--------
anosim
Notes
-----
See [1]_ for the original method reference, as well as ``vegan::adonis``,
available in R's vegan package [2]_.
The p-value will be ``np.nan`` if `permutations` is zero.
References
----------
.. [1] Anderson, <NAME>. "A new method for non-parametric multivariate
analysis of variance." Austral Ecology 26.1 (2001): 32-46.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
Examples
--------
See :mod:`skbio.stats.distance.anosim` for usage examples (both functions
provide similar interfaces).
"""
if not isinstance(distance_matrix, DistanceMatrix):
raise TypeError("Input must be a DistanceMatrix.")
sample_size = distance_matrix.shape[0]
num_groups, grouping = _preprocess_input_sng(
distance_matrix.ids, sample_size, grouping, column)
# Calculate number of objects in each group.
group_sizes = np.bincount(grouping)
s_T = (distance_matrix[:] ** 2).sum() / sample_size
# we are going over the whole matrix, instead of just upper triangle
# so cut in half
s_T /= 2.0
test_stat_function = partial(_compute_f_stat, sample_size, num_groups,
distance_matrix, group_sizes, s_T)
stat, p_value = _run_monte_carlo_stats(test_stat_function, grouping,
permutations)
return _build_results('PERMANOVA', 'pseudo-F', sample_size, num_groups,
stat, p_value, permutations)
def _compute_f_stat(sample_size, num_groups, distance_matrix, group_sizes,
s_T, grouping):
"""Compute PERMANOVA pseudo-F statistic."""
# Calculate s_W for each group, accounting for different group sizes.
s_W = permanova_f_stat_sW_cy(distance_matrix.data,
group_sizes, grouping)
s_A = s_T - s_W
return (s_A / (num_groups - 1)) / (s_W / (sample_size - num_groups))
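# Hedged reference sketch (not part of scikit-bio): a plain-NumPy version of the within-group
# sum of squares that the Cython helper permanova_f_stat_sW_cy is assumed to compute. Following
# Anderson (2001), s_W = sum over groups g of (sum of squared distances among members of g) / n_g,
# and the pseudo-F above is (s_A / (a - 1)) / (s_W / (N - a)) with s_A = s_T - s_W.
def _compute_s_W_reference(distance_matrix_data, group_sizes, grouping):
    """Naive O(n^2) within-group sum of squares for a full square distance array."""
    s_W = 0.0
    for g, n_g in enumerate(group_sizes):
        members = np.flatnonzero(grouping == g)
        # The full symmetric block counts each pair twice, hence the division by 2.
        block = distance_matrix_data[np.ix_(members, members)] ** 2
        s_W += block.sum() / (2.0 * n_g)
    return s_W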
``` |
{
"source": "joleuger/audioserver",
"score": 3
} |
#### File: install-files/install/control_mopidy_mpc.py
```python
mpcPrefix = "mpc -h mopidy@localhost "
# mpc -h mopidy@localhost repeat on
# mpc -h mopidy@localhost stop
# mpc -h mopidy@localhost add "file:///path/to/music.mp3"
# mpc -h mopidy@localhost clear
# mpc -h mopidy@localhost play
import pathlib
import os
import glob
import subprocess
def readm3u(m3ufilepath):
basedir=os.path.dirname(m3ufilepath)
m3ufile = open(m3ufilepath,'r')
firstLine=m3ufile.readline()
playlist = []
for entry in m3ufile:
entry=entry.strip()
if entry.startswith('#EXTM3U'):
pass
elif entry.startswith('#EXTINF:'):
pass
elif (len(entry) != 0):
if os.path.isabs(entry):
playlist.append(entry)
else:
newpath=os.path.join(basedir,entry)
playlist.append(newpath)
m3ufile.close()
return playlist
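# Hedged example (paths assumed for illustration): given /path/to/music/playlist.m3u containing
#   #EXTM3U
#   #EXTINF:123,Some Track
#   subdir/track01.mp3
#   /other/absolute/track02.mp3
# readm3u() would return ['/path/to/music/subdir/track01.mp3', '/other/absolute/track02.mp3'],
# resolving relative entries against the playlist's directory and skipping the #EXT* lines.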
#files = glob.glob("/path/to/music/*mp3")
files = readm3u("/path/to/music/playlist.m3u")
subprocess.call(mpcPrefix+"repeat on", shell=True)
subprocess.call(mpcPrefix+"stop", shell=True)
subprocess.call(mpcPrefix+"clear", shell=True)
for file in files:
absFile=os.path.abspath(file)
uri=pathlib.Path(absFile).as_uri()
subprocess.call(mpcPrefix+"add "+uri, shell=True)
subprocess.call(mpcPrefix+"play", shell=True)
``` |
{
"source": "joleuger/bluetooth-monitor",
"score": 2
} |
#### File: joleuger/bluetooth-monitor/core.py
```python
from pydbus import SessionBus
from pydbus import SystemBus
import asyncio, gbulb
from gi.repository.GLib import GError
#from hbmqtt.client import MQTTClient, ClientException
import paho.mqtt.client as mqtt
import subprocess
import os
import signal
import re
class BluetoothAudioBridge:
def __init__(self, loop):
self.loop = loop
self.DbusPulseAudioPath=""
self.DbusBluezOnSystemBus=True
self.DbusBluezBusName="org.bluez"
self.DbusBluezObjectPath="/org/bluez/hci0"
self.DbusBluezObject=None
self.DbusBluezReceivingFuture=None
self.DbusBluezDiscoveredDevices={}
self.DbusBluezUUIDsOfDevices={}
self.DbusBluezConnectedDevices={}
self.MqttPath="/BluetoothAudioBridge"
self.MqttServer="localhost"
self.MqttUsername="vhost:username"
self.MqttPassword="password"
self.MqttClient=None
self.MqttMessageQueue=asyncio.Queue()
self.MqttReceivingFuture=None
self.Continue=True
self.CancellationToken=self.loop.create_future()
self.TraceLevel=0
self.PollingCycle=3
self.mqttReceivedConnect=self.makeConnect
self.mqttReceivedPairAndTrust=self.makePairAndTrust
self.mqttReceivedScan=self.makeScan
self.dbusBtDeviceDetected=self.btDeviceDetected
self.dbusBtDeviceRemoved=self.btDeviceRemoved
self.dbusBtDeviceConnected=self.btDeviceConnected
self.dbusBtDeviceDisconnected=self.btDeviceDisconnected
self.dbusScanProcesses=0
self.btDeviceConfig = {}
self.btRunningProcesses = {}
def loadConfig(self,appConfig):
self.TraceLevel=appConfig["traceLevel"]
self.PollingCycle=appConfig["pollingCycle"]
self.btDeviceConfig = appConfig["bluetoothDevices"]
def trace(self,level,msg):
if self.TraceLevel >= level:
print(msg)
async def awaitOrStop(self,future):
# currently unused
done,pending = await asyncio.wait([self.CancellationToken, future],return_when=asyncio.FIRST_COMPLETED)
firstFinished=next(iter(done))
if firstFinished==self.CancellationToken:
#Note: pending tasks are still running
return (False,None)
#print(firstFinished)
#print(firstFinished.result())
return (True,firstFinished.result())
def makeConnect(self,message):
self.trace(0,"MQTT: received connect")
def makePairAndTrust(self,message):
self.trace(0,"MQTT: received pair and trust")
def makeScan(self,message):
        self.dbusScanProcesses = self.dbusScanProcesses + 1
        self.trace(0,"MQTT: received scan")
        asyncio.ensure_future(self.stopScanningIn30Seconds())
    async def stopScanningIn30Seconds(self):
        await asyncio.sleep(30)
        self.dbusScanProcesses = self.dbusScanProcesses - 1
        if (self.dbusScanProcesses==0):
self.trace(2,"stop scanning for devices")
async def mqttProcessMessages(self):
while self.Continue:
message=await self.MqttMessageQueue.get()
if message==None:
self.trace(0,"stopping message proccessing")
return
self.trace(1,"MQTT: received message")
if message.startswith("Connect"):
self.mqttReceivedConnect(message)
if message.startswith("Pair and trust"):
                self.mqttReceivedPairAndTrust(message)
if message.startswith("Scan"):
self.mqttReceivedScan(message)
async def registerMqtt(self):
def on_connect(client, userdata, flags, rc):
self.trace(0,"Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/BluetoothAudioBridge/commands")
def on_message(client, userdata, msg):
self.trace(1,msg.topic+" "+str(msg.payload))
msgDecoded=msg.payload.decode("utf-8")
asyncio.ensure_future(self.MqttMessageQueue.put(msgDecoded))
async def mqttReceiving():
while self.Continue:
self.trace(3,"MQTT: wait for message")
client.loop_read()
client.loop_write()
client.loop_misc()
await asyncio.sleep(0.1)
client.disconnect()
client.loop_read()
client.loop_write()
client.loop_misc()
self.MqttReceivingFuture.set_result(True)
asyncio.ensure_future(self.MqttMessageQueue.put(None)) # add final (empty) message into queue for a clean shutdown
def on_disconnect(client, userdata, rc):
if rc != 0:
self.trace(0,"Unexpected disconnection.")
client = mqtt.Client(client_id="thing-bluetoothbridge",)
client.on_connect = on_connect
client.on_message = on_message
client.on_disconnect = on_disconnect
client.username_pw_set(self.MqttUsername, password=self.MqttPassword)
client.connect(self.MqttServer, 1883, 60)
#register receiver
self.MqttReceivingFuture=self.loop.create_future()
asyncio.ensure_future(self.mqttProcessMessages())
asyncio.ensure_future(mqttReceiving())
self.trace(0,"registered on MQTT")
async def btDeviceDetected(self,address):
self.trace(0,"device detected "+address)
async def btDeviceRemoved(self,address):
self.trace(0,"device removed "+address)
def btClassIsAudio(self,btClass):
# https://www.bluetooth.com/specifications/assigned-numbers/baseband
major_service_audio_bit = 1<<21
major_device_audio_bit = 1<<10
is_audio_service = (major_service_audio_bit & btClass)==major_service_audio_bit
is_audio_device = (major_device_audio_bit & btClass)==major_device_audio_bit
return is_audio_service and is_audio_device
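    # Hedged example (class-of-device values assumed for illustration, per the Bluetooth baseband
    # assigned numbers): a headset advertising CoD 0x240404 sets both the Audio major service bit
    # (1<<21) and the Audio/Video major device bit (1<<10), so self.btClassIsAudio(0x240404) is
    # True, while a phone-class CoD such as 0x5a020c sets neither bit and returns False.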
def btDeviceHasA2DPSink(self,uuids):
# https://github.com/pauloborges/bluez/blob/master/lib/uuid.h
if "0000110b-0000-1000-8000-00805f9b34fb" in uuids:
return True
return False
def stdoutOfPopen(self):
if self.TraceLevel < 3:
return subprocess.DEVNULL
return None
async def btDeviceConnected(self,address):
self.trace(0,"device connected "+address)
if address in self.btRunningProcesses:
processGroupToKill=self.btRunningProcesses[address].pid
os.killpg(os.getpgid(processGroupToKill), signal.SIGTERM)
await asyncio.sleep(1)
os.killpg(os.getpgid(processGroupToKill), signal.SIGKILL)
self.btRunningProcesses.pop(address,None)
deviceConfig=None
if address in self.btDeviceConfig:
deviceConfig = self.btDeviceConfig[address]
else:
uuids=self.DbusBluezUUIDsOfDevices[address]
if self.btDeviceHasA2DPSink(uuids) and "other_a2dp_sinks" in self.btDeviceConfig:
deviceConfig=self.btDeviceConfig["other_a2dp_sinks"]
if deviceConfig!=None:
if "onConnectCommand" in deviceConfig:
command=deviceConfig["onConnectCommand"]
if command:
commandToExecute=command.replace("$DEVICE",address)
self.btRunningProcesses[address]=subprocess.Popen(commandToExecute,shell=True, start_new_session=True,stdout=self.stdoutOfPopen(),stderr=self.stdoutOfPopen())
async def btDeviceDisconnected(self,address):
self.trace(0,"device disconnected "+address)
if address in self.btRunningProcesses:
processGroupToKill=self.btRunningProcesses[address].pid
os.killpg(os.getpgid(processGroupToKill), signal.SIGTERM)
await asyncio.sleep(1)
os.killpg(os.getpgid(processGroupToKill), signal.SIGKILL)
self.btRunningProcesses.pop(address,None)
deviceConfig=None
if address in self.btDeviceConfig:
deviceConfig = self.btDeviceConfig[address]
else:
uuids=self.DbusBluezUUIDsOfDevices[address]
if self.btDeviceHasA2DPSink(uuids) and "other_a2dp_sinks" in self.btDeviceConfig:
deviceConfig=self.btDeviceConfig["other_a2dp_sinks"]
if deviceConfig!=None:
if "onDisconnectCommand" in deviceConfig:
command=deviceConfig["onDisconnectCommand"]
if command:
commandToExecute=command.replace("$DEVICE",address)
self.btRunningProcesses[address]=subprocess.Popen(commandToExecute,shell=True, start_new_session=True,stdout=self.stdoutOfPopen(),stderr=self.stdoutOfPopen())
async def lookForDbusChanges(self):
        deviceFilter = re.compile(r"^[/]\w+[/]\w+[/]\w+[/]dev_(?P<btmac>\w+)$")
while self.Continue:
self.trace(3,"DBUS: wait for device")
try:
self.trace(1,"DBUS: GetManagedObjects()")
managedObjects = await self.loop.run_in_executor(None, lambda: self.DbusBluezRootNode.GetManagedObjects())
await asyncio.sleep(0.5) # give PulseAudio a chance of connecting (not sure if necessary)
foundDevices={}
for objPath,obj in managedObjects.items():
match = deviceFilter.match(objPath)
if match:
btmac=match.group("btmac")
dev=obj[self.DbusBluezBusName+".Device1"]
foundDevices[btmac]=dev
self.trace(3,"Found "+str(len(foundDevices))+" devices")
removeDevices=[]
for oldDevice in self.DbusBluezDiscoveredDevices:
if oldDevice not in foundDevices:
removeDevices.append(oldDevice)
await self.dbusBtDeviceRemoved(oldDevice)
for removeDevice in removeDevices:
self.DbusBluezDiscoveredDevices.pop(removeDevice,None)
for foundDevice in foundDevices:
if foundDevice not in self.DbusBluezDiscoveredDevices:
self.DbusBluezDiscoveredDevices[foundDevice]=True
await self.dbusBtDeviceDetected(foundDevice)
# now check disconnect <-> connect
connectedDevices = {}
for foundDevice,dev in foundDevices.items():
if foundDevice not in self.DbusBluezUUIDsOfDevices:
self.DbusBluezUUIDsOfDevices[foundDevice] = dev["UUIDs"]
isConnected = dev["Connected"]
if isConnected :
connectedDevices[foundDevice]=True
disconnectedDevices=[]
for alreadyConnectedDevice in self.DbusBluezConnectedDevices:
if alreadyConnectedDevice not in connectedDevices:
disconnectedDevices.append(alreadyConnectedDevice)
await self.dbusBtDeviceDisconnected(alreadyConnectedDevice)
for disconnectedDevice in disconnectedDevices:
self.DbusBluezConnectedDevices.pop(disconnectedDevice,None)
for connectedDevice in connectedDevices:
if connectedDevice not in self.DbusBluezConnectedDevices:
self.DbusBluezConnectedDevices[connectedDevice]=True
await self.dbusBtDeviceConnected(connectedDevice)
except KeyError as err:
self.trace(0,"dbus error (KeyError)")
print(err)
self.trace(0,err)
except GError as err:
self.trace(0,"dbus error (GError)")
self.trace (0,err)
await asyncio.sleep(self.PollingCycle)
print("finished looking for dbus changes")
self.DbusBluezReceivingFuture.set_result(True)
async def registerDbus(self):
try:
if self.DbusBluezOnSystemBus:
self.DbusBluezObject = SystemBus()
else:
self.DbusBluezObject = SessionBus()
self.trace(0,"listening on D-BUS")
self.DbusBluezRootNode = self.DbusBluezObject.get(self.DbusBluezBusName,"/")
self.trace(0,"connected to org.bluez")
except GError as err:
self.trace(0,"dbus error (register)")
self.trace (0,err)
self.DbusBluezRootNode=None
if self.DbusBluezRootNode:
self.DbusBluezReceivingFuture=self.loop.create_future()
asyncio.ensure_future(self.lookForDbusChanges())
async def register(self):
await self.registerMqtt()
await self.registerDbus()
async def unregister(self):
self.Continue=False
if (self.DbusBluezReceivingFuture):
await self.DbusBluezReceivingFuture
self.DbusBluezReceivingFuture = None
if (self.MqttReceivingFuture):
await self.MqttReceivingFuture
self.MqttReceivingFuture=None
``` |
{
"source": "jolevesq/msd-map",
"score": 2
} |
#### File: msd-map/src/convertMXD.py
```python
import os, locale
import sys
import arcpy
import string
import zipfile
import xml.etree.ElementTree as ET
import lookupEPSG as EPSG
import commonSections as sections
import symbols
print 'Argument List:', str(sys.argv)
def log(txt, offset=0, upper=True):
if upper:
txt = txt.upper()
file.write(' ' * offset + txt + '\n')
# the MapServer folder
mapServerFolder = sys.argv[3]
# set variables to select an MXD file
mxdFileFolder = sys.argv[1]
mxdFilePath = sys.argv[1] + sys.argv[2]
mxdFileName = sys.argv[2]
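# Example invocation (illustrative; paths are placeholders for an ArcMap Python 2.7 setup):
#   C:\Python27\ArcGIS10.x\python.exe convertMXD.py C:\projects\mxd\ mymap.mxd myMapServerFolder
# i.e. argv[1] = MXD folder, argv[2] = MXD file name, argv[3] = MapServer data folder.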
# get local format for thousand separator
loc = locale.getlocale()
# get a reference to the Map Document
mxd = arcpy.mapping.MapDocument(mxdFilePath)
# create a map file with the same name of the MXD file, in the same folder
file = open(mxdFileFolder + os.path.splitext(mxdFileName)[0] + '.map', 'w')
# convert to MSD (Map Service Definition)
# TODO: !!!!!!!!!! we need to have the right data source path. if not, we need to repair the link !!!!!!!!!!
arcpy.mapping.ConvertToMSD(mxd, mxdFileFolder + os.path.splitext(mxdFileName)[0] + '.msd')
zz = zipfile.ZipFile(mxdFileFolder + os.path.splitext(mxdFileName)[0] + '.msd')
# get documentInfo
rootDocInfo = ET.parse(zz.open('DocumentInfo.xml')).getroot()
title = rootDocInfo.find('./DocumentTitle').text
abstract = rootDocInfo.find('./Subject').text
keywords = rootDocInfo.find('./Keywords').text.replace(' ', '')
dataFrame = os.path.basename(rootDocInfo.find('./ActiveMapRepositoryPath').text)
# get dataFrameInfo
# if the data has the same name as the dataFrame, the dataFrame gets a numeric suffix.
# We need to strip it from the path.
rootDataFrame = ET.parse(zz.open((dataFrame.split('.')[0].rstrip(string.digits)) + '/' + dataFrame)).getroot()
name = rootDataFrame.find('./Name').text.replace(' ', '_')
ext = [rootDataFrame.find('./DefaultExtent/XMin').text,
rootDataFrame.find('./DefaultExtent/YMin').text,
rootDataFrame.find('./DefaultExtent/XMax').text,
rootDataFrame.find('./DefaultExtent/YMax').text]
layersNode = rootDataFrame.find('./Layers')
layers = []
for elem in layersNode.iter():
    if elem.text is not None:
layers.append(elem.text.split('=')[1])
log('MAP')
log('NAME "{name}"'.format(name=name), 2)
log(sections.getMapCommon(), 2, False)
log(sections.getExtent(ext), 2, False)
log(sections.getSize(), 2, False)
log(sections.getUnits() + '\n', 2, False)
log(sections.getPng(), 2, False)
log(sections.getGif(), 2, False)
log(sections.getJpeg(), 2, False)
log(sections.getScale(), 2, False)
log(sections.getLegend(), 2, False)
epsg = EPSG.getCodeFromWKID(rootDataFrame.find('./DefaultExtent/SpatialReference/WKID').text)
log(sections.getProjection(epsg), 2, False)
log(sections.getWeb(title, abstract, keywords), 2, False)
log(sections.getSeparator(), 2, False)
for fileName in layers:
tree = ET.parse(zz.open(fileName))
root = tree.getroot()
name = root.find('./Name').text.replace(' ', '_')
data = root.find('./FeatureTable/DataConnection/Dataset').text
minScale = root.find('./MinScale').text
maxScale = root.find('./MaxScale').text
connection = root.find('./FeatureTable/DataConnection/WorkspaceConnectionString').text.replace('DATABASE=', '')
geomType = sections.getLayerGeometry(root.find('./FeatureTable/FieldDescriptions'))
log('LAYER', 2)
log('NAME "' + name + '"', 4, False)
log('DATA "' + data + '"', 4, False)
# log('MINSCALEDENOM ' + minScale, 4)
# log('MAXSCALEDENOM ' + maxScale, 4)
log('STATUS ON', 4)
log('CONNECTIONTYPE OGR', 4)
log('CONNECTION "./data/' + mapServerFolder + '/' + os.path.basename(connection) + '"', 4, False)
log('TYPE ' + geomType, 4)
log(sections.getLayerMetadata(name, epsg), 4, False)
# symbology
if geomType == 'Line':
log(symbols.getSymbologyLine(root), 4, False)
elif geomType == 'Polygon':
log(symbols.getSymbologyPolygon(root), 4, False)
else:
log(symbols.getSymbologyPoint(root), 4, False)
# if geomType == 'Point':
# # outline
# colorR = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/OutlineColor/R').text
# colorG = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/OutlineColor/G').text
# colorB = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/OutlineColor/B').text
# width = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/OutlineWidth').text
# # fill
# fillR = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/FillColor/R').text
# fillG = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/FillColor/G').text
# fillB = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/FillColor/B').text
# # size
# size = root.find('./Symbolizer/Symbol/Symbol/SymbolLayers/CIMSymbolLayer/Size').text
# log(sections.getPointStyle(fillR + ' ' +fillG + ' ' + fillB, size), 4, False)
log('END # layer', 2, False)
log(sections.getSeparator(), 2, False)
log('END # map')
file.close()
del zz
del mxd
openNotepad = 'true'
if openNotepad == 'true':
# open TXT file with Notepad or other program with which txt extension is associated with
os.startfile(file.name)
``` |
{
"source": "jolexa/aws-codebuild-dockerhub",
"score": 2
} |
#### File: aws-codebuild-dockerhub/lambda/clean-old-codebuilds.py
```python
import logging
import os
import json
import datetime
from dateutil import parser
import boto3
from botocore.exceptions import ClientError
logging.basicConfig()
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logger = logging.getLogger("mylogger")
logger.setLevel("INFO")
try:
region = os.environ['AWS_DEFAULT_REGION']
except KeyError:
region = 'us-east-2'
def check_delete_candidate(codebuild):
name = codebuild['name']
logger.info("Checking: {}".format(name))
tags = codebuild['tags']
sevendaysago = datetime.datetime.now() - datetime.timedelta(days=7)
for i in tags:
key, value = i['key'], i['value']
if key == "X-Delete-Via-Lambda-Eligible":
if value == "True":
logger.info("Delete via Lambda is True")
for t in tags:
key, value = t['key'], t['value']
if key == 'X-Created-Date':
cdate = parser.parse(value)
if cdate < sevendaysago:
logger.info("Candidate for removal due to age")
return True
else:
logger.info("Created Date is not older than 7 days")
logger.info("Not a candidate for removal")
return False
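    # Example (illustrative tag values): a project tagged with
    #   {"key": "X-Delete-Via-Lambda-Eligible", "value": "True"}
    # and an "X-Created-Date" more than 7 days in the past is flagged for removal.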
def delete_s3_if_exists(codebuild):
s3client = boto3.client('s3')
for i in codebuild['tags']:
key, bucketname = i['key'], i['value']
if key == "X-Created-S3-Bucket":
try:
objs = s3client.list_objects_v2(
Bucket=bucketname
)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
logger.info("Bucket Already Deleted, not trying further")
return True
else:
raise
for i in objs['Contents']:
logger.info("Deleting Object: {}".format(i))
delete = s3client.delete_object(
Bucket=bucketname,
Key=i['Key']
)
s3client.delete_bucket(Bucket=bucketname)
logger.info("Deleted Bucket: {}".format(bucketname))
return True
def lambda_handler(event, context):
client = boto3.client('codebuild', region_name=region)
logger.info(json.dumps(event, indent=4))
    # All this pagination code because boto3 doesn't support pagination on
    # codebuild at time of writing
projects_list = client.list_projects()
projects = projects_list['projects']
next_token = projects_list.get('nextToken')
while next_token is not None:
projects_list = client.list_projects(
nextToken=next_token
)
        # extend, not append, so `projects` stays a flat list of project names
        projects.extend(projects_list['projects'])
next_token = projects_list.get('nextToken')
# end pagination code, 'projects' is now ready to use
for i in projects:
response = client.batch_get_projects(names=[i])
p = response['projects'][0] # there will only be one item in this list
name = p['name']
if check_delete_candidate(p):
delete_s3_if_exists(p) # delete the orphaned s3 bucket
client.delete_project(name=name)
logger.info("Deleted project: {}".format(name))
boto3.client('logs').delete_log_group(
logGroupName='/aws/codebuild/' + name
)
if __name__ == '__main__':
# Running this from localhost with proper permissions will work as intended
event = {}
context = {}
lambda_handler(event, context)
```
#### File: aws-codebuild-dockerhub/lambda/listener.py
```python
import logging
import os
import hmac
from hashlib import sha1
import json
import re
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
def lambda_handler(event, context):
logger.info(json.dumps(event, indent=4))
try:
# Get the sha_name and sig from the headers
sha_name, sig = event['headers']['X-Hub-Signature'].split('=')
# Get the body of the message
body = event['body']
# Only support sha1
if sha_name != 'sha1':
return {
"statusCode": 501,
"body": "Something is wrong, unsupported sha type?\n"
}
except Exception as e:
logger.exception(e)
return {
"statusCode": 501,
"body": "Something is wrong, not from GitHub?\n"
}
secret = os.environ['GHSECRET']
# HMAC requires the key to be bytes, but data is string
mac = hmac.new(secret, body, sha1)
if not hmac.compare_digest(unicode(mac.hexdigest()), unicode(sig)):
logger.critical("Signature doesn't match")
return {
'body': "Signature doesn't match",
'statusCode': 403
}
githookbody = json.loads(body)
logger.debug(json.dumps(githookbody, indent=4))
# some sanity checking, only support ping and push
if event['headers']['X-GitHub-Event'] == "ping":
logger.info("ping event, returning")
return {
'body': "pong",
'statusCode': 200
}
elif event['headers']['X-GitHub-Event'] != "push":
logger.critical("hook event is not supported")
return {
'body': "hook event is not supported",
'statusCode': 501
}
if githookbody['repository']['private']: # bool
logger.info("Event Accepted but Private Repos are not supported")
return {
'body': "Event Accepted but Private Repos are not supported",
'statusCode': 200
}
repo_url = githookbody['repository']['url'] + ".git"
username = githookbody['repository']['owner']['name']
builds_list = []
# https://developer.github.com/v3/activity/events/types/#pushevent
# Check the commits array for added or modified files, if the path contains
# a "/" it probably fits the opionated repo structure
for i in githookbody['commits']:
for a in i['added']:
if re.search("/", a):
builds_list.append(a.split("/")[0])
for m in i['modified']:
if re.search("/", m):
builds_list.append(m.split("/")[0])
# Removes dupes to prevent spawning duplicate jobs
builds = list(set(builds_list))
# Spawn the CodeBuild Job
if builds: # False if empty
import boto3
client = boto3.client('lambda')
message_input = {
'repo_url': repo_url,
'builds': builds,
'username': username,
}
logger.debug(message_input)
response = client.invoke(
FunctionName=os.environ['SpawnCodeBuildFunctionArn'],
InvocationType='Event', # async
LogType='None',
Payload=json.dumps(message_input)
)
else:
logger.info("Not spawning a codebuild job due to input/commit")
# Everything is good
return {
"statusCode": 200,
"body": "accepted"
}
```
#### File: aws-codebuild-dockerhub/lambda/spawn-codebuild.py
```python
import logging
import os
import json
import uuid
import shutil
import datetime
from posixpath import basename
import boto3
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
try:
region = os.environ['AWS_DEFAULT_REGION']
except KeyError:
region = 'us-east-2'
def create_dummy_s3_input():
client = boto3.client('s3')
bucketname = "aws-codebuild-dockerhub-{}-{}".format(
boto3.client('sts').get_caller_identity().get('Account'),
str(uuid.uuid4().get_hex().lower()[0:10])
)
# create bucket
client.create_bucket(
Bucket=bucketname,
CreateBucketConfiguration={
'LocationConstraint': region
}
)
# create zip
shutil.make_archive("/tmp/dummyzipfile", 'zip')
# Upload zip
client.put_object(
Body=open("/tmp/dummyzipfile.zip", 'r'),
Bucket=bucketname,
Key="dummyzipfile.zip"
)
os.remove("/tmp/dummyzipfile.zip")
return bucketname, "dummyzipfile.zip"
def lambda_handler(event, context):
logger.info(json.dumps(event, indent=4))
repo_url = event.get('repo_url')
repo_path = basename(repo_url).split('.git')[0]
username = event.get('username')
client = boto3.client('codebuild')
for build_target in event.get('builds'):
# Every CodeBuild job needs some repo to "build" - this is a fake build
# project that automatically gets removed
bucketname, obj = create_dummy_s3_input()
buildjob = client.create_project(
name='-'.join([
'aws-codebuild-dockerhub',
build_target.replace(".", "-"),
str(uuid.uuid4().get_hex().lower()[0:10])
]),
source={
'type': 'S3',
'location': "{}/{}".format(bucketname, obj),
'buildspec': '''
version: 0.1
phases:
build:
commands:
- git clone {0}
- cd {1}/{2} && [ -e Dockerfile ] &&
docker build -t {3}/{2} . && docker login -u {3} -p
$(aws ssm get-parameters --names {5} --with-decryption
--query Parameters[0].Value --output text) && docker push {3}/{2} && docker logout
post_build:
commands:
- aws s3 rb s3://{6} --force
- curl -s
https://raw.githubusercontent.com/jolexa/aws-codebuild-dockerhub/master/invoke-sns-notify-lambda.sh
> invoke-sns-notify-lambda.sh && bash ./invoke-sns-notify-lambda.sh {7} $CODEBUILD_BUILD_ID
'''.format(
repo_url, # 0 https://github.com/username/repo.git
repo_path, # 1 repo
build_target, # 2 directory in repo
username, # 3 username
region, # 4
os.getenv('SSMLeadingKey'),
bucketname, # 6
os.getenv('NotifyFunctionName') # 7
),
},
artifacts={
'type': 'NO_ARTIFACTS',
},
environment={
'type': 'LINUX_CONTAINER',
'image': 'aws/codebuild/docker:1.12.1',
'computeType': 'BUILD_GENERAL1_SMALL',
},
serviceRole=os.getenv('CodeBuildRoleArn'),
timeoutInMinutes=20,
# these tags are used by the cleanup lambda so it doesn't stomp on
# other CodeBuild jobs that may be present
tags=[
{
"key": "X-Created-S3-Bucket",
"value": bucketname
},
{
"key": "X-Created-Date",
"value": str(datetime.date.today())
},
{
"key": "X-Delete-Via-Lambda-Eligible",
"value": "True"
}
]
)
# Start the build
client.start_build(
projectName=buildjob['project']['name']
)
``` |
{
"source": "jolexa/gimmeatroll.com",
"score": 2
} |
#### File: gimmeatroll.com/lambda/main.py
```python
import os
import random
import logging
import boto3
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def find_random_s3_image():
client = boto3.client('s3')
# Return one random key in the bucket
key = random.choice(client.list_objects_v2(
Bucket='gimmeatroll.com',
MaxKeys=1000
)['Contents'])['Key']
return "https://s3.{}.amazonaws.com/gimmeatroll.com/{}".format(
os.environ['AWS_DEFAULT_REGION'],
key)
def handler(event, context):
logger.debug(event)
logo = find_random_s3_image()
html = "<html><meta property='og:image' content='{}'/><img src='{}'></html>".format(logo, logo)
return html
if __name__== "__main__":
event = {}
context = {}
os.environ['AWS_DEFAULT_REGION'] = 'us-east-2'
print(handler(event, context))
``` |
{
"source": "jolexa/s3-staticsite-multiregion",
"score": 2
} |
#### File: s3-staticsite-multiregion/scripts/invalidate-all.py
```python
from time import time
import sys
import boto3
client = boto3.client('cloudfront')
def get_id(url):
print("get_id args: {0}".format(url))
# url: asdf.cloudfront.net
# return: E2134123ASDF
# where E2134123ASDF is the id of asdf.cloudfront.net
paginator = client.get_paginator('list_distributions')
response_iterator = paginator.paginate()
for i in response_iterator:
for j in i['DistributionList']['Items']:
if j['Aliases']['Items'][0] == url:
return j['Id']
response = client.create_invalidation(
DistributionId=get_id(sys.argv[1]),
InvalidationBatch={
'Paths': {
'Quantity': 1,
'Items': [
'/*'
],
},
'CallerReference': str(time()).replace(".", "")
}
)
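# Example usage (illustrative; the alias below is a placeholder):
#   python invalidate-all.py www.example.com
# invalidates every path of the CloudFront distribution whose first alias matches the argument.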
``` |
{
"source": "joleys/niko-home-control-II",
"score": 2
} |
#### File: custom_components/nhc2/camera.py
```python
import logging
import httpx
import voluptuous as vol
from homeassistant.components.camera import DEFAULT_CONTENT_TYPE, PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.exceptions import TemplateError
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
from .nhccoco.coco import CoCo
from .nhccoco.coco_device_class import CoCoDeviceClass
from .nhccoco.coco_accesscontrol import CoCoAccessControl
from .const import DOMAIN, KEY_GATEWAY, BRAND
from .helpers import nhc2_entity_processor
KEY_GATEWAY = KEY_GATEWAY
KEY_ENTITY = 'nhc2_accesscontrol'
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load NHC2 access control based on a config entry."""
hass.data.setdefault(KEY_ENTITY, {})[config_entry.entry_id] = []
gateway: CoCo = hass.data[KEY_GATEWAY][config_entry.entry_id]
_LOGGER.debug('Platform is starting')
gateway.get_devices(CoCoDeviceClass.ACCESSCONTROL,
nhc2_entity_processor(hass,
config_entry,
async_add_entities,
KEY_ENTITY,
lambda x: NHC2HassAccessControl(x))
)
class NHC2HassAccessControl(Camera):
"""Implementation of NHC2 Camera."""
def __init__(self, nhc2accesscontrol: CoCoAccessControl):
"""Initialize a camera."""
nhc2accesscontrol.on_change = self._on_change
self._nhc2accesscontrol = nhc2accesscontrol
self._auth = httpx.BasicAuth(username='user', password='')
self._stream_source = self._nhc2accesscontrol.stream_source
@property
def supported_features(self):
"""Return supported features for this camera."""
return SUPPORT_STREAM
@property
def name(self):
"""Return the name of this device."""
return self._nhc2accesscontrol.name
async def stream_source(self):
"""Return the source of the stream."""
if self._stream_source is None:
return None
try:
return self._stream_source.async_render(parse_result=False)
except TemplateError as err:
_LOGGER.error("Error parsing template %s: %s", self._stream_source, err)
return None
``` |
{
"source": "jolfr/capstone-02",
"score": 3
} |
#### File: src/data/dataverse.py
```python
import os
import requests
def download(ref, filename):
print('Beginning ' + filename + ' download with requests')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
url = 'https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=' + ref
file_path = '../../data/external/' + filename
r = requests.get(url)
r.raise_for_status()
with open(file_path, 'wb') as f:
f.write(r.content)
return r.status_code, r.headers['content-type'], r.encoding
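# Example usage (illustrative; the DOI below is a placeholder and ../../data/external/ must exist):
#   status, content_type, encoding = download('doi:10.7910/DVN/EXAMPLE', 'example.tab')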
```
#### File: src/data/shape_data.py
```python
import pandas as pd
import numpy as nd
def generate_filled(df):
years = pd.DataFrame(range(1988, 2017))
years.columns = ['year']
unique = df.zipcode.unique()
result = pd.DataFrame()
for zipcode in unique:
append = years.copy()
append['zipcode'] = zipcode
result = pd.concat([result, append], ignore_index=True)
return result
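# Illustrative example (assumed input): if df.zipcode has the two unique values 10001 and
# 60614, generate_filled returns 29 (year, zipcode) rows per zipcode (years 1988-2016),
# i.e. 58 rows in total.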
```
#### File: src/data/zip_to_msa_data.py
```python
import os
import requests
def download():
print('Beginning ZCTA to MSA download with requests')
os.chdir(os.path.dirname(os.path.abspath(__file__)))
url = 'https://www2.census.gov/geo/docs/maps-data/data/rel/zcta_cbsa_rel_10.txt'
filename = '../../data/external/zip_to_msa.csv'
r = requests.get(url)
r.raise_for_status()
with open(filename, 'wb') as f:
f.write(r.content)
return r.status_code, r.headers['content-type'], r.encoding
``` |
{
"source": "jolguk/cognitive-services-speech-sdk",
"score": 3
} |
#### File: python/from-microphone/quickstart.py
```python
import azure.cognitiveservices.speech as speechsdk
import time
# Creates an instance of a speech config with specified subscription key and service region.
# Replace with your own subscription key and service region (e.g., "westus").
# speech_key, service_region = "36a492adffde420390f891fa14a8633f", "northeurope"
# speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
speech_config = speechsdk.SpeechConfig(
endpoint="ws://localhost:5000/speech/recognition/conversation/cognitiveservices/v1")
# Creates a recognizer with the given settings
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config)
print("Say something...")
# Starts speech recognition, and returns after a single utterance is recognized. The end of a
# single utterance is determined by listening for silence at the end or until a maximum of 15
# seconds of audio is processed. The task returns the recognition text as result.
# Note: Since recognize_once() returns only a single utterance, it is suitable only for single
# shot recognition like command or query.
# For long-running multi-utterance recognition, use start_continuous_recognition() instead.
# result = speech_recognizer.recognize_once()
# Checks result.
# if result.reason == speechsdk.ResultReason.RecognizedSpeech:
# print("Recognized: {}".format(result.text))
# elif result.reason == speechsdk.ResultReason.NoMatch:
# print("No speech could be recognized: {}".format(result.no_match_details))
# elif result.reason == speechsdk.ResultReason.Canceled:
# cancellation_details = result.cancellation_details
# print("Speech Recognition canceled: {}".format(cancellation_details.reason))
# if cancellation_details.reason == speechsdk.CancellationReason.Error:
# print("Error details: {}".format(cancellation_details.error_details))
done = False
def stop_cb(evt):
"""callback that stops continuous recognition upon receiving an event `evt`"""
print('CLOSING on {}'.format(evt))
speech_recognizer.stop_continuous_recognition()
global done
done = True
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('RECOGNIZING: {}'.format(evt)))
speech_recognizer.recognized.connect(lambda evt: print('RECOGNIZED: {}'.format(evt)))
speech_recognizer.session_started.connect(lambda evt: print('SESSION STARTED: {}'.format(evt)))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition()
while not done:
time.sleep(.5)
# </code>
``` |
{
"source": "jolibrain/gpustat_server",
"score": 3
} |
#### File: gpustat_server/gpustat_server/__main__.py
```python
import json
from flask import Flask, Response
from gpustat import GPUStatCollection
app = Flask('gpustat_server')
def date_handler(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError(type(obj))
@app.route("/")
def gpustat_server():
stats = GPUStatCollection.new_query()
rep = Response(json.dumps(stats.jsonify(), default=date_handler),
mimetype='application/json')
rep.headers = {**rep.headers,
**{'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET'}}
return rep
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Serve gpustat on given host:port")
parser.add_argument("--host", default="localhost")
parser.add_argument("--port", type=int, default=12345)
app.run(**vars(parser.parse_args()))
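    # Example usage (illustrative, assuming the package is importable):
    #   python -m gpustat_server --host 0.0.0.0 --port 12345
    #   curl http://localhost:12345/   # returns the gpustat query as JSON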
``` |
{
"source": "jolibrain/manette",
"score": 3
} |
#### File: jolibrain/manette/exploration_policy.py
```python
import numpy as np
import tensorflow as tf
import logging
class Action :
def __init__(self, tab_rep, i, a, r):
self.tab_rep = tab_rep
self.id = i
self.repeated = False
self.current_action = 0
self.nb_repetitions_left = 0
self.init_from_list(a, r)
def __str__(self):
return "id : "+str(self.id)+", action "+str(self.current_action)+" repeated "+str(self.nb_repetitions_left)+" times."
def init_from_list(self, a, r):
self.current_action = np.argmax(a)
self.nb_repetitions_left = self.tab_rep[np.argmax(r)]
if self.nb_repetitions_left > 0 :
self.repeated = True
def repeat(self):
self.nb_repetitions_left -= 1
if self.nb_repetitions_left == 0 :
self.repeated = False
return self.current_action
def reset(self):
self.repeated = False
self.current_action = 0
self.nb_repetitions_left = 0
def is_repeated(self):
return self.repeated
class ExplorationPolicy:
def __init__(self, args, test = False):
self.test = test
self.global_step = 0
self.egreedy_policy = args.egreedy
self.initial_epsilon = args.epsilon
self.epsilon = args.epsilon
self.softmax_temp = args.softmax_temp
self.keep_percentage = args.keep_percentage
self.annealed = args.annealed
self.annealing_steps = 80000000
self.max_repetition = args.max_repetition
self.nb_choices = args.nb_choices
self.tab_rep = self.get_tab_repetitions()
def get_tab_repetitions(self):
res = [0]*self.nb_choices
res[-1] = self.max_repetition
if self.nb_choices > 2 :
for i in range(1, self.nb_choices-1):
res[i] = int(self.max_repetition/(self.nb_choices-1)) * i
return res
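        # e.g. with nb_choices=4 and max_repetition=8 (illustrative values),
        # this returns [0, 2, 4, 8]: the extra repetitions granted by each choice.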
def get_epsilon(self):
if self.global_step <= self.annealing_steps:
return self.initial_epsilon - (self.global_step * self.initial_epsilon / self.annealing_steps)
else:
return 0.0
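        # e.g. with an initial epsilon of 0.5 (assumed value) this anneals linearly:
        # 0.25 at global_step=40e6 and 0.0 from 80e6 steps onwards.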
def choose_next_actions(self, network_output_pi, network_output_rep, num_actions):
if self.test :
action_indices = self.argmax_choose(network_output_pi)
repetition_indices = self.argmax_choose(network_output_rep)
elif self.egreedy_policy :
action_indices = self.e_greedy_choose(network_output_pi)
repetition_indices = self.e_greedy_choose(network_output_rep)
else :
action_indices = self.multinomial_choose(network_output_pi)
repetition_indices = self.multinomial_choose(network_output_rep)
new_actions = np.eye(num_actions)[action_indices]
new_repetitions = np.eye(self.nb_choices)[repetition_indices]
self.global_step += len(network_output_pi)
        if self.annealed: self.epsilon = self.get_epsilon()
return new_actions, new_repetitions
def argmax_choose(self, probs):
"""Choose the best actions"""
action_indexes = []
for p in probs :
action_indexes.append(np.argmax(p))
return action_indexes
def e_greedy_choose(self, probs):
"""Sample an action from an action probability distribution output by
the policy network using a greedy policy"""
action_indexes = []
for p in probs :
if np.random.rand(1)[0] < self.epsilon :
i = np.random.randint(0,len(p))
action_indexes.append(i)
else :
action_indexes.append(np.argmax(p))
return action_indexes
def multinomial_choose(self, probs):
"""Sample an action from an action probability distribution output by
the policy network using a multinomial law."""
# Subtract a tiny value from probabilities in order to avoid
# "ValueError: sum(pvals[:-1]) > 1.0" in numpy.multinomial
probs = probs - np.finfo(np.float32).epsneg
action_indexes = [int(np.nonzero(np.random.multinomial(1, p))[0]) for p in probs]
return action_indexes
```
#### File: jolibrain/manette/logger_utils.py
```python
import os
import numpy as np
import time
import json
import tensorflow as tf
import math
import matplotlib.pyplot as plt
def load_args(path):
if path is None:
return {}
with open(path, 'r') as f:
return json.load(f)
def save_args(args, folder, file_name='args.json'):
args = vars(args)
if not os.path.exists(folder):
os.makedirs(folder)
with open(os.path.join(folder, file_name), 'w') as f:
return json.dump(args, f)
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
def get_grid_dim(x):
"""
Transforms x into product of two integers
:param x: int
:return: two ints
"""
factors = prime_powers(x)
if len(factors) % 2 == 0:
i = int(len(factors) / 2)
return factors[i], factors[i - 1]
i = len(factors) // 2
return factors[i], factors[i]
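    # Worked examples (illustrative): get_grid_dim(12) -> (4, 3) since the factors of 12
    # are [1, 2, 3, 4, 6, 12]; get_grid_dim(16) -> (4, 4) since 16 has an odd factor count.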
def prime_powers(n):
"""
Compute the factors of a positive integer
Algorithm from https://rosettacode.org/wiki/Factors_of_an_integer#Python
:param n: int
:return: set
"""
factors = set()
for x in range(1, int(math.sqrt(n)) + 1):
if n % x == 0:
factors.add(int(x))
factors.add(int(n // x))
return sorted(factors)
def fig2data(fig):
    """
    @brief Convert a Matplotlib figure to a 3D numpy array with RGB channels and return it
    @param fig a matplotlib figure
    @return a numpy array of shape (height, width, 3) holding RGB values
    """
"""
# draw the renderer
fig.canvas.draw ( )
# Get the RGB buffer from the figure
w,h = fig.canvas.get_width_height()
    rgb = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
return rgb.reshape(h, w, 3)
def plot_conv_output(conv_img):
"""
Makes plots of results of performing convolution
:param conv_img: numpy array of rank 4
:param name: string, name of convolutional layer
:return: nothing, plots are saved on the disk
"""
w_min = np.min(conv_img)
w_max = np.max(conv_img)
# get number of convolutional filters
num_filters = conv_img.shape[3]
# get number of grid rows and columns
grid_r, grid_c = get_grid_dim(num_filters)
# create figure and axes
fig, axes = plt.subplots(min([grid_r, grid_c]), max([grid_r, grid_c]))
# iterate filters
for l, ax in enumerate(axes.flat):
# get a single image
img = conv_img[0, :, :, l]
# put it on the grid
ax.imshow(img, vmin=w_min, vmax=w_max, interpolation='bicubic', cmap='Greys')
# remove any labels from the axes
ax.set_xticks([])
ax.set_yticks([])
# save figure
fig1 = plt.gcf()
data = fig2data(fig1)
plt.close()
return data
```
#### File: jolibrain/manette/networks.py
```python
import tensorflow as tf
from tensorflow.contrib import rnn
import logging
import numpy as np
class Operations():
def __init__(self, conf):
self.rgb = conf['rgb']
self.depth = 1
if self.rgb :
self.depth = 3
self.alpha_leaky_relu = conf['alpha_leaky_relu']
def flatten(self, _input):
shape = _input.get_shape().as_list()
dim = shape[1]*shape[2]*shape[3]
return tf.reshape(_input, [-1,dim], name='_flattened')
def conv2d(self, name, _input, filters, size, channels, stride, padding = 'VALID', init = "torch", activation = "relu"):
with tf.name_scope(name):
w = self.conv_weight_variable([size,size, channels,filters],
name + '_weights', init = init)
b = self.conv_bias_variable([filters], size, size, channels,
name + '_biases', init = init)
conv = tf.nn.conv2d(_input, w, strides=[1, stride, stride, 1],
padding=padding, name=name + '_convs')
if activation == "relu" :
out = tf.nn.relu(tf.add(conv, b), name='' + name + '_activations')
elif activation == "leaky_relu" :
x = tf.add(conv, b)
out = tf.maximum(x, self.alpha_leaky_relu * x, name='' + name + '_activations')
return w, b, out
def conv_weight_variable(self, shape, name, init = "torch"):
if init == "glorot_uniform":
receptive_field_size = np.prod(shape[:2])
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
d = np.sqrt(6. / (fan_in + fan_out))
else:
w = shape[0]
h = shape[1]
input_channels = shape[3]
d = 1.0 / np.sqrt(input_channels * w * h)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def conv_bias_variable(self, shape, w, h, input_channels, name, init= "torch"):
if init == "glorot_uniform":
initial = tf.zeros(shape)
else:
d = 1.0 / np.sqrt(input_channels * w * h)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def fc(self, name, _input, output_dim, activation = "relu", init = "torch"):
with tf.name_scope(name):
input_dim = _input.get_shape().as_list()[1]
w = self.fc_weight_variable([input_dim, output_dim],
name + '_weights', init = init)
b = self.fc_bias_variable([output_dim], input_dim,
'' + name + '_biases', init = init)
out = tf.add(tf.matmul(_input, w), b, name= name + '_out')
if activation == "relu":
out = tf.nn.relu(out, name='' + name + '_relu')
elif activation == "leaky_relu" :
out = tf.maximum(out, self.alpha_leaky_relu * out, name='' + name + '_leakyrelu')
return w, b, out
def fc_weight_variable(self, shape, name, init="torch"):
if init == "glorot_uniform":
fan_in = shape[0]
fan_out = shape[1]
d = np.sqrt(6. / (fan_in + fan_out))
else:
input_channels = shape[0]
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def fc_bias_variable(self, shape, input_channels, name, init= "torch"):
if init=="glorot_uniform":
initial = tf.zeros(shape, dtype='float32')
else:
d = 1.0 / np.sqrt(input_channels)
initial = tf.random_uniform(shape, minval=-d, maxval=d)
return tf.Variable(initial, name=name, dtype='float32')
def softmax(self, name, _input, output_dim, temp):
with tf.name_scope(name):
softmax_temp = tf.constant(temp, dtype=tf.float32)
input_dim = _input.get_shape().as_list()[1]
w = self.fc_weight_variable([input_dim, output_dim], name + '_weights')
b = self.fc_bias_variable([output_dim], input_dim, name + '_biases')
out = tf.nn.softmax(tf.div(tf.add(tf.matmul(_input, w), b), softmax_temp), name= name + '_policy')
return w, b, out
def log_softmax(self, name, _input, output_dim):
with tf.name_scope(name):
input_dim = _input.get_shape().as_list()[1]
w = self.fc_weight_variable([input_dim, output_dim], name + '_weights')
b = self.fc_bias_variable([output_dim], input_dim, name + '_biases')
out = tf.nn.log_softmax(tf.add(tf.matmul(_input, w), b), name= name + '_policy')
return w, b, out
def max_pooling(self, name, _input, stride=None, padding='VALID'):
shape = [1,2,2,1]
return tf.nn.max_pool(_input, shape, strides=shape, padding = padding, name=name)
def rnn(self, name, _input, n_input, n_steps, n_hidden):
with tf.name_scope(name):
# input shape: (batch_size, n_steps, n_input)
# Required shape: 'n_steps' tensors list of shape (batch_size, n_input)
_input = tf.transpose(_input, [1, 0, 2])
_input = tf.reshape(_input, [-1, n_input])
_input = tf.split(_input, n_steps, axis=0)
lstm_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
outputs, states = rnn.static_rnn(lstm_cell, _input, dtype=tf.float32)
w = tf.Variable(tf.random_normal([n_hidden, n_hidden]))
b = tf.Variable(tf.random_normal([n_hidden]))
# Linear activation, using rnn inner loop last output
return w, b, tf.nn.bias_add(tf.matmul(outputs[-1], w), b)
class Network(object):
def __init__(self, conf):
self.name = conf['name']
self.num_actions = conf['num_actions']
self.clip_norm = conf['clip_norm']
self.clip_norm_type = conf['clip_norm_type']
self.device = conf['device']
self.rgb = conf['rgb']
self.activation = conf['activation']
self.alpha_leaky_relu = conf['alpha_leaky_relu']
self.depth = 1
if self.rgb : self.depth = 3
self.op = Operations(conf)
self.total_repetitions = conf['nb_choices']
self.NB_IMAGES = 4
with tf.device(self.device):
with tf.name_scope('Input'):
self.loss_scaling = 5.0
self.input_ph = tf.placeholder(tf.uint8, [None, 84, 84, self.depth* self.NB_IMAGES], name='input')
self.selected_action_ph = tf.placeholder("float32", [None, self.num_actions], name="selected_action")
self.selected_repetition_ph = tf.placeholder("float32", [None, self.total_repetitions], name="selected_repetition")
self.input = tf.scalar_mul(1.0/255.0, tf.cast(self.input_ph, tf.float32))
# This class should never be used, must be subclassed
# The output layer
self.output = None
def init(self, checkpoint_folder, saver, session):
last_saving_step = 0
with tf.device('/cpu:0'):
# Initialize network parameters
path = tf.train.latest_checkpoint(checkpoint_folder)
if path is None:
logging.info('Initializing all variables')
session.run(tf.global_variables_initializer())
else:
logging.info('Restoring network variables from previous run')
saver.restore(session, path)
last_saving_step = int(path[path.rindex('-')+1:])
return last_saving_step
class NIPSNetwork(Network):
def __init__(self, conf):
super(NIPSNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope('Network'):
w_conv1, b_conv1, conv1 = self.op.conv2d('conv1', self.input, 16, 8, self.depth*self.NB_IMAGES, 4, activation = self.activation)
w_conv2, b_conv2, conv2 = self.op.conv2d('conv2', conv1, 32, 4, 16, 2, activation = self.activation)
w_fc3, b_fc3, fc3 = self.op.fc('fc3', self.op.flatten(conv2), 256, activation=self.activation)
self.convs = [conv1, conv2]
self.output = fc3
class BayesianNetwork(NIPSNetwork):
def __init__(self, conf):
super(BayesianNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope('Network'):
dropout = tf.nn.dropout(self.output, conf["keep_percentage"])
w_fc4, b_fc4, fc4 = self.op.fc('fc4', dropout, 256, activation=self.activation)
self.output = fc4
class PpwwyyxxNetwork(Network):
def __init__(self, conf):
super(PpwwyyxxNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope('Network'):
_, _, conv1 = self.op.conv2d('conv1', self.input, 32, 5, self.depth * self.NB_IMAGES, 1, padding = 'SAME', activation = self.activation)
mp_conv1 = self.op.max_pooling('mp_conv1', conv1)
_, _, conv2 = self.op.conv2d('conv2', mp_conv1, 32, 5, 32, 1, padding = 'SAME', activation = self.activation)
mp_conv2 = self.op.max_pooling('mp_conv2', conv2)
_, _, conv3 = self.op.conv2d('conv3', mp_conv2, 64, 4, 32, 1, padding = 'SAME', activation = self.activation)
mp_conv3 = self.op.max_pooling('mp_conv3', conv3)
_, _, conv4 = self.op.conv2d('conv4', mp_conv3, 64, 3, 64, 1, padding = 'SAME', activation = self.activation)
self.convs = [conv1, conv2, conv3, conv4]
_, _, fc5 = self.op.fc('fc5', self.op.flatten(conv4), 512, activation=self.activation)
self.output = fc5
class LSTMNetwork(Network):
def __init__(self, conf):
super(LSTMNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope('Network'):
n_input = 6400
n_steps = 5
n_hidden = 32
n_outputs = 128
self.memory_ph = tf.placeholder(tf.uint8, [None, n_steps, 84, 84, self.depth* 4], name='input_memory')
_input = tf.scalar_mul(1.0/255.0, tf.cast(self.memory_ph, tf.float32))
_input = tf.reshape(_input, (-1, 84, 84, self.depth*4))
_, _, conv1 = self.op.conv2d('conv1', _input, 32, 5, self.depth * 4, 1, padding = 'SAME', activation = self.activation)
mp_conv1 = self.op.max_pooling('mp_conv1', conv1)
_, _, conv2 = self.op.conv2d('conv2', mp_conv1, 32, 5, 32, 1, padding = 'SAME', activation = self.activation)
mp_conv2 = self.op.max_pooling('mp_conv2', conv2)
_, _, conv3 = self.op.conv2d('conv3', mp_conv2, 64, 4, 32, 1, padding = 'SAME', activation = self.activation)
mp_conv3 = self.op.max_pooling('mp_conv3', conv3)
_, _, conv4 = self.op.conv2d('conv4', mp_conv3, 64, 3, 64, 1, padding = 'SAME', activation = self.activation)
self.first_conv, self.last_conv = conv1, conv4
self.out_conv = self.op.flatten(conv4)
input_lstm = tf.reshape(self.out_conv, (-1, n_steps, n_input))
_, _, out_lstm = self.op.rnn('lstm', input_lstm, n_input, n_steps, n_hidden)
_, _, fc6 = self.op.fc('fc6', out_lstm, n_outputs, activation=self.activation)
self.output = fc6
class NatureNetwork(Network):
def __init__(self, conf):
super(NatureNetwork, self).__init__(conf)
with tf.device(self.device):
with tf.name_scope('Network'):
_, _, conv1 = self.op.conv2d('conv1', self.input, 32, 8, self.depth*self.NB_IMAGES, 4, activation = self.activation)
_, _, conv2 = self.op.conv2d('conv2', conv1, 64, 4, 32, 2, activation = self.activation)
_, _, conv3 = self.op.conv2d('conv3', conv2, 64, 3, 64, 1, activation = self.activation)
self.convs = [conv1, conv2, conv3]
_, _, fc4 = self.op.fc('fc4', self.op.flatten(conv3), 512, activation=self.activation)
self.output = fc4
```
#### File: jolibrain/manette/paac.py
```python
import time
import logging
import numpy as np
from multiprocessing import Queue
from multiprocessing.sharedctypes import RawArray
from ctypes import c_uint, c_float
from actor_learner import *
from logger_utils import variable_summaries, plot_conv_output
from emulator_runner import EmulatorRunner
from exploration_policy import Action
from runners import Runners
class PAACLearner(ActorLearner):
def __init__(self, network_creator, environment_creator, explo_policy, args):
super(PAACLearner, self).__init__(network_creator, environment_creator, explo_policy, args)
self.workers = args.emulator_workers
self.total_repetitions = args.nb_choices
self.lstm_bool = (args.arch == 'LSTM')
self.tab_rep = explo_policy.tab_rep
#add the parameters to tensorboard
sess = tf.InteractiveSession()
file_args = open(args.debugging_folder+"args.json", 'r')
text = str(file_args.read())
summary_op = tf.summary.text('text', tf.convert_to_tensor(text))
text = sess.run(summary_op)
self.summary_writer.add_summary(text,0)
self.summary_writer.flush()
sess.close()
def _get_shared(self, array, dtype=c_float):
"""
Returns a RawArray backed numpy array that can be shared between processes.
:param array: the array to be shared
:param dtype: the RawArray dtype to use
:return: the RawArray backed numpy array """
shape = array.shape
shared = RawArray(dtype, array.reshape(-1))
return np.frombuffer(shared, dtype).reshape(shape)
def log_histogram(self, tag, values, step, bins=1000):
"""Logs the histogram of a list/vector of values"""
counts, bin_edges = np.histogram(values, bins=bins)
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values**2))
bin_edges = bin_edges[1:]
for edge in bin_edges : hist.bucket_limit.append(edge)
for c in counts : hist.bucket.append(c)
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
self.summary_writer.add_summary(summary, step)
self.summary_writer.flush()
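        # The histogram is built by hand as a tf.HistogramProto from np.histogram counts,
        # so arbitrary numpy arrays can be logged to TensorBoard without adding graph ops.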
def log_values(self, values, tag, length = 50, timestep = 500):
if len(values) > length and self.global_step % timestep == 0 :
mean = np.mean(values[-50:])
std = np.std(values[-50:])
summary = tf.Summary(value=[
tf.Summary.Value(tag=tag+'/mean', simple_value=mean),
tf.Summary.Value(tag=tag+'/min', simple_value=min(values[-50:])),
tf.Summary.Value(tag=tag+'/max', simple_value=max(values[-50:])),
tf.Summary.Value(tag=tag+'/std', simple_value=std),
tf.Summary.Value(tag=tag+'/std_over_mean', simple_value=min(2, np.absolute(std/mean)))
])
self.summary_writer.add_summary(summary, self.global_step)
self.summary_writer.flush()
def update_memory(self, memory, shared_states, whole_memory, t):
whole_memory[t] = memory
memory[:, :-1, :, :, :] = memory[:, 1:, :, :, :]
memory[:, -1, :, :, :] = shared_states
return memory, whole_memory
def train(self):
""" Main actor learner loop for parallel advantage actor critic learning."""
self.global_step = self.init_network()
global_step_start = self.global_step
counter = 0
total_rewards = []
total_steps = []
logging.debug("Starting training at Step {}".format(self.global_step))
# state, reward, episode_over, action, repetition
variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),
(np.zeros(self.emulator_counts, dtype=np.float32)),
(np.asarray([False] * self.emulator_counts, dtype=np.float32)),
(np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32)),
(np.zeros((self.emulator_counts, self.total_repetitions), dtype=np.float32))]
self.runners = Runners(self.tab_rep, EmulatorRunner, self.emulators, self.workers, variables)
self.runners.start()
shared_states, shared_rewards, shared_episode_over, shared_actions, shared_rep = self.runners.get_shared_variables()
if self.lstm_bool :
self.n_steps = 5
memory = np.zeros(([self.emulator_counts, self.n_steps]+list(shared_states.shape)[1:]), dtype=np.uint8)
whole_memory = np.zeros(([self.max_local_steps, self.emulator_counts, self.n_steps]+list(shared_states.shape)[1:]), dtype=np.uint8)
for e in range(self.emulator_counts) :
memory[e, -1, :, :, :] = shared_states[e]
summaries_op = tf.summary.merge_all()
emulator_steps = [0] * self.emulator_counts
total_episode_rewards = self.emulator_counts * [0]
actions_sum = np.zeros((self.emulator_counts, self.num_actions))
y_batch = np.zeros((self.max_local_steps, self.emulator_counts))
adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))
rewards = np.zeros((self.max_local_steps, self.emulator_counts))
states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)
actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))
repetitions = np.zeros((self.max_local_steps, self.emulator_counts, self.total_repetitions))
values = np.zeros((self.max_local_steps, self.emulator_counts))
episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))
start_time = time.time()
while self.global_step < self.max_global_steps:
print('step : '+str(self.global_step))
loop_start_time = time.time()
total_action_rep = np.zeros((self.num_actions, self.total_repetitions))
nb_actions = 0
max_local_steps = self.max_local_steps
for t in range(max_local_steps):
#Choose actions and repetitions for each emulator
if not self.lstm_bool :
readouts_v_t, readouts_pi_t, readouts_rep_t = self.session.run(
[self.network.output_layer_v, self.network.output_layer_pi, self.network.output_layer_rep],
feed_dict={self.network.input_ph: shared_states})
new_actions, new_repetitions = self.explo_policy.choose_next_actions(readouts_pi_t, readouts_rep_t, self.num_actions)
else :
readouts_v_t, readouts_pi_t, readouts_rep_t = self.session.run(
[self.network.output_layer_v, self.network.output_layer_pi, self.network.output_layer_rep],
feed_dict={self.network.memory_ph: memory})
new_actions, new_repetitions = self.explo_policy.choose_next_actions(readouts_pi_t, readouts_rep_t, self.num_actions)
actions_sum += new_actions
for e in range(self.emulator_counts) :
nb_actions += np.argmax(new_repetitions[e]) + 1
# sharing the actions and repetitions to the different threads
for z in range(new_actions.shape[0]): shared_actions[z] = new_actions[z]
for z in range(new_repetitions.shape[0]): shared_rep[z] = new_repetitions[z]
actions[t] = new_actions
values[t] = readouts_v_t
states[t] = shared_states
repetitions[t] = new_repetitions
# Start updating all environments with next_actions
self.runners.update_environments()
self.runners.wait_updated()
# Done updating all environments, have new states, rewards and is_over
if self.lstm_bool :
memory, whole_memory = self.update_memory(memory, shared_states, whole_memory, t)
episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)
for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):
total_episode_rewards[e] += actual_reward
actual_reward = self.rescale_reward(actual_reward)
rewards[t, e] = actual_reward
emulator_steps[e] += self.tab_rep[np.argmax(new_repetitions[e])] + 1
self.global_step += 1
                    # fill the table for the action/repetition histogram
a = np.argmax(new_actions[e])
r = np.argmax(new_repetitions[e])
total_action_rep[a][r] += 1
if episode_over:
total_rewards.append(total_episode_rewards[e])
total_steps.append(emulator_steps[e])
episode_summary = tf.Summary(value=[
tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),
tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e])
])
self.summary_writer.add_summary(episode_summary, self.global_step)
self.summary_writer.flush()
total_episode_rewards[e] = 0
emulator_steps[e] = 0
if self.lstm_bool :
memory[e] = np.zeros(([self.n_steps]+list(shared_states.shape)[1:]), dtype=np.uint8)
actions_sum[e] = np.zeros(self.num_actions)
##plot output of conv layers
# with tf.name_scope('Summary_ConvNet'):
# if self.global_step % (10000*self.emulator_counts*self.max_local_steps) == 0:
# convs = self.session.run(self.network.convs,
# feed_dict= {self.network.input_ph: [shared_states[0]]})
# imgs = [np.array([utils.plot_conv_output(conv)]) for conv in convs]
# sums = [tf.summary.image('conv'+str(i), imgs[i], 1) for i in range(len(imgs))]
# real_sums = self.session.run(sums)
# for s in real_sums : self.summary_writer.add_summary(s, self.global_step)
# self.summary_writer.flush()
if self.lstm_bool :
nest_state_value = self.session.run(
self.network.output_layer_v, feed_dict={self.network.memory_ph: memory })
else :
nest_state_value = self.session.run(
self.network.output_layer_v, feed_dict={self.network.input_ph: shared_states})
estimated_return = np.copy(nest_state_value)
for t in reversed(range(max_local_steps)):
estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]
y_batch[t] = np.copy(estimated_return)
adv_batch[t] = estimated_return - values[t]
if self.lstm_bool :
flat_states = whole_memory.reshape([self.max_local_steps * self.emulator_counts, self.n_steps] + list(shared_states.shape)[1:])
else :
flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])
flat_y_batch = y_batch.reshape(-1)
flat_adv_batch = adv_batch.reshape(-1)
flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)
flat_rep = repetitions.reshape(max_local_steps * self.emulator_counts, self.total_repetitions)
lr = self.get_lr()
feed_dict = {self.network.critic_target_ph: flat_y_batch,
self.network.selected_action_ph: flat_actions,
self.network.selected_repetition_ph: flat_rep,
self.network.adv_actor_ph: flat_adv_batch,
self.learning_rate: lr}
if self.lstm_bool :
feed_dict[self.network.memory_ph] = flat_states
else :
feed_dict[self.network.input_ph] = flat_states
_, summaries = self.session.run(
[self.train_step, summaries_op],
feed_dict=feed_dict)
self.summary_writer.add_summary(summaries, self.global_step)
param_summary = tf.Summary(value=[
tf.Summary.Value(tag='parameters/lr', simple_value=lr)
])
self.summary_writer.add_summary(param_summary, self.global_step)
self.summary_writer.flush()
self.log_values(total_rewards, 'rewards_per_episode')
self.log_values(total_steps, 'steps_per_episode')
            # add the action/repetition histogram
nb_a = [ sum(a) for a in total_action_rep]
nb_r = [ sum(r) for r in np.transpose(total_action_rep) ]
histo_a, histo_r = [], []
for i in range(self.num_actions) : histo_a += [i]*int(nb_a[i])
for i in range(self.total_repetitions) : histo_r += [self.tab_rep[i]+1]*int(nb_r[i])
self.log_histogram('actions', np.array(histo_a), self.global_step)
self.log_histogram('repetitions', np.array(histo_r), self.global_step)
counter += 1
if counter % (2048 / self.emulator_counts) == 0:
curr_time = time.time()
last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])
steps_per_sec = self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time)
actions_per_s = nb_actions / (curr_time - loop_start_time)
average_steps_per_sec = (self.global_step - global_step_start) / (curr_time - start_time)
logging.info("Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}"
.format(self.global_step, steps_per_sec, average_steps_per_sec, last_ten))
stats_summary = tf.Summary(value=[
tf.Summary.Value(tag='stats/steps_per_s', simple_value=steps_per_sec),
tf.Summary.Value(tag='stats/average_steps_per_s', simple_value=average_steps_per_sec),
tf.Summary.Value(tag='stats/actions_per_s', simple_value=actions_per_s)
])
self.summary_writer.add_summary(stats_summary, self.global_step)
self.summary_writer.flush()
self.save_vars()
self.cleanup()
def cleanup(self):
super(PAACLearner, self).cleanup()
self.runners.stop()
```
#### File: manette/scripts/genGifs.py
```python
import argparse
import subprocess
import os
def create_cmd(args, f, path):
cmd = ("python3 test.py -f "+path+
" -tc "+str(args.test_count)+
" -np "+str(args.noops)+
" -gn "+f+str(args.checkpoint)+
" -gf "+args.folder+"gifs/")
return cmd
def main(args):
path = args.folder+"gifs"
if not os.path.exists(path):
os.makedirs(path)
if args.game_folder == '' :
print(os.listdir(args.folder))
for f in os.listdir(args.folder):
if f != "gifs":
if args.checkpoint == 0 :
pathSrc = args.folder+f
else :
pathSrc = args.folder+f+"/checkpoints_saved/"+str(args.checkpoint)+"/"
subprocess.call(create_cmd(args, f, pathSrc), shell = True)
else :
f = args.game_folder
if args.checkpoint == 0 :
pathSrc = args.folder+f
else :
pathSrc = args.folder+f+"/checkpoints_saved/"+str(args.checkpoint)+"/"
subprocess.call(create_cmd(args, f, pathSrc), shell = True)
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('-gf', default= '', type=str, help='Name of the game folder to generate. Default : all folder are generated.', dest='game_folder')
parser.add_argument('-f', '--folder', type=str, help="Folder where is saved the logs of all games.", dest="folder", required=True)
parser.add_argument('-tc', '--test_count', default='1', type=int, help="The amount of tests to run on the given network", dest="test_count")
parser.add_argument('-cp', '--checkpoint', default='0', type=int, help="The checkpoint from which to run the test", dest="checkpoint")
parser.add_argument('-np', '--noops', default=30, type=int, help="Maximum amount of no-ops to use", dest="noops")
return parser
if __name__ == '__main__':
args = get_arg_parser().parse_args()
main(args)
```
#### File: jolibrain/manette/test.py
```python
import os
from train import get_network_and_environment_creator
import logger_utils
import argparse
import numpy as np
import time
import tensorflow as tf
import random
from paac import PAACLearner
from exploration_policy import ExplorationPolicy, Action
def get_save_frame(name):
import imageio
writer = imageio.get_writer(name + '.gif', fps=30)
def get_frame(frame):
writer.append_data(frame)
return get_frame
def update_memory(memory, states):
memory[:, :-1, :, :, :] = memory[:, 1:, :, :, :]
memory[:, -1, :, :, :] = states
return memory
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', type=str, help="Folder where to save the debugging information.", dest="folder", required=True)
parser.add_argument('-tc', '--test_count', default='1', type=int, help="The amount of tests to run on the given network", dest="test_count")
parser.add_argument('-np', '--noops', default=30, type=int, help="Maximum amount of no-ops to use", dest="noops")
parser.add_argument('-gn', '--gif_name', default=None, type=str, help="If provided, a gif will be produced and stored with this name", dest="gif_name")
parser.add_argument('-gf', '--gif_folder', default='', type=str, help="The folder where to save gifs.", dest="gif_folder")
parser.add_argument('-d', '--device', default='/gpu:0', type=str, help="Device to be used ('/cpu:0', '/gpu:0', '/gpu:1',...)", dest="device")
args = parser.parse_args()
arg_file = os.path.join(args.folder, 'args.json')
device = args.device
for k, v in logger_utils.load_args(arg_file).items():
setattr(args, k, v)
args.max_global_steps = 0
df = args.folder
args.debugging_folder = '/tmp/logs'
args.device = device
args.random_start = False
args.single_life_episodes = False
if args.gif_name:
args.visualize = 1
args.actor_id = 0
rng = np.random.RandomState(int(time.time()))
args.random_seed = rng.randint(1000)
explo_policy = ExplorationPolicy(args, test = False)
network_creator, env_creator = get_network_and_environment_creator(args, explo_policy)
network = network_creator()
saver = tf.train.Saver()
rewards = []
environments = [env_creator.create_environment(i) for i in range(args.test_count)]
if args.gif_name:
for i, environment in enumerate(environments):
environment.on_new_frame = get_save_frame(os.path.join(args.gif_folder, args.gif_name + str(i)))
config = tf.ConfigProto(allow_soft_placement = True)
if 'gpu' in args.device:
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
checkpoints_ = os.path.join(df, 'checkpoints')
network.init(checkpoints_, saver, sess)
states = np.asarray([environment.get_initial_state() for environment in environments])
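        # Random no-op starts: each environment performs a random number (up to args.noops)
        # of no-op actions (action 0) before evaluation, a common way to de-correlate initial states.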
if args.noops != 0:
for i, environment in enumerate(environments):
for _ in range(random.randint(0, args.noops)):
state, _, _ = environment.next(0)
states[i] = state
if args.arch == 'LSTM':
n_steps = 5
memory = np.zeros(([args.test_count, n_steps]+list(states.shape)[1:]), dtype=np.uint8)
for e in range(args.test_count):
memory[e, -1, :, :, :] = states[e]
episodes_over = np.zeros(args.test_count, dtype=np.bool)
rewards = np.zeros(args.test_count, dtype=np.float32)
while not all(episodes_over):
if args.arch == 'LSTM' :
readouts_pi_t, readouts_rep_t = sess.run(
[network.output_layer_pi, network.output_layer_rep],
feed_dict={network.memory_ph: memory})
else :
readouts_pi_t, readouts_rep_t = sess.run(
[network.output_layer_pi, network.output_layer_rep],
feed_dict={network.input_ph: states})
actions, repetitions = explo_policy.choose_next_actions(readouts_pi_t, readouts_rep_t, env_creator.num_actions)
for j, environment in enumerate(environments):
macro_action = Action(explo_policy.tab_rep, j, actions[j], repetitions[j])
state, r, episode_over = environment.next(macro_action.current_action)
states[j] = state
rewards[j] += r
episodes_over[j] = episode_over
while macro_action.is_repeated() and not episode_over :
state, r, episode_over = environment.next(macro_action.repeat())
states[j] = state
rewards[j] += r
episodes_over[j] = episode_over
macro_action.reset()
            if args.arch == 'LSTM':
                memory = update_memory(memory, states)
print('Performed {} tests for {}.'.format(args.test_count, args.game))
print('Mean: {0:.2f}'.format(np.mean(rewards)))
print('Min: {0:.2f}'.format(np.min(rewards)))
print('Max: {0:.2f}'.format(np.max(rewards)))
print('Std: {0:.2f}'.format(np.std(rewards)))
``` |
{
"source": "jolibrain/pytorch-CycleGAN-and-pix2pix",
"score": 2
} |
#### File: modules/resnet_architecture/mobile_resnet_generator.py
```python
import functools
import torch
from torch import nn
from models.modules.mobile_modules import SeparableConv2d
#from models.networks import WBlock, NBlock
from ...networks import init_net
import math
import sys
class MobileResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
super(MobileResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, dropout_rate, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, dropout_rate, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
SeparableConv2d(in_channels=dim, out_channels=dim,
kernel_size=3, padding=p, stride=1),
norm_layer(dim), nn.ReLU(True)
]
conv_block += [nn.Dropout(dropout_rate)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
SeparableConv2d(in_channels=dim, out_channels=dim,
kernel_size=3, padding=p, stride=1),
norm_layer(dim)
]
return nn.Sequential(*conv_block)
def forward(self, x):
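        # skip connection: the block output is the input plus the residual computed by conv_block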
out = x + self.conv_block(x)
return out
class MobileResnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf, norm_layer=nn.InstanceNorm2d,
dropout_rate=0, n_blocks=9, padding_type='reflect', decoder=True,
wplus=True, init_type='normal', init_gain=0.02, gpu_ids=[],
img_size=128, img_size_dec=128):
assert (n_blocks >= 0)
self.decoder = decoder
self.wplus = wplus
super(MobileResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
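        # two stride-2 convolutions: each halves the spatial resolution and doubles the
        # channel count, so the encoder goes from ngf to ngf * 4 feature maps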
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
n_blocks1 = n_blocks // 3
n_blocks2 = n_blocks1
n_blocks3 = n_blocks - n_blocks1 - n_blocks2
for i in range(n_blocks1):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
for i in range(n_blocks2):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
for i in range(n_blocks3):
model += [MobileResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
dropout_rate=dropout_rate,
use_bias=use_bias)]
if self.decoder:
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
else:
if wplus == False:
n_feat = 4096 #1024 # 256
to_w = [nn.Linear(n_feat,img_size_dec)] # sty2 image output size
self.to_w = nn.Sequential(*to_w)
self.conv = nn.Conv2d(ngf*mult,1, kernel_size=1)
else:
n_feat = 2**(2*int(math.log(img_size,2)-2))
self.n_wplus = (2*int(math.log(img_size_dec,2)-1))
self.wblocks = nn.ModuleList()
for n in range(0,self.n_wplus):
self.wblocks += [WBlock(ngf*mult,n_feat,init_type,init_gain,gpu_ids)]
self.nblocks = nn.ModuleList()
noise_map = [4,8,8,16,16,32,32,64,64,128,128,256,256,512,512,1024,1024]
for n in range(0,self.n_wplus-1):
self.nblocks += [NBlock(ngf*mult,n_feat,noise_map[n],init_type,init_gain,gpu_ids)]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
# input = input.clamp(-1, 1)
# for i, module in enumerate(self.model):
# print(i, input.size())
# print(module)
# if isinstance(module, nn.Conv2d):
# print(module.stride)
# input = module(input)
# return input
if self.decoder:
return self.model(input)
else:
output = self.model(input)
if not self.wplus:
output = self.conv(output).squeeze(dim=1)
output = torch.flatten(output,1)
output = self.to_w(output).unsqueeze(dim=0)
return output
else:
outputs = []
noutputs = []
for wc in self.wblocks:
outputs.append(wc(output))
outputs=torch.stack(outputs).unsqueeze(0)
for nc in self.nblocks:
noutputs.append(nc(output))
return outputs, noutputs
class WBlock(nn.Module):
"""Define a linear block for W"""
def __init__(self, dim, n_feat, init_type='normal', init_gain=0.02, gpu_ids=[]):
super(WBlock, self).__init__()
self.conv2d = nn.Conv2d(in_channels=dim,out_channels=1,kernel_size=1)
self.lin1 = nn.Linear(n_feat,32,bias=True)
self.lin2 = nn.Linear(32,512,bias=True)
w_block = []
w_block += [self.conv2d,nn.InstanceNorm2d(1),nn.Flatten(),self.lin1,nn.ReLU(True),self.lin2]
self.w_block = init_net(nn.Sequential(*w_block), init_type, init_gain, gpu_ids)
def forward(self, x):
out = self.w_block(x)
return out.squeeze(0)
class NBlock(nn.Module):
"""Define a linear block for N"""
def __init__(self, dim, n_feat, out_feat, init_type='normal', init_gain=0.02, gpu_ids=[]):
super(NBlock, self).__init__()
self.out_feat = out_feat
if out_feat < 32: # size of input
self.conv2d = nn.Conv2d(dim,1,kernel_size=1)
self.lin = nn.Linear(n_feat,out_feat**2)
n_block = []
n_block += [self.conv2d,nn.InstanceNorm2d(1),nn.Flatten(),self.lin]
self.n_block = init_net(nn.Sequential(*n_block), init_type, init_gain, gpu_ids)
else:
self.n_block = []
self.n_block = [SeparableConv2d(in_channels=256,out_channels=32,kernel_size=3,stride=1,padding=1),
nn.InstanceNorm2d(1),
nn.ReLU(True)]
self.n_block += [nn.Upsample((out_feat,out_feat))]
self.n_block += [nn.Conv2d(in_channels=32,out_channels=1,kernel_size=1)]
self.n_block += [nn.Flatten()]
self.n_block = init_net(nn.Sequential(*self.n_block), init_type, init_gain, gpu_ids)
def forward(self, x):
out = self.n_block(x)
return torch.reshape(out.unsqueeze(1),(1,1,self.out_feat,self.out_feat))
``` |
{
"source": "jolibrain/recognition",
"score": 3
} |
#### File: ai/src/dnn_feature_extractor.py
```python
import os, sys
from feature_generator import FeatureGenerator
from index_search import Indexer, Searcher
from dd_client import DD
import shelve
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##TODO: may move to a tools.py
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
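# illustrative usage: list(batch([1, 2, 3, 4, 5], n=2)) == [[1, 2], [3, 4], [5]]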
# class to define a dd DNN model
class DNNModel:
def __init__(self,name,model_repo,nclasses,extract_layer='',best=5,
img_width=224,img_height=224,description=''):
self.name = name
self.description = description
self.extract_layer = extract_layer
self.best = best
self.nclasses = nclasses
self.img_width = img_width
self.img_height = img_height
self.model_repo = model_repo
# a feature extractor for images, from a configurable layer of a pre-trained deep neural (convolutional) net
class DNNFeatureExtractor(FeatureGenerator):
def __init__(self,dnnmodel,image_files,index_repo,
batch_size=32,dd_host='localhost',dd_port=8080,dd_description='image classification',meta_in='',meta_out='',captions_in='',captions_out='',mapi_in='',mapi_out=''):
self.dd_host = dd_host
self.dd_port = dd_port
self.dd_description = dd_description
self.dd_mllib = 'caffe'
self.meta_in = meta_in
self.meta_out = meta_out
self.captions_in = captions_in
self.captions_out = captions_out
self.mapi_in = mapi_in
self.mapi_out = mapi_out
self.gpuid = 0
self.dnnmodel = dnnmodel
if self.dnnmodel.extract_layer:
self.dd_mltype = 'unsupervised'
else:
self.dd_mltype = 'supervised'
self.image_files = image_files
self.batch_size = batch_size
self.binarized = False
self.dd = DD(self.dd_host,self.dd_port)
self.dd.set_return_format(self.dd.RETURN_PYTHON)
self.index_repo = index_repo + '/' + self.dnnmodel.name
try:
os.mkdir(self.index_repo)
except:
            #logger.warning('directory ' + self.index_repo + ' may already exist')
pass
self.st = {} # shelve used for full tags storage
self.stm = {} # in memory tmp storage
if self.dd_mltype == 'supervised':
self.st = shelve.open(self.index_repo + '/tags.bin')
self.delete_dd_service()
def __del__(self):
if self.dd_mltype == 'supervised':
for i,t in self.stm.iteritems():
self.st[i] = t
self.st.close()
def create_dd_service(self):
model = {'repository':self.dnnmodel.model_repo}
parameters_input = {'connector':'image','width':self.dnnmodel.img_width,'height':self.dnnmodel.img_height}
parameters_mllib = {'nclasses':self.dnnmodel.nclasses,'gpu':True,'gpuid':self.gpuid}
parameters_output = {}
screate = self.dd.put_service(self.dnnmodel.name,model,self.dd_description,self.dd_mllib,
parameters_input,parameters_mllib,parameters_output,self.dd_mltype)
outcode = screate['status']['code']
if outcode != 201 and outcode != 403:
logger.error('failed creation of DNN service ' + self.dnnmodel.name)
#return
raise Exception('failed creating DNN service ' + self.dnnmodel.name)
return
def delete_dd_service(self):
self.dd.delete_service(self.dnnmodel.name,clear='')
def preproc(self):
# none needed with dd at the moment
return
def index(self):
## feature generation, to be indexed or searched for
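        # Unsupervised services return raw activations from extract_layer ('vals'), which are
        # indexed as dense feature vectors; supervised services return the top-'best' classes,
        # which are indexed as tags and also stored so search() can return the tags of the
        # nearest neighbours.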
self.create_dd_service()
feature_vectors = []
uris = []
parameters_input = {}
parameters_mllib = {'gpu':True,'gpuid':self.gpuid,'extract_layer':self.dnnmodel.extract_layer}
if self.dd_mltype == 'unsupervised':
parameters_output = {'binarized':self.binarized}
# pass one image to get the size of the output layer
classif = self.dd.post_predict(self.dnnmodel.name,[self.image_files[0]],
parameters_input,parameters_mllib,parameters_output)
response_code = classif['status']['code']
if response_code != 200:
print 'response=',classif
logger.error('failed (index) initial prediction call to model ' + self.dnnmodel.name + ' via dd')
self.delete_dd_service()
return
dim = len(classif['body']['predictions']['vals'])
else:
parameters_output = {'best':self.dnnmodel.best}
dim = self.dnnmodel.nclasses
c = 0
logger.info('dnn feature prediction and indexing for service ' + self.dnnmodel.name + ' with layer of size ' + str(dim))
with Indexer(dim,self.index_repo) as indexer:
for x in batch(self.image_files,self.batch_size):
classif = self.dd.post_predict(self.dnnmodel.name,x,
parameters_input,parameters_mllib,parameters_output)
#print classif
response_code = classif['status']['code']
if response_code != 200:
print 'response=',classif
logger.error('failed (index) batch prediction call to model ' + self.dnnmodel.name + ' via dd')
continue
predictions = classif['body']['predictions']
if self.batch_size == 1 or len(self.image_files) == 1:
predictions = [predictions]
for p in predictions:
if self.dd_mltype == 'unsupervised':
indexer.index_single(c,p['vals'],p['uri'])
if c > 0 and c % self.batch_size == 0:
logger.info('indexed ' + str(c) + ' images')
else:
puri = str(p['uri'])
indexer.index_tags_single(p['classes'],p['uri'])
self.stm[puri] = []
for pc in p['classes']:
self.stm[puri].append(pc['cat'])
c = c + 1
indexer.build_index()
indexer.save_index()
logger.info('indexed a total of ' + str(c) + ' images')
self.delete_dd_service()
def search(self,jdataout={}):
self.create_dd_service()
parameters_input = {}
parameters_mllib = {'gpu':True,'gpuid':self.gpuid,'extract_layer':self.dnnmodel.extract_layer}
if self.dd_mltype == 'unsupervised':
parameters_output = {'binarized':self.binarized}
else:
parameters_output = {'best':self.dnnmodel.best}
logger.info('dnn feature prediction and searching for service ' + self.dnnmodel.name)
results = {}
with Searcher(self.index_repo,search_size=500) as searcher:
searcher.load_index()
for x in batch(self.image_files,self.batch_size):
classif = self.dd.post_predict(self.dnnmodel.name,x,
parameters_input,parameters_mllib,parameters_output)
response_code = classif['status']['code']
if response_code != 200:
print 'response=',classif
logger.error('failed batch (search) prediction call to model ' + self.dnnmodel.name + ' via dd')
self.delete_dd_service()
print classif
raise Exception('failed batch (search) prediction call to model ' + self.dnnmodel.name)
predictions = classif['body']['predictions']
if self.batch_size == 1 or len(self.image_files) == 1:
predictions = [predictions]
#print 'predictions=',predictions
for p in predictions:
if self.dd_mltype == 'unsupervised':
nns = searcher.search_single(p['vals'],p['uri'])
else:
puri = str(p['uri'])
nns = searcher.search_tags_single(p['classes'],puri)
nns['tags_out_all'] = []
for nn in nns['nns_uris']:
nns['tags_out_all'].append(self.st[str(nn)])
results[p['uri']] = nns
self.delete_dd_service()
return self.to_json(results,'/img/reuters/','/img/tate/',self.dnnmodel.name,self.dnnmodel.description,jdataout,self.meta_in,self.meta_out,self.captions_in,self.captions_out,self.mapi_in,self.mapi_out)
```
#### File: ai/src/ensembling.py
```python
class EnsemblingScores:
def __init__(self):
self.factors = {'composition_high_1':0.3,'composition':0.3,'places_composition':0.3,'places':0.2,'categories':0.1,'densecap':0.25,'mapi_tags':0.1,'mapi_cats':0.1,'mapi_people':0.2,'mapi':0.05,'txtembed':0.1}
self.summary_map = {'composition':'composition','places_composition':'composition','places':'context','categories':'context','densecap':'objects','mapi_cats':'context','mapi_tags':'context','mapi_people':'faces','mapi':'faces','txtembed':'context'}
return
# json_out in UI format, simple additive ensembling
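    # Rough scheme: final_score accumulates factor * score for composition_high_1 and for every
    # other non-composition generator that reports a score; the remaining 'composition*'
    # generators are averaged (sum of factor * score divided by their count) and added on top.
    # 'categories' only contributes to the per-category counters, and generators without a score
    # are excluded from the normalization of summary['scores'].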
def ensembling(self,json_out):
for k,m in json_out.iteritems(): # iterate matches
for o in m['output']: # iterate candidates for a given match
summary = {'scores':{'objects':0.0,'faces':0.0,'composition':0.0,'context':0.0},
'processing_time':0.0}
summary_sum = 0.0
final_score = 0.0
comp_score = 0.0 # compositional score
comp_num = 0.0
for g,v in o['features']['out'].iteritems(): # iterate generators of a match
factor = 1.0
for fa,fv in self.factors.iteritems():
if fa in g:
factor = fv
break
if g == 'composition_high_1':
#comp_num += 1.0
#comp_score += 3.0*v['score']#(1.0/comp_num) * factor*v['score']
final_score += self.factors[g]*v['score']
summary['scores'][self.summary_map['composition']] += 1.0
elif 'composition' in g:
comp_num += 1.0
comp_score += factor*v['score']#(1.0/comp_num) * factor*v['score']
summary['scores'][self.summary_map['composition']] += 1.0
elif 'categories' in g:
summary['scores'][self.summary_map['categories']] += 1.0
else:
if 'score' in v: # typically, captions have no score at the moment
final_score += factor*v['score']
summary['scores'][self.summary_map[g]] += 1.0
else:
summary_sum -= 1.0
summary_sum += 1.0
if comp_num > 0.0:
final_score += comp_score / comp_num
o['features']['score'] = final_score
for s,sv in summary['scores'].iteritems():
sv /= summary_sum
summary['scores'][s] = sv
o['features']['summary'] = summary
return json_out
``` |
{
"source": "joliejuly/blog",
"score": 2
} |
#### File: static/script versions/date.py
```python
import datetime
import functools
import time
import sqlite3
import re
from flask import (Flask, request, render_template, session, url_for,
redirect, flash)
app = Flask(__name__)
app.secret_key = '<KEY>'
app.password = '<PASSWORD>!'
#app.database = 'myblog.db'
#app.config['DEBUG'] = True
#login required wrapper
def login_required(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
if session.get("logged_in"):
return fn(*args, **kwargs)
return redirect(url_for('login', next=request.path))
return inner
def publish_later(desired_time, rowid):
""" Checks if it is time
to publish an entry """
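    # Busy-waits (polling in a loop) until the scheduled timestamp has passed, then
    # marks the entry as published in the database. Note: this blocks the caller.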
while(1):
time_now = datetime.datetime.now()
time_now = time_now.timestamp()
post_time = datetime.datetime.strptime(desired_time, "%Y-%m-%d-%H-%M")
post_time = post_time.timestamp()
if time_now >= post_time:
break
conn = sqlite3.connect("myblog.db")
db = conn.cursor()
mytuple = (1, rowid)
db.execute("update entries set published=? where rowid=?", mytuple)
@app.route('/')
def index():
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
mytuple = (1,)
db.execute("select * from entries where published=? order by rowid desc", mytuple)
entries = db.fetchall()
#
return render_template('index.html', entries=entries)
@app.route('/create', methods = ["GET", "POST"])
@login_required
def create():
if request.method == "POST":
if request.form["title"] and request.form["content"]:
conn = sqlite3.connect("myblog.db")
db = conn.cursor()
title = request.form.get("title")
content = request.form.get("content")
localtime = time.asctime( time.localtime(time.time()) )
if request.form.get("published"):
mytuple = (title, content, 1, localtime)
db.execute("""insert into entries(title, content, published, date) values(?, ?, ?, ?)""", mytuple)
conn.commit()
conn.close()
else:
mytuple = (title, content, 0, localtime)
db.execute("""insert into entries(title, content, published, date) values(?, ?, ?, ?)""", mytuple)
conn.commit()
if request.form.get("to_be_published"):
#datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
#outputs 2017-10-11-15-26 (2017, 11 oct, 15:26)
# this will be string "2017-10-11T09:00"
to_be_published = request.form.get("to_be_published")
#makes from 2017-10-11T09:00 this: 2017-10-11-09-00
to_be_published = re.sub(r'[T:]', r'-', to_be_published)
title = request.form.get("title")
mytuple = (title, )
db.execute("""select rowid from entries
where title=?
""", mytuple)
rowid = db.fetchone() #returns a tuple (1, ); to access 1 as int do this: rowid[0]
conn.close()
flash("Your post has been created successfully")
publish_later(to_be_published, rowid[0])
conn.close()
flash("Your post has been created successfully")
else:
flash("You haven't entered text or title")
return render_template('create.html')
return render_template('create.html')
@app.route('/login', methods=["GET", "POST"])
def login():
if request.method == "POST":
if request.form.get("password") == app.password:
session['logged_in'] = True
flash("You are logged in")
return redirect(url_for("index"))
else:
flash("Sorry, wrong password")
return render_template('login.html')
return render_template("login.html")
@app.route('/edit', methods=["POST", "GET"])
@login_required
def edit():
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
if request.method == "POST":
if request.form.get("edit_btn") == "edit":
entry_id = int(request.form.get("edit"))
mytuple = (entry_id, )
db.execute("select rowid, * from entries where rowid=?", mytuple)
entry = db.fetchone()
return render_template("edit1.html", entry=entry)
if request.form.get("delete") == "delete":
entry_id = int(request.form.get("edit"))
mytuple = (entry_id, )
db.execute("delete from entries where rowid=?", mytuple)
conn.commit()
flash("Entry has been deleted successfully")
return redirect(url_for("edit"))
else:
mytuple = (1,)
db.execute("select rowid, * from entries where published=? order by rowid desc", mytuple)
entries = db.fetchall()
conn.close()
return render_template('edit.html', entries=entries)
@app.route("/update", methods=["POST", "GET"])
@login_required
def update():
if request.method == "POST":
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
rowid = int(request.form.get("rowid"))
title = request.form.get("title")
content = request.form.get("content")
mydic = {"title": title,
"content": content,
"rowid": rowid}
db.execute("""update entries set
title=:title,
content=:content
where rowid=:rowid""", mydic)
conn.commit()
conn.close()
flash("Changes saved successfully")
return redirect(url_for("edit"))
return redirect(url_for("edit"))
@app.route('/logout', methods=["POST", "GET"])
@login_required
def logout():
if request.method == "POST":
session.clear()
flash('You are logged out')
return redirect(url_for('index'))
return redirect(url_for('login'))
if __name__ == '__main__':
app.run(debug=True)
```
#### File: static/script versions/login.py
```python
import functools
import time
import sqlite3
from flask import (Flask, request, render_template, session, url_for,
redirect, flash)
app = Flask(__name__)
app.secret_key = '<KEY>'
app.password = '<PASSWORD>!'
#app.database = 'myblog.db'
#app.config['DEBUG'] = True
#login required wrapper
def login_required(fn):
@functools.wraps(fn)
def inner(*args, **kwargs):
if session.get("logged_in"):
return fn(*args, **kwargs)
return redirect(url_for('login', next=request.path))
return inner
@app.route('/')
def index():
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
mytuple = (1,)
db.execute("select * from entries where published=? order by rowid desc", mytuple)
entries = db.fetchall()
#
return render_template('index.html', entries=entries)
@app.route('/create', methods = ["GET", "POST"])
@login_required
def create():
if request.method == "POST":
if request.form["title"] and request.form["content"]:
conn = sqlite3.connect("myblog.db")
db = conn.cursor()
title = request.form.get("title")
content = request.form.get("content")
localtime = time.asctime( time.localtime(time.time()) )
if request.form.get("published"):
mytuple = (title, content, 1, localtime)
db.execute("""insert into entries(title, content, published, date) values(?, ?, ?, ?)""", mytuple)
else:
mytuple = (title, content, 0, localtime)
db.execute("""insert into entries(title, content, published, date) values(?, ?, ?, ?)""", mytuple)
flash("Your post has been created successfully")
conn.commit()
conn.close()
else:
flash("You haven't entered text or title")
return render_template('create.html')
return render_template('create.html')
@app.route('/login', methods=["GET", "POST"])
def login():
if request.method == "POST":
if request.form.get("password") == app.password:
session['logged_in'] = True
flash("You are logged in")
return redirect(url_for("index"))
else:
flash("Sorry, wrong password")
return render_template('login.html')
return render_template("login.html")
@app.route('/edit', methods=["POST", "GET"])
@login_required
def edit():
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
if request.method == "POST":
if request.form.get("edit_btn") == "edit":
entry_id = int(request.form.get("edit"))
mytuple = (entry_id, )
db.execute("select rowid, * from entries where rowid=?", mytuple)
entry = db.fetchone()
return render_template("edit1.html", entry=entry)
if request.form.get("delete") == "delete":
entry_id = int(request.form.get("edit"))
mytuple = (entry_id, )
db.execute("delete from entries where rowid=?", mytuple)
conn.commit()
flash("Entry has been deleted successfully")
return redirect(url_for("edit"))
else:
mytuple = (1,)
db.execute("select rowid, * from entries where published=? order by rowid desc", mytuple)
entries = db.fetchall()
conn.close()
return render_template('edit.html', entries=entries)
@app.route("/update", methods=["POST", "GET"])
@login_required
def update():
if request.method == "POST":
conn = sqlite3.connect("myblog.db")
conn.row_factory = sqlite3.Row
db = conn.cursor()
rowid = int(request.form.get("rowid"))
title = request.form.get("title")
content = request.form.get("content")
mydic = {"title": title,
"content": content,
"rowid": rowid}
db.execute("""update entries set
title=:title,
content=:content
where rowid=:rowid""", mydic)
conn.commit()
conn.close()
flash("Changes saved successfully")
return redirect(url_for("edit"))
return redirect(url_for("edit"))
@app.route('/logout', methods=["POST", "GET"])
@login_required
def logout():
if request.method == "POST":
session.clear()
flash('You are logged out')
return redirect(url_for('index'))
return redirect(url_for('login'))
if __name__ == '__main__':
app.run(debug=True)
``` |
{
"source": "JolietCyborgs4241/cyborg_sensing",
"score": 3
} |
#### File: cyborg_sensing/r+d/FindDistance.py
```python
import libjevois as jevois
import cv2
import numpy as np
## Simple example of image processing using OpenCV in Python on JeVois
#
# This module is a basic FRC vision process.
#
# By default, it first gets an image, blurs it, extracts the green channel, thresholds that, and uses the threshold to place
# a mask over the initial image. It then runs an HSV filter on the masked image, erodes and dilates the result, and finally
# finds and filters the contours.
# You can find the constants for all of these using GRIP. Tune the program constants and generate Python code from GRIP. Then,
# paste those constants into the Constructor below. Custom code can also be inserted after all the GRIP process code.
#
# See http://jevois.org/tutorials for tutorials on getting started with programming JeVois in Python without having
# to install any development software on your host computer.
#
# @author <NAME>
#
# @videomapping YUYV 320 240 59.9 YUYV 320 240 59.9 JeVois PythonSandbox
# @email <EMAIL>
# @mainurl http://jevois.org
# @supporturl http://jevois.org/doc
# @otherurl http://iLab.usc.edu
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
class FindDistance:
# ###################################################################################################
## Constructor
def __init__(self):
# Instantiate a JeVois Timer to measure our processing framerate:
self.timer = jevois.Timer("sandbox", 100, jevois.LOG_INFO)
# SPECIAL REPLACED BLUR CONSTANT
self.__blur_type = 0
# ###################################################################################################
# ALL CONSTANTS GO UNDER HERE (make sure to remove the self.__blur_type line)
self.__blur_radius = 6.909739928607854
self.blur_output = None
self.__cv_extractchannel_src = self.blur_output
self.__cv_extractchannel_channel = 1.0
self.cv_extractchannel_output = None
self.__cv_threshold_src = self.cv_extractchannel_output
self.__cv_threshold_thresh = 30.0
self.__cv_threshold_maxval = 255.0
self.__cv_threshold_type = cv2.THRESH_BINARY
self.cv_threshold_output = None
self.__mask_input = self.blur_output
self.__mask_mask = self.cv_threshold_output
self.mask_output = None
self.__normalize_input = self.mask_output
self.__normalize_type = cv2.NORM_MINMAX
self.__normalize_alpha = 0.0
self.__normalize_beta = 255.0
self.normalize_output = None
self.__hsv_threshold_input = self.normalize_output
self.__hsv_threshold_hue = [46.02792342397267, 120.58148236024165]
self.__hsv_threshold_saturation = [157.86767273600026, 255.0]
self.__hsv_threshold_value = [43.786072836645936, 255.0]
self.hsv_threshold_output = None
self.__cv_erode_src = self.hsv_threshold_output
self.__cv_erode_kernel = None
self.__cv_erode_anchor = (-1, -1)
self.__cv_erode_iterations = 3.0
self.__cv_erode_bordertype = cv2.BORDER_CONSTANT
self.__cv_erode_bordervalue = (-1)
self.cv_erode_output = None
self.__cv_dilate_src = self.cv_erode_output
self.__cv_dilate_kernel = None
self.__cv_dilate_anchor = (-1, -1)
self.__cv_dilate_iterations = 1.0
self.__cv_dilate_bordertype = cv2.BORDER_CONSTANT
self.__cv_dilate_bordervalue = (-1)
self.cv_dilate_output = None
self.__find_contours_input = self.cv_dilate_output
self.__find_contours_external_only = True
self.find_contours_output = None
self.__filter_contours_contours = self.find_contours_output
self.__filter_contours_min_area = 400.0
self.__filter_contours_min_perimeter = 100.0
self.__filter_contours_min_width = 0.0
self.__filter_contours_max_width = 1000.0
self.__filter_contours_min_height = 0.0
self.__filter_contours_max_height = 1000.0
self.__filter_contours_solidity = [75.32956685499059, 100]
self.__filter_contours_max_vertices = 1000.0
self.__filter_contours_min_vertices = 0.0
self.__filter_contours_min_ratio = 0.2
self.__filter_contours_max_ratio = 1.0
self.filter_contours_output = None
# END CONSTANTS
# ###################################################################################################
## Process function with USB output
def process(self, inframe, outframe):
# Get the next camera image (may block until it is captured) and here convert it to OpenCV BGR by default. If
# you need a grayscale image instead, just use getCvGRAY() instead of getCvBGR(). Also supported are getCvRGB()
# and getCvRGBA():
source0 = inimg = inframe.getCvBGR()
outimg = inimg = inframe.getCvBGR()
# Start measuring image processing time (NOTE: does not account for input conversion time):
self.timer.start()
#################################################################################################
# BEGIN GRIP CODE
#################################################################################################
"""
Runs the pipeline and sets all outputs to new values.
"""
# Step Blur0:
self.__blur_input = source0
(self.blur_output) = self.__blur(self.__blur_input, self.__blur_type, self.__blur_radius)
# Step CV_extractChannel0:
self.__cv_extractchannel_src = self.blur_output
(self.cv_extractchannel_output) = self.__cv_extractchannel(self.__cv_extractchannel_src, self.__cv_extractchannel_channel)
# Step CV_Threshold0:
self.__cv_threshold_src = self.cv_extractchannel_output
(self.cv_threshold_output) = self.__cv_threshold(self.__cv_threshold_src, self.__cv_threshold_thresh, self.__cv_threshold_maxval, self.__cv_threshold_type)
# Step Mask0:
self.__mask_input = self.blur_output
self.__mask_mask = self.cv_threshold_output
(self.mask_output) = self.__mask(self.__mask_input, self.__mask_mask)
# Step Normalize0:
self.__normalize_input = self.mask_output
(self.normalize_output) = self.__normalize(self.__normalize_input, self.__normalize_type, self.__normalize_alpha, self.__normalize_beta)
# Step HSV_Threshold0:
self.__hsv_threshold_input = self.normalize_output
(self.hsv_threshold_output) = self.__hsv_threshold(self.__hsv_threshold_input, self.__hsv_threshold_hue, self.__hsv_threshold_saturation, self.__hsv_threshold_value)
# Step CV_erode0:
self.__cv_erode_src = self.hsv_threshold_output
(self.cv_erode_output) = self.__cv_erode(self.__cv_erode_src, self.__cv_erode_kernel, self.__cv_erode_anchor, self.__cv_erode_iterations, self.__cv_erode_bordertype, self.__cv_erode_bordervalue)
# Step CV_dilate0:
self.__cv_dilate_src = self.cv_erode_output
(self.cv_dilate_output) = self.__cv_dilate(self.__cv_dilate_src, self.__cv_dilate_kernel, self.__cv_dilate_anchor, self.__cv_dilate_iterations, self.__cv_dilate_bordertype, self.__cv_dilate_bordervalue)
# Step Find_Contours0:
self.__find_contours_input = self.cv_dilate_output
(self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)
# Step Filter_Contours0:
self.__filter_contours_contours = self.find_contours_output
(self.filter_contours_output) = self.__filter_contours(self.__filter_contours_contours, self.__filter_contours_min_area, self.__filter_contours_min_perimeter, self.__filter_contours_min_width, self.__filter_contours_max_width, self.__filter_contours_min_height, self.__filter_contours_max_height, self.__filter_contours_solidity, self.__filter_contours_max_vertices, self.__filter_contours_min_vertices, self.__filter_contours_min_ratio, self.__filter_contours_max_ratio)
#################################################################################################
# END GRIP CODE
##################################################################################################
# DEFAULT CUSTOM CODE
def getArea(con): # Gets the area of the contour
return cv2.contourArea(con)
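        # The two helpers below return the contour centroid from image moments:
        # cx = M10 / M00 and cy = M01 / M00 (M00 is the contour area).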
def getYcoord(con): # Gets the Y coordinate of the contour
M = cv2.moments(con)
cy = int(M['m01']/M['m00'])
return cy
def getXcoord(con): # Gets the X coordinate of the contour
M = cv2.moments(con)
            cx = int(M['m10']/M['m00'])
            return cx
def sortByArea(conts) : # Returns an array sorted by area from smallest to largest
contourNum = len(conts) # Gets number of contours
sortedBy = sorted(conts, key=getArea) # sortedBy now has all the contours sorted by area
return sortedBy
##################################################################################################
# PUT YOUR CUSTOM CODE HERE
##################################################################################################
# Draws all contours on original image in red
cv2.drawContours(outimg, self.filter_contours_output, -1, (0, 0, 255), 1)
# Gets number of contours
contourNum = len(self.filter_contours_output)
# Sorts contours by the smallest area first
newContours = sortByArea(self.filter_contours_output)
# Send the contour data over Serial
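        # The Distance value below is a pinhole-camera estimate:
        #   distance ~= (real_target_separation * focal_length_px) / pixel_separation
        # where 11.5 is presumably the real-world separation between the two vision targets
        # (inches) and 596.32 a calibrated focal length in pixels; both constants are
        # assumptions specific to this camera/target setup.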
for i in range (contourNum):
cnt = newContours[i]
x,y,w,h = cv2.boundingRect(cnt) # Get the stats of the contour including width and height
# which contour, 0 is first
if (i > 0):
cnt2 = newContours[i-1]
toSend = ("CON" + str(i) +
" area" + str(getArea(cnt)) + # Area of contour
" x" + str(round((getXcoord(cnt)*1000/320)-500, 2)) + # x-coordinate of contour, -500 to 500 rounded to 2 decimal
" y" + str(round(375-getYcoord(cnt)*750/240, 2)) + # y-coordinate of contour, -375 to 375 rounded to 2 decimal
" h" + str(round(h*750/240, 2)) + # Height of contour, 0-750 rounded to 2 decimal
" w" + str(round(w*1000/320, 2)) + # Width of contour, 0-1000 rounded to 2 decimal
" Distance:" + str( round(( (11.5*596.32)/abs((getXcoord(cnt) - getXcoord(cnt2))) ), 2)))
else:
toSend = ("CON" + str(i) +
" area" + str(getArea(cnt)) + # Area of contour
" x" + str(round((getXcoord(cnt)*1000/320)-500, 2)) + # x-coordinate of contour, -500 to 500 rounded to 2 decimal
" y" + str(round(375-getYcoord(cnt)*750/240, 2)) + # y-coordinate of contour, -375 to 375 rounded to 2 decimal
" h" + str(round(h*750/240, 2)) + # Height of contour, 0-750 rounded to 2 decimal
" w" + str(round(w*1000/320, 2))) # Width of contour, 0-1000 rounded to 2 decimal
jevois.sendSerial(toSend)
# Write a title:
cv2.putText(outimg, "JeVois Code", (3, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
# Write frames/s info from our timer into the edge map (NOTE: does not account for output conversion time):
fps = self.timer.stop()
#height, width, channels = outimg.shape # if outimg is grayscale, change to: height, width = outimg.shape
height, width, channels = outimg.shape
cv2.putText(outimg, fps, (3, height - 6), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)
# Convert our BGR output image to video output format and send to host over USB. If your output image is not
# BGR, you can use sendCvGRAY(), sendCvRGB(), or sendCvRGBA() as appropriate:
outframe.sendCvBGR(outimg)
# outframe.sendCvGRAY(outimg)
##################################################################################################
# END CUSTOM CODE
###################################################################################################
# FUNCTIONS GO HERE (Anything that starts with "@staticmethod")
@staticmethod
def __blur(src, type, radius):
"""Softens an image using one of several filters.
Args:
src: The source mat (numpy.ndarray).
type: The blurType to perform represented as an int.
radius: The radius for the blur as a float.
Returns:
A numpy.ndarray that has been blurred.
"""
ksize = int(2 * round(radius) + 1)
return cv2.blur(src, (ksize, ksize))
#return cv2.medianBlur(src, (ksize, ksize)) # Perform a Median Blur
#return cv2.GaussianBlur(src,(ksize, ksize),0) # Perform a Gaussian Blur
@staticmethod
def __cv_extractchannel(src, channel):
"""Extracts given channel from an image.
Args:
src: A numpy.ndarray.
channel: Zero indexed channel number to extract.
Returns:
The result as a numpy.ndarray.
"""
return cv2.extractChannel(src, (int) (channel + 0.5))
@staticmethod
def __cv_threshold(src, thresh, max_val, type):
"""Apply a fixed-level threshold to each array element in an image
Args:
src: A numpy.ndarray.
thresh: Threshold value.
max_val: Maximum value for THRES_BINARY and THRES_BINARY_INV.
type: Opencv enum.
Returns:
A black and white numpy.ndarray.
"""
return cv2.threshold(src, thresh, max_val, type)[1]
@staticmethod
def __mask(input, mask):
"""Filter out an area of an image using a binary mask.
Args:
input: A three channel numpy.ndarray.
mask: A black and white numpy.ndarray.
Returns:
A three channel numpy.ndarray.
"""
return cv2.bitwise_and(input, input, mask=mask)
@staticmethod
def __normalize(input, type, a, b):
"""Normalizes or remaps the values of pixels in an image.
Args:
input: A numpy.ndarray.
type: Opencv enum.
a: The minimum value.
b: The maximum value.
Returns:
A numpy.ndarray of the same type as the input.
"""
return cv2.normalize(input, None, a, b, type)
@staticmethod
def __hsv_threshold(input, hue, sat, val):
"""Segment an image based on hue, saturation, and value ranges.
Args:
input: A BGR numpy.ndarray.
            hue: A list of two numbers that are the min and max hue.
            sat: A list of two numbers that are the min and max saturation.
            val: A list of two numbers that are the min and max value.
Returns:
A black and white numpy.ndarray.
"""
out = cv2.cvtColor(input, cv2.COLOR_BGR2HSV)
return cv2.inRange(out, (hue[0], sat[0], val[0]), (hue[1], sat[1], val[1]))
@staticmethod
def __cv_erode(src, kernel, anchor, iterations, border_type, border_value):
"""Expands area of lower value in an image.
Args:
src: A numpy.ndarray.
kernel: The kernel for erosion. A numpy.ndarray.
iterations: the number of times to erode.
border_type: Opencv enum that represents a border type.
border_value: value to be used for a constant border.
Returns:
A numpy.ndarray after erosion.
"""
return cv2.erode(src, kernel, anchor, iterations = (int) (iterations +0.5),
borderType = border_type, borderValue = border_value)
@staticmethod
def __cv_dilate(src, kernel, anchor, iterations, border_type, border_value):
"""Expands area of higher value in an image.
Args:
src: A numpy.ndarray.
kernel: The kernel for dilation. A numpy.ndarray.
iterations: the number of times to dilate.
border_type: Opencv enum that represents a border type.
border_value: value to be used for a constant border.
Returns:
A numpy.ndarray after dilation.
"""
return cv2.dilate(src, kernel, anchor, iterations = (int) (iterations +0.5),
borderType = border_type, borderValue = border_value)
@staticmethod
def __find_contours(input, external_only):
"""Sets the values of pixels in a binary image to their distance to the nearest black pixel.
Args:
input: A numpy.ndarray.
external_only: A boolean. If true only external contours are found.
Return:
A list of numpy.ndarray where each one represents a contour.
"""
if(external_only):
mode = cv2.RETR_EXTERNAL
else:
mode = cv2.RETR_LIST
method = cv2.CHAIN_APPROX_SIMPLE
        im2, contours, hierarchy = cv2.findContours(input, mode=mode, method=method)
return contours
@staticmethod
def __filter_contours(input_contours, min_area, min_perimeter, min_width, max_width,
min_height, max_height, solidity, max_vertex_count, min_vertex_count,
min_ratio, max_ratio):
"""Filters out contours that do not meet certain criteria.
Args:
input_contours: Contours as a list of numpy.ndarray.
min_area: The minimum area of a contour that will be kept.
min_perimeter: The minimum perimeter of a contour that will be kept.
min_width: Minimum width of a contour.
            max_width: Maximum width of a contour.
            min_height: Minimum height of a contour.
            max_height: Maximum height of a contour.
            solidity: The minimum and maximum solidity of a contour.
            min_vertex_count: Minimum vertex count of the contours.
            max_vertex_count: Maximum vertex count of the contours.
min_ratio: Minimum ratio of width to height.
max_ratio: Maximum ratio of width to height.
Returns:
Contours as a list of numpy.ndarray.
"""
output = []
for contour in input_contours:
x,y,w,h = cv2.boundingRect(contour)
if (w < min_width or w > max_width):
continue
if (h < min_height or h > max_height):
continue
area = cv2.contourArea(contour)
if (area < min_area):
continue
if (cv2.arcLength(contour, True) < min_perimeter):
continue
hull = cv2.convexHull(contour)
solid = 100 * area / cv2.contourArea(hull)
if (solid < solidity[0] or solid > solidity[1]):
continue
if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
continue
ratio = (float)(w) / h
if (ratio < min_ratio or ratio > max_ratio):
continue
output.append(contour)
return output
#BlurType = Enum('BlurType', 'Box_Blur Gaussian_Blur Median_Filter Bilateral_Filter')
``` |
{
"source": "jolitp/automation_scripts",
"score": 2
} |
#### File: file-manipulation/clean-up-undesirable-files/clean-up-undesirable-files.py
```python
import os
from os import path
import shutil
from colorama import Fore
from colorama import Style
DEBUG = False
DEBUG = True
RELEASE = False
RELEASE = True
undesirable_files = [ "video-renamer-and-mover.py",
"remove-videos",
"show-width-and-height-of-videos.py",
"mass-convert-and-resize-and-concatenate.py",
"mark-video-length.py",
"make-org.py",
"check-if-ffmpeg-is-finished.py"
"mark-if-ffmpeg-is-finished.py",
"add-padding-zeros.py",
"clean-up-undesirable-files.py",
"rename-folders.py",
"rename-files.py",
"mark-if-ffmpeg-is-finished",
"check-if-ffmpeg-is-finished",
"remove-empty-folders.py",
"copy-py-files-to-subdirs.py",
"call-command-in-subfolders.py",
"[CourseClub.NET].url",
"[FreeCourseSite.com].url",
"[FCS Forum].url",
"concatenated_video_found.txt",
"vidlist.txt",
"total_length.data",
"accumulated_time_of_videos.txt",
"different_dimentions.txt",
"dimension.csv",
"mass-convert-only.py",
"clean-up-py-files.py",
".srt",
".vtt",
"remove-videos.py"
]
def is_undesirable_file(name:str):
is_undesirable_file = False
for file_name in undesirable_files:
if file_name in name:
is_undesirable_file = True
return is_undesirable_file
video_extensions = ['.mp4', '.m4v', '.mkv', '.ts', '.avi', '.webm', '.flv', '.mov', '.wmv', '.vob']
def is_video(name):
is_video = False
for ext in video_extensions:
if name.lower().endswith(ext):
is_video = True
return is_video
# pretty print an array
def pretty_print_array(array,message,color=Fore.GREEN):
if DEBUG : print(f'{color}========== ' + message + f' ========== {Style.RESET_ALL}')
for index, name in enumerate(array):
start = f'{color}'
reset = f' : {Style.RESET_ALL}'
if DEBUG : print(start + str(index) + reset + str(name))
print()
def pretty_print_value(value,message,color=Fore.BLUE):
if DEBUG : print(f'{color}' + message + f' : {Style.RESET_ALL}' + str(value))
current_dir_path = os.getcwd()
current_dir = os.path.basename(current_dir_path)
# print(current_dir)
all_files_full_path = []
for root, directories, filenames in os.walk(current_dir_path):
for filename in filenames:
all_files_full_path.append(os.path.join(root,filename))
all_undesirable_files = []
for file_path in all_files_full_path:
if is_undesirable_file(file_path):
all_undesirable_files.append(file_path)
this_file_path = os.path.realpath(__file__)
# pretty_print_array(all_files_full_path, "all files")
for file in all_undesirable_files:
path_separated = file.split('/')
current_path = ''
for index, subdir in enumerate(path_separated):
current_path += subdir + '/'
# print(current_path)
if index == len(path_separated) -1:
break
if subdir == 'undesirable_files':
pretty_print_value(subdir, "subdir:")
if 'undesirable_files' in current_path:
                # BUG: if an undesirable file exists both inside the undesirable_files folder and inside
                # a nested folder, it will be included in the all_undesirable_files array and trigger
                # an exception in the remove() call below
all_files_full_path.remove(file)
pretty_print_value(file, "file: ")
# pretty_print_array(all_files_full_path, "all files")
for file in all_undesirable_files:
# ignore files in the root directory
file_in_root = False
for elem in undesirable_files:
root_file = current_dir_path + '/' + elem
if file == root_file:
file_in_root = True
if file_in_root:
continue
# print(file)
source = file
# pretty_print_value(source,"source: ")
destination = str(file).replace(current_dir_path , '')
undesirable_files_path = current_dir_path + '/undesirable_files'
if not os.path.exists(undesirable_files_path):
os.mkdir(undesirable_files_path)
destination = undesirable_files_path + destination
# pretty_print_value(destination,"destination: ")
path_separated = destination.split('/')
current_path = ''
for index, subdir in enumerate(path_separated):
current_path += subdir + '/'
# print(current_path)
if index == len(path_separated) -1:
break
# print(os.path.exists(current_path))
if not os.path.exists(current_path):
os.mkdir(current_path)
os.rename(source,destination)
# os.remove(file)
...
```
#### File: helpers/copy-py-files-to-subdirs/copy-py-files-to-subdirs.py
```python
import shutil
import os
from glob import glob
import pathlib
from os import listdir
from os.path import isfile, join
"""
copy all .py files to all subfolders
"""
def is_python_file(name):
is_py = False
if name.lower().endswith('.py'):
is_py = True
return is_py
current_path = pathlib.Path(__file__).parent.absolute()
dirs = os.listdir('.')
dirs = glob(str(current_path) + '/*/')
# print(dirs)
onlyfiles = [f for f in listdir(current_path) if isfile(join(current_path, f))]
# print(onlyfiles)
files_to_copy = []
for file in onlyfiles:
if is_python_file(file):
files_to_copy.append(file)
# print(file)
print(files_to_copy)
for file_to_copy in files_to_copy:
src = str(current_path) + "/" + file_to_copy
print("source: " + src + "\n")
for dir in dirs:
dst = dir
shutil.copy(src, dst)
print("destination: " + dst)
```
#### File: helpers/filter_videos/unit_tests_filter_videos.py
```python
import unittest
# from helpers import filter_videos
# import helpers.filter_videos
import filter_videos as script
class UnitTestFilterVideos(unittest.TestCase):
"""
test filter_video function
"""
# region class TestFilterVideos(unittest.TestCase):
# region def test_1_video_from_1_file(self):
def test_1_video_from_1_file(self):
"""
test: given one file return one video
"""
files : set(str) = {
"single video.mp4"
}
expected_result : set(str) = {
"single video.mp4"
}
videos : set(str) = script.filter_videos(files, debug_function=True)
self.assertSequenceEqual(videos, expected_result)
...
# endregion def test_1_video_from_1_file(self):
# region def test_1_video_from_11_files(self):
def test_1_video_from_11_files(self):
"""
        test: given eleven files, return the single video
"""
files : set(str) = {
"single video.mp4",
"1st non video file.txt",
"2nd non video file.txt",
"3rd non video file.txt",
"4th non video file.txt",
"5th non video file.txt",
"6th non video file.txt",
"7th non video file.txt",
"8th non video file.txt",
"9th non video file.txt",
"10th non video file.txt",
}
expected_result : set(str) = {
"single video.mp4"
}
videos : set(str) = script.filter_videos(files, debug_function=True)
self.assertSequenceEqual(videos, expected_result)
...
...
# endregion def test_1_video_from_11_files(self):
# region def test_2_videos_from_11_files(self):
def test_2_videos_from_11_files(self):
"""
        test: given eleven files, return the two videos
"""
files : set(str) = [
"1st video.mp4",
"2nd video.mp4",
"1st non video file.txt",
"2nd non video file.txt",
"3rd non video file.txt",
"4th non video file.txt",
"5th non video file.txt",
"6th non video file.txt",
"7th non video file.txt",
"8th non video file.txt",
"9th non video file.txt",
]
expected_result : set(str) = {
"1st video.mp4",
"2nd video.mp4",
}
videos : set(str) = script.filter_videos(files, debug_function=True)
self.assertSequenceEqual(videos, expected_result)
...
...
# endregion def test_2_videos_from_11_files(self):
# region def test_0_videos_from_1_file(self):
def test_0_videos_from_1_file(self):
"""
        test: given only non-video files, return an empty set
"""
files : list(str) = [
"1st not video.csv",
"2nd not video.txt",
]
expected_result : set(str) = set()
videos : set(str) = script.filter_videos(files, debug_function=True)
self.assertSequenceEqual(videos, expected_result)
...
...
# endregion def test_0_videos_from_1_file(self):
# region def test_lists_should_equal_even_if_order_does_not_match(self):
def test_lists_should_equal_even_if_order_does_not_match(self):
"""
        test: the returned set should compare equal regardless of element order
"""
files : set(str) = {
"1st video.mp4",
"2nd video.mp4",
}
expected_result : set(str) = {
"2nd video.mp4",
"1st video.mp4",
}
videos : set(str) = script.filter_videos(files, debug_function=True)
self.assertSequenceEqual(videos, expected_result)
...
...
# endregion def test_lists_should_equal_even_if_order_does_not_match(self):
# endregion TestFilterVideos(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
```
#### File: helpers/get_absolute_path_before_moving_to_directory/unit_tests_get_absolute_path_before_moving_to_directory.py
```python
import unittest
import get_absolute_path_before_moving_to_directory as script
class UnitTestGetAbsolutePathAfterMovingToDirectory(unittest.TestCase):
"""
tests get_absolute_path_before_moving_to_directory() function
"""
# region class TestGetAbsolutePathAfterMovingToDirectory(unittest.TestCase):
# region def test_(self):
def test_right_parameters_results_right_return(self):
"""
        test: given an absolute root directory and relative destination_directory and
        file_or_folder_to_move, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
"""
root_directory : str = "/home/user/videos/root_directory"
destination_directory : str = "destination"
file_or_folder_to_move : str = "video_file.mkv"
expected_result : str = "/home/user/videos/root_directory/destination/video_file.mkv"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_root_directory_has_trailing_slash(self):
"""
        test: given an absolute root directory with a trailing slash, and relative
        destination_directory and file_or_folder_to_move, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (the trailing slash must not produce a double slash)
"""
root_directory : str = "/home/user/videos/root_directory/"
destination_directory : str = "destination"
file_or_folder_to_move : str = "video_file.mkv"
expected_result : str = "/home/user/videos/root_directory/destination/video_file.mkv"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_destination_directory_has_trailing_slash(self):
"""
        test: given an absolute root directory, a destination directory with a
        trailing slash, and a relative file to move, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (without a double slash)
"""
root_directory : str = "/home/user/videos/root_directory"
destination_directory : str = "destination/"
file_or_folder_to_move : str = "video_file.mkv"
expected_result : str = "/home/user/videos/root_directory/destination/video_file.mkv"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_folder_to_move_has_trailing_slash(self):
"""
        test: given an absolute root directory, a relative destination directory,
        and a folder to move with a trailing slash, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (without the trailing slash)
"""
root_directory : str = "/home/user/videos/root_directory"
destination_directory : str = "destination"
file_or_folder_to_move : str = "inside_folder/"
expected_result : str = "/home/user/videos/root_directory/destination/inside_folder"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_both_destination_directory_and_file_or_folder_to_move_have_trailing_slashes(self):
"""
        test: given an absolute root directory, and a destination directory and a
        folder to move that both have trailing slashes, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (without the trailing slashes)
"""
root_directory : str = "/home/user/videos/root_directory"
destination_directory : str = "destination/"
file_or_folder_to_move : str = "inside_folder/"
expected_result : str = "/home/user/videos/root_directory/destination/inside_folder"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_both_root_directory_and_file_or_folder_to_move_have_trailing_slashes(self):
"""
        test: given a root directory and a folder to move that both have trailing
        slashes, and a relative destination directory, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (without the trailing slashes)
"""
root_directory : str = "/home/user/videos/root_directory/"
destination_directory : str = "destination"
file_or_folder_to_move : str = "inside_folder/"
expected_result : str = "/home/user/videos/root_directory/destination/inside_folder"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_(self):
def test_both_root_directory_and_destination_directory_have_trailing_slashes(self):
"""
        test: given a root directory and a destination directory that both have
        trailing slashes, and a relative folder to move, the result should be
        root_directory + / + destination_directory + / + file_or_folder_to_move
        (without the trailing slashes)
"""
root_directory : str = "/home/user/videos/root_directory/"
destination_directory : str = "destination/"
file_or_folder_to_move : str = "inside_folder"
expected_result : str = "/home/user/videos/root_directory/destination/inside_folder"
actual_result : str = script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
self.assertEqual(expected_result, actual_result)
...
# endregion def test_(self):
# region def test_root_directory_relative_path_raises_ValueError(self):
def test_root_directory_relative_path_raises_value_error(self):
"""
        test: if root_directory is not an absolute path, raise ValueError
"""
root_directory : str = "root_directory"
destination_directory : str = "destination"
file_or_folder_to_move : str = "inside_folder"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'root_directory is not absolute path')
# endregion def test_root_directory_relative_path_raises_ValueError(self):
# region def test_destination_directory_relative_path_raises_value_error(self):
def test_destination_directory_relative_path_raises_value_error(self):
"""
test: if destination_directory is absolute path raise exception ValueError
"""
root_directory : str = "/home/user/videos/root_directory"
file_or_folder_to_move : str = "inside_folder"
destination_directory : str = "/destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'destination_directory is absolute path')
# endregion def test_destination_directory_relative_path_raises_value_error(self):
# region def test_file_or_folder_to_move_relative_path_raises_value_error(self):
def test_file_or_folder_to_move_relative_path_raises_value_error(self):
"""
test: if file_or_folder_to_move is absolute path raise exception ValueError
"""
root_directory : str = "/home/user/videos/root_directory"
file_or_folder_to_move : str = "/inside_folder"
destination_directory : str = "destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'file_or_folder_to_move is absolute path')
# endregion def test_file_or_folder_to_move_relative_path_raises_value_error(self):
    # region def test_root_directory_relative_path_with_double_dots_raises_value_error(self):
def test_root_directory_relative_path_with_double_dots_raises_value_error(self):
"""
        test: if root_directory is a relative path starting with ../ raise ValueError
"""
root_directory : str = "../home/user/videos/root_directory"
file_or_folder_to_move : str = "inside_folder"
destination_directory : str = "destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'root_directory is not absolute path')
    # endregion def test_root_directory_relative_path_with_double_dots_raises_value_error(self):
    # region def test_file_or_folder_to_move_relative_path_with_double_dots_raises_value_error(self):
def test_file_or_folder_to_move_relative_path_with_double_dots_raises_value_error(self):
"""
        test: if file_or_folder_to_move is a relative path starting with ../ raise ValueError
"""
root_directory : str = "/home/user/videos/root_directory"
file_or_folder_to_move : str = "../inside_folder"
destination_directory : str = "destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'found ../ in result path, that is not supported')
    # endregion def test_file_or_folder_to_move_relative_path_with_double_dots_raises_value_error(self):
# region def test_destination_directory_relative_path_with_double_dots_raises_value_error(self):
def test_destination_directory_relative_path_with_double_dots_raises_value_error(self):
"""
        test: if destination_directory is a relative path starting with ../ raise ValueError
"""
root_directory : str = "/home/user/videos/root_directory"
file_or_folder_to_move : str = "inside_folder"
destination_directory : str = "../destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'found ../ in result path, that is not supported')
# endregion def test_destination_directory_relative_path_with_double_dots_raises_value_error(self):
    # region def test_2_parameters_are_relative_path_with_double_dots_raises_value_error(self):
def test_2_parameters_are_relative_path_with_double_dots_raises_value_error(self):
"""
        test: if two of the parameters are relative paths starting with ../ raise ValueError
"""
root_directory : str = "/home/user/videos/root_directory"
file_or_folder_to_move : str = "../inside_folder"
destination_directory : str = "../destination"
with self.assertRaises(ValueError) as error:
script.get_absolute_path_before_moving_to_directory(root_directory,
destination_directory,
file_or_folder_to_move,
debug_function=True)
...
...
self.assertEqual(str(error.exception), 'found ../ in result path, that is not supported')
    # endregion def test_2_parameters_are_relative_path_with_double_dots_raises_value_error(self):
# DONE test when root_directory is in the format ../something
# DONE test when file_or_folder_to_move is in the format ../something
# DONE test when destination_directory is in the format ../something
# TODO handle the case where 2 of the parameters have ../something
    # TODO test when file_or_folder_to_move has more than one element like folder/inside_folder
    #      should raise exception
    # TODO test when destination_directory has more than one element like folder/inside_folder
    #      should raise exception
...
# endregion class TestGetAbsolutePathAfterMovingToDirectory(unittest.TestCase):
if __name__ == "__main__":
unittest.main()
```
#### File: helpers/org_log/org_log.py
```python
import os
import inspect
import tokenize
import contextlib
from varname import nameof
from colorama import Fore
from colorama import Style
from datetime import datetime
DEBUG = False
DEBUG = True # comment to turn off
LOGIC_PRINTS = Fore.LIGHTBLACK_EX
STRING_TO_PRINT = ""
DEBUG_LOGIC = False
DEBUG_LOGIC = True # comment to turn off
# DONE get the name of a variable in a string
# see if the name changes whether its inside a function or not
#
# importing: from varname import nameof
#
# use: nameof(variable)
#
# if used inside a function on a parameter variable it gets the name of the parameter variable
"""
https://stackoverflow.com/questions/18425225/getting-the-name-of-a-variable-as-a-string
"""
# DONE get nesting level or indentation level
"""
https://stackoverflow.com/questions/39172306/can-a-line-of-python-code-know-its-indentation-nesting-level
"""
def get_indentation_level():
""" get indentation level of the place where it is called in the script
Parameters:
None
Returns:
indentation_level (int): the indentation level
"""
caller_frame = inspect.currentframe().f_back
filename, caller_lineno, _, _, _ = inspect.getframeinfo(caller_frame)
with open(filename) as f_n:
indentation_level = 0
for token_record in tokenize.generate_tokens(f_n.readline):
token_type, _, (token_lineno, _), _, _ = token_record
if token_lineno > caller_lineno:
break
elif token_type == tokenize.INDENT:
indentation_level += 1
elif token_type == tokenize.DEDENT:
indentation_level -= 1
return indentation_level
# TODO refactor the class to only print to the file once and keep a string in memory
#      printing is taking too long because of all the opening and closing of files
# TODO add line numbers to the functions, and print after the *
class OrgLogger:
"""
Create an Org file with the statements of the script
"""
# def __init__(self) -> None:
# dt_string = datetime.now().strftime("%Y %m %d - %H:%M:%S.%f")
# script_name : str = os.path.basename(__file__)
# cwd = os.getcwd()
# exec_for_folder = '/executions : ' + script_name + '/'
# path : str = cwd + exec_for_folder
# if not os.path.isdir(path):
# os.mkdir(path)
# ...
# self.org_file_path : str = path + dt_string + '.org'
# self.org_file = open(self.org_file_path, 'w+')
# self.add_line("#+TODO: IF_BEGIN IF_END | ")
# self.add_line("#+TODO: ELSE_BEGIN ELSE_END | ")
# self.add_line("#+TODO: FOR_BEGIN FOR_END | ")
# self.add_line("#+TODO: WHILE_BEGIN WHILE_END | ")
# self.add_line("#+TODO: FUNC_BEGIN FUNC_END | ")
# self.add_line("#+TODO: SWITCH_BEGIN SWITCH_END | ")
# #+TODO: TODO | DONE
# #+TODO: REPORT BUG KNOWNCAUSE | FIXED
# #+TODO: | CANCELED
# self.org_file.close()
def __init__(self,
cwd = os.getcwd(),
script_name : str = os.path.basename(__file__)) -> None:
dt_string = datetime.now().strftime("%Y %m %d - %H:%M:%S.%f")
self.start_time = dt_string
self.script_name : str = script_name
self.cwd = cwd
exec_for_folder = '/executions : ' + script_name + '/'
path : str = cwd + exec_for_folder
if not os.path.isdir(path):
os.mkdir(path)
...
self.lines : str = ""
self.org_file_path : str = path + dt_string + '.org'
self.add_line("#+TODO: IF_BEGIN IF_END | ")
self.add_line("#+TODO: ELSE_BEGIN ELSE_END | ")
self.add_line("#+TODO: FOR_BEGIN FOR_END | ")
self.add_line("#+TODO: WHILE_BEGIN WHILE_END | ")
self.add_line("#+TODO: FUNC_BEGIN FUNC_END | ")
self.add_line("#+TODO: SWITCH_BEGIN SWITCH_END | ")
#+TODO: TODO | DONE
#+TODO: REPORT BUG KNOWNCAUSE | FIXED
#+TODO: | CANCELED
def add_line(self,
line_to_add : str
) -> None:
"""
add line to the org file with a new line character at the end
"""
        self.lines += line_to_add + '\n'
# self.org_file = open(self.org_file_path, 'a+')
# self.org_file.write(line_to_add + '\n')
# self.org_file.close()
def print_to_file(self):
"""
        print all accumulated lines to the org file
"""
dt_string = datetime.now().strftime("%Y %m %d - %H:%M:%S.%f")
end_time = dt_string
# d1 = datetime.datetime.strptime('2011:10:01:10:30:00', '%Y:%m:%d:%H:%M:%S')
# d2 = datetime.datetime.strptime('2011:10:01:11:15:00', '%Y:%m:%d:%H:%M:%S')
# diff = (d2 - d1).total_seconds() / 60
d1 = datetime.strptime(self.start_time, "%Y %m %d - %H:%M:%S.%f")
d2 = datetime.strptime(end_time, "%Y %m %d - %H:%M:%S.%f")
diff = (d2 - d1)
self.lines += "\n* time to run script: " + str(diff) + "\n"
org_file = open(self.org_file_path, 'w')
org_file.write(self.lines)
org_file.close()
...
def add_blank_line(self) -> None:
"""
        add a blank line to the org file
"""
self.lines += "\n"
def add_variable(self,
variable_value,
variable_name : str,
indentation : int,
comment : str = ''
) -> None:
"""
add a variable to the org file
"""
# string of format: * variable_name = variable_value
# * is indentation level
header : str = '*'*int(indentation + 1) + " "
name : str = str(variable_name)
value_local : str = str(variable_value)
value_type : str = str(type(variable_value))
line : str = header + name + ' <- ' + value_local + '\n ( of type ' + value_type + " )"
self.add_line(line)
if comment:
self.add_line(comment)
# TODO print a table of members of an array if it is an array
if type(variable_value) == list:
table = self.table_of_a_list(variable_value, variable_name)
self.add_line(table)
...
...
def add_if_statement(self,
predicate_value,
predicate_name : str,
indentation : int,
comment : str = '',
begin_or_end : str = 'begin',
inside_or_outside : str = 'outside'
) -> None:
"""add if statement to the org file
put inside the else clause
Args:
predicate_value ([type]):
the value of the predicate of the IF statement
predicate_name (str):
the name of the variable that is the predicate
indentation (int):
the indentation level of the calling code
comment (str, optional):
a comment to insert after the header. Defaults to ''.
begin_or_end (str, optional):
                whether it begins or ends the statement. Defaults to 'begin'.
inside_or_outside (str, optional):
                whether it is inserted (in the calling code) inside or outside the actual statement
"""
# string of format: * predicate_name = predicate_value
# * is indentation level
marker : str = "IF_" + begin_or_end.upper() + " "
tabs_to_add = 1
if inside_or_outside == 'outside':
tabs_to_add = 2
header : str = '*'*int(indentation + tabs_to_add) + " "
line : str = header + marker + str(predicate_name) + ' == ' + str(predicate_value)
if marker == "IF_BEGIN ":
self.add_blank_line()
self.add_line(line)
elif marker == "IF_END ":
self.add_line(line)
self.add_blank_line()
if comment:
self.add_line(comment)
...
def add_else_statement(self,
predicate_value,
predicate_name : str,
indentation : int,
comment : str = '',
begin_or_end : str = 'begin',
inside_or_outside : str = 'outside'
) -> None:
"""add else statement to the org file
put inside the else clause
Args:
predicate_value ([type]):
the value of the predicate of the IF statement
predicate_name (str):
the name of the variable that is the predicate
indentation (int):
the indentation level of the calling code
comment (str, optional):
a comment to insert after the header. Defaults to ''.
begin_or_end (str, optional):
                whether it begins or ends the statement. Defaults to 'begin'.
inside_or_outside (str, optional):
                whether it is inserted (in the calling code) inside or outside the actual statement
"""
marker : str = "ELSE_" + begin_or_end.upper() + " "
tabs_to_add = 1
if inside_or_outside == 'outside':
tabs_to_add = 2
header : str = '*'*int(indentation + tabs_to_add) + " "
line : str = header + marker + str(predicate_name) + ' == ' + str(predicate_value)
if marker == "ELSE_BEGIN ":
self.add_blank_line()
self.add_line(line)
elif marker == "ELSE_END ":
self.add_line(line)
self.add_blank_line()
if comment:
self.add_line(comment)
...
...
# TODO add different functions to be put inside and outside of the loop
# the outside one should print the collection
# the inside one should print that the loop is beginning
# and the iteration counter
# and the variables used inside the loop (passed as an array)
def add_for_statement(self,
collection_value,
collection_name : str,
indentation : int,
comment : str = '',
begin_or_end : str ='begin',
inside_or_outside : str = 'outside'
) -> None:
"""
        add a for statement to the org file
"""
# string of format: * predicate_name = predicate_value
# * is indentation level
marker : str = "FOR_" + begin_or_end.upper() + " "
tabs_to_add = 1
if inside_or_outside == 'outside':
tabs_to_add = 2
header : str = '*'*int(indentation + tabs_to_add) + " "
line : str = header + marker + str(collection_name) + ' <- ' + str(collection_value)
collection_formatted = ""
is_list = isinstance(collection_value, list)
is_dict = isinstance(collection_value, dict)
if is_list:
collection_formatted : str = self.table_of_a_list(collection_value,
collection_name)
...
elif is_dict:
collection_formatted : str = self.table_of_a_dictionary(collection_value,
collection_name)
...
if marker == "FOR_BEGIN ":
self.add_blank_line()
self.add_line(line)
self.add_line(collection_formatted)
elif marker == "FOR_END ":
self.add_line(line)
self.add_line(collection_formatted)
self.add_blank_line()
if comment:
self.add_line(comment)
...
def add_while_statement(self,
predicate_value,
predicate_name,
indentation,
comment : str = "",
begin_or_end : str = "begin",
inside_or_outside : str = 'outside'
) -> None :
"""adds a while statement to the org file
Args:
predicate_value (variable):
the predicate used in the while loop
predicate_name (str):
the name of the variable used in the while loop
indentation (int):
the indentation level of the calling code
comment (str, optional):
a comment to be put inside the header in the org file. Defaults to "".
begin_or_end (str, optional):
                whether it is the begin or end of the loop. Defaults to "begin".
"""
marker : str = "WHILE_" + begin_or_end.upper() + " "
tabs_to_add = 1
if inside_or_outside == 'outside':
tabs_to_add = 2
header : str = '*'*int(indentation + tabs_to_add) + " "
        line : str = header + marker + str(predicate_name) + ' <- ' + str(predicate_value)
        if marker == "WHILE_BEGIN ":
            self.add_blank_line()
            self.add_line(line)
        elif marker == "WHILE_END ":
self.add_line(line)
self.add_blank_line()
if comment:
self.add_line(comment)
...
def add_function_delimeter(self,
function_value,
function_name,
list_of_params : list,
list_of_param_names : list,
indentation : int,
comment : str = "",
begin_or_end : str = "begin",
inside_or_outside : str = 'inside'
) -> None:
"""
add a function to the org file
"""
line : str = ""
for index, _ in enumerate(list_of_params):
param_value : str = str(list_of_params[index])
param_name : str = str(list_of_param_names[index])
line += "| " + param_name + " | " + param_value + " |\n"
...
tabs_to_add : int = 1
if inside_or_outside == 'inside':
tabs_to_add = 2
header : str = '*'*int(indentation + tabs_to_add) + " "
marker : str = "FUNC_" + begin_or_end.upper() + " "
header_line : str = header + marker + str(function_name) + ' <- ' + str(function_value)
self.add_blank_line()
self.add_line(header_line)
self.add_line(line)
self.add_blank_line()
if comment:
self.add_line(comment)
...
def table_of_a_list(self,
list_value,
list_name : str
                        ) -> str:
        """
        returns an org table (one item per line) of a list, with the list name at the top
        """
        result : str = ""
        name : str = self.add_characters_to_both_sides_of_word(list_name, "=")
        result = name + "\n"
        # TODO put the type of each item
        # TODO pad zeroes
        for index_a, item_a in enumerate(list_value):
            result += "| " + list_name + "[" + str(index_a) + "] | " + str(item_a) + " |\n"
        ...
        # TODO format the table so each column aligns
        return result
...
def add_characters_to_both_sides_of_word(self,
word : str,
character : str,
max_characters : int = 80,
space_before_and_after : bool = True
) -> str:
"""
add characters to both sides of word, until max characters were reached
"""
result : str = ""
word_length = len(word)
n_of_chars_side = int((max_characters - word_length) / 2)
left, right = n_of_chars_side, n_of_chars_side
if space_before_and_after:
result = " " + (left * character) + " " + word + " " + (right * character) + " "
else:
result = (left * character) + " " + word + " " + (right * character)
return result
...
def table_of_a_dictionary(self,
dict_value,
dict_name : str
                              ) -> str:
        """
        returns an org table (one item per line) of a dictionary, with the dictionary name at the top
        """
        result : str = ""
        name : str = self.add_characters_to_both_sides_of_word(dict_name, "=")
        result = name + "\n"
        # TODO pad zeroes
        for key_a, value_a in dict_value.items():
            result += "| " + dict_name + "[" + str(key_a) + "] | " + str(value_a) + " |\n"
        ...
        # TODO format the table so each column aligns
        return result
...
# TODO add logging of exceptions
# TODO add logging of classes
org_logger = OrgLogger()
# variables
variable_in___main__ = "value of variable_in___main__"
org_logger.add_variable(variable_in___main__,
nameof(variable_in___main__),
get_indentation_level())
# if statements
predicate : bool = True
org_logger.add_if_statement(predicate,
nameof(predicate),
get_indentation_level())
if predicate:
variable_inside_1_indent_if : int = 1
org_logger.add_variable(variable_inside_1_indent_if,
nameof(variable_inside_1_indent_if),
get_indentation_level())
...
org_logger.add_if_statement(predicate,
nameof(predicate),
get_indentation_level(),
begin_or_end='end')
# this is the actual comment in the script
variable_with_comment = "value variable_with_comment"
org_logger.add_variable(variable_with_comment,
                        nameof(variable_with_comment),
get_indentation_level(),
comment="this is the comment passed to the function to be printed")
# for statements
list_of_items = [1,2,3]
org_logger.add_for_statement(list_of_items,
nameof(list_of_items),
get_indentation_level())
for item in list_of_items:
org_logger.add_variable(item,
nameof(item),
get_indentation_level())
...
org_logger.add_for_statement(list_of_items,
nameof(list_of_items),
get_indentation_level(),
begin_or_end='end')
# dictionary
dictionary_var = {
"key1": "value1",
"key2": "value2",
"k:str,v:list": [1,2,3]
}
org_logger.add_for_statement(dictionary_var,
nameof(dictionary_var),
get_indentation_level())
for key, value in dictionary_var.items():
org_logger.add_variable(key,
nameof(key),
get_indentation_level())
org_logger.add_variable(value,
nameof(value),
get_indentation_level())
...
org_logger.add_for_statement(dictionary_var,
nameof(dictionary_var),
get_indentation_level(),
begin_or_end='end')
# functions
def global_function(parameter_a):
org_logger.add_function_delimeter(global_function,
nameof(global_function),
[parameter_a],
[nameof(parameter_a)],
get_indentation_level()
)
local_variable_inside_global_function = []
org_logger.add_variable(local_variable_inside_global_function,
nameof(local_variable_inside_global_function),
get_indentation_level())
org_logger.add_function_delimeter(global_function,
nameof(global_function),
[parameter_a],
[nameof(parameter_a)],
get_indentation_level(),
begin_or_end="end"
)
...
a = 1
global_function(a)
def delimeters_put_outside_of_function():
...
org_logger.add_function_delimeter(delimeters_put_outside_of_function,
nameof(delimeters_put_outside_of_function),
[],
[],
get_indentation_level()
)
delimeters_put_outside_of_function()
org_logger.add_function_delimeter(delimeters_put_outside_of_function,
nameof(delimeters_put_outside_of_function),
[],
[],
get_indentation_level(),
begin_or_end="end"
)
def main():
""" the main function """
...
if __name__ == "__main__":
main()
...
```
#### File: video-manipulation/delete-concatenated-videos/delete-concatenated-videos.py
```python
import pathlib
import os
path_to_current_directory = pathlib.Path(__file__).parent.absolute()
current_directory_name = os.path.basename(path_to_current_directory )
video_extensions = ['.mp4', '.m4v', '.mkv', '.ts', '.avi', '.webm', '.flv', '.mov', '.wmv', '.vob']
def is_video(name):
is_video = False
for ext in video_extensions:
if name.lower().endswith(ext):
is_video = True
return is_video
# check for a video file with the same name as the directory in the immediate directory
video_path = str(path_to_current_directory) + '/' + current_directory_name + '.mp4'
if os.path.isfile(video_path):
print(video_path)
os.remove(video_path)
# check for video file with the same name as the directory in every sub directory
for root, dirs, files in os.walk(".", topdown=True):
for name in files:
if is_video(name):
if name == str(current_directory_name + ".mp4"):
path = str(path_to_current_directory) + '/' + root[2:] + '/' + name
os.remove(path)
```
#### File: get_all_videos_in_a_directory/tests/unit_tests_get_all_videos_in_a_directory.py
```python
import unittest
import importlib.util # needed for importing scripts using the scripts path
# cSpell:disable
python_scripts_folder_path : str = "/home/jolitp/Projects/automation_scripts/"
# cSpell:enable
subfolder : str = "src/multiple_files_operations/get_all_videos_in_a_directory/"
spec = importlib.util.spec_from_file_location("get_all_videos_in_a_directory",
python_scripts_folder_path + subfolder + "get_all_videos_in_a_directory.py")
get_all_videos_in_a_directory_script = importlib.util.module_from_spec(spec)
spec.loader.exec_module(get_all_videos_in_a_directory_script)
class UnitTest_get_all_videos_in_a_directory(unittest.TestCase):
"""
    unit tests for get_all_videos_in_a_directory.py
"""
# region tests (...):
# region def (...):
def test_given_a_relative_path_should_raise_ValueError(self):
"""
when the function get_all_videos_in_a_directory(...)
is given a relative path it should raise a ValueError exception.
"""
# setup
relative_path : str = "path"
# act
with self.assertRaises(ValueError) as error:
get_all_videos_in_a_directory_script \
.get_all_videos(relative_path)
# assert
self.assertTrue("directory_path must be an absolute path" in str(error.exception))
# endregion def (...):
# endregion tests (...):
if __name__ == "__main__":
print("get_all_videos_in_a_directory.__main__")
unittest.main()
```
#### File: src/convert_videos/convert_videos.py
```python
import os
import sys
from pathlib import Path
import csv
from collections import Counter
from typing import Tuple
import subprocess
from subprocess import *
import datetime
from natsort import natsorted, ns
from rich.console import Console
from rich.traceback import install as install_rich_traceback
from rich.panel import Panel
import snoop
install_rich_traceback()
MAX_NUMBER_OF_FOLDERS = None
CURRENT_FOLDER = None
NUMBER_OF_VIDEOS_IN_EACH_FOLDER = {}
CONSOLE = Console(record=True)
# TODO put function in a library
# region _remove_dot_from_extension
def _remove_dot_from_extension(
extensions
):
"""remove the dot from an extension
Args:
extensions (str or list): the extension
Returns:
the extension without the dot
"""
if isinstance(extensions, str):
ext : str = extensions
extensions = ext.replace(".","")
return extensions
# endregion _remove_dot_from_extension
# TODO put function in a library
# region filter_files_by_extension
def filter_files_by_extension(
files: list ,
extensions: list
):
"""
filter the files in a list to have only files of the given extensions
Args:
files (list):
the list of files
extensions (list):
the list of extensions
Returns:
filtered_files (list):
the list of files with only files of the given extensions
"""
filtered_files = []
for file in files:
file_ext = os.path.splitext(file)[-1].lower()
file_ext = _remove_dot_from_extension(file_ext)
for extension in extensions:
ext = _remove_dot_from_extension(extension).lower()
# print("ext \n", ext)
# print("file_ext \n", file_ext)
if file_ext == ext:
filtered_files.append(file)
return filtered_files
...
# endregion filter_files_by_extension
# TODO put function in a library
# region filter_subtitles
def filter_subtitles(
files: list
):
"""filter a list of files to contain only subtitle type files
the filtering happens based on the extension of the files
Args:
files (list): the list of files
Returns:
        subtitles (list): the list of subtitle files
"""
#cSpell:words ttml dfxp
subtitles_extensions = [
"srt",
"vtt",
"ssa",
"ttml",
"sbv",
"dfxp",
]
return filter_files_by_extension(files, subtitles_extensions)
...
# endregion filter_subtitles
# TODO put function in a library
# region filter_videos
def filter_videos(
files: list
):
"""filter a list of files to contain only video type files
the filtering happens based on the extension of the files
Args:
files (list): the list of files
Returns:
videos (list): the list of videos
"""
#cSpell:words webm vchd rmvb gifv xvid vidx
video_extensions = [
"WEBM",
"MPG","MP2", "MPEG", "MPE", "MPV",
"OGV","OGG",
"MP4", "M4P", "M4V",
"AVI",
"WMV",
"MOV","QT",
"FLV","SWF",
"F4V","F4P","F4A","F4B",
"VCHD",
"RMVB","RM",
"VOB",
"MKV",
"MTS", "M2TS", "TS",
"MNG",
"GIFV",
"GIF",
"DRC",
"XVID",
"VIDX",
"ASF",
"AMV",
"M2V",
"SVI",
"3GP",
"MXF",
"ROQ",
"NSV",
"3G2",
]
return filter_files_by_extension(files, video_extensions)
...
# endregion filter_videos
# region load_video_infos_csv ========================== load_video_infos_csv
def load_video_infos_csv(
file_path: str,
debug_function: bool = None
):
"""load the values of a .csv file
containing the info of all videos
in the directory
Args:
file_path (str): the file path to the file
debug_function (bool, optional): Defaults to None.
Returns:
(list(dict)): the values from .csv file parsed into a dictionary
"""
# debug_function = True # comment to toggle
if debug_function:
print()
print("START load_video_infos_csv")
print("---------=---------=---------=---------=---------=---------=---------=---------=")
print("| def load_video_infos_csv( |")
print(f"| file_path = {file_path}")
print("| |")
video_info_list = []
with open(file_path, "r") as input_file:
csv_reader = csv.DictReader(input_file)
for ordered_dict in csv_reader:
video_info_list.append(ordered_dict)
if debug_function:
print()
print("return video_infos = [ |")
for element in video_info_list:
print("| { |")
for key in element:
print(f"| '{key}': {element[key]}")
print("| } |")
print("| ]")
print("| } |")
print("---------=---------=---------=---------=---------=---------=---------=---------=")
print("END load_video_infos_csv")
return video_info_list
...
# endregion load_video_infos_csv ------------------- load_video_infos_csv
# region assemble_command ============================= assemble_command
def assemble_command(
src_path:Path,
dst_path:Path,
subtitle_path:Path,
output_dimensions:Tuple[int,int]
):
"""
assemble the ffmpeg command to convert videos
Args:
video_info_list (dict): the data for the videos
"""
# ffmpeg -i input.mkv -c:v libx264 -c:a aac -s 1080:720 -aspect 16:9 output.mkv
# ❯ ffmpeg -y -i input.mkv -c:v libx264 -c:a aac -s 1080:720 -aspect 16:9 -vf subtitles=input.vtt output.mkv
ffmpeg_command = ["ffmpeg"]
ffmpeg_flags = ["-n"]
video_codec_flags = ["-c:v", "libx264"]
audio_codec_flags = ["-c:a", "aac"]
width, height = output_dimensions
dimension_flags = ["-s", f"{width}:{height}"]
subs_flags = []
if subtitle_path:
subs_flags = ["-vf", f"subtitles={subtitle_path}"]
src_flags = ["-i", str(src_path)]
dst_flags = [ str(dst_path)]
pv_command = ["pv", str(src_path), "|"]
ffmpeg_command_pv_additions = ["-i","pipe:0", "-v", "warning"]
final_command = []
final_command.extend(pv_command)
final_command.extend(ffmpeg_command)
final_command.extend(ffmpeg_command_pv_additions)
final_command.extend(ffmpeg_flags)
final_command.extend(src_flags)
final_command.extend(video_codec_flags)
final_command.extend(audio_codec_flags)
final_command.extend(dimension_flags)
final_command.extend(subs_flags)
final_command.extend(dst_flags)
return final_command
# endregion assemble_command -------------------------- assemble_command
# region get_src_paths ================================== get_src_paths
def get_src_paths(src_data):
src_paths = []
for video_info in src_data:
src_path = video_info["path"]
src_basename = Path(src_path)
cwd = Path(os.getcwd())
src_path = cwd / src_basename
src_paths.append(src_path)
...
return src_paths
...
# endregion get_src_paths ------------------------------- get_src_paths
# region are_aspect_ratios_the_same ================== same_aspect_ratio
def are_aspect_ratios_the_same(src_data):
same_ar = True
if src_data:
first_ar = src_data[0]["aspect_ratio"]
# CONSOLE.print(first_ar)
for data in src_data:
if data["aspect_ratio"] != first_ar:
same_ar = False
else:
same_ar = None
return same_ar
# endregion are_aspect_ratios_the_same --------------- same_aspect_ratio
# region get_dst_paths ================================== get_dst_paths
# @snoop
def get_dst_paths(video_info_list):
dst_paths = []
for video_info in video_info_list:
src_path = video_info["path"]
src_path = str(src_path).replace("videos", "converted")
src_basename = Path(src_path)
cwd = Path(os.getcwd())
dst_path = cwd / src_basename
dst_paths.append(dst_path)
...
return dst_paths
...
# endregion get_dst_paths ------------------------------- get_dst_paths
# region most_common_dimension ======================= most_common_dimension
def most_common_dimension(src_data:list):
src_dimmensions_list = []
for data in src_data:
dimmension = data["dimensions"]
src_dimmensions_list.append(dimmension)
c = Counter(src_dimmensions_list)
most_common = c.most_common(1)
most_common_dimension = most_common[0][0]
return most_common_dimension
# endregion most_common_dimension -------------------- most_common_dimension
# region simplify_data ================================== simplify_data
def simplify_data(video_info_list:dict):
src_videos_data = []
for video_info in video_info_list:
src_video_path = Path(video_info["full_path"])
src_video_width = int(video_info["width"])
src_video_height = int(video_info["height"])
src_video_dimensions = (src_video_width, src_video_height)
src_video_ar = float(video_info["aspect_ratio"])
src_video_arf:str = video_info["aspect_ratio_fraction"]
src_video_arf = src_video_arf.split(":")
src_video_arf = (int(src_video_arf[0]), int(src_video_arf[1]))
src_data = {
"path" : src_video_path,
"dimensions" : src_video_dimensions,
"aspect_ratio" : src_video_ar,
"aspect_ratio_fraction" : src_video_arf,
}
src_videos_data.append(src_data)
return src_videos_data
# endregion simplify_data ----------------------------- simplify_data
# region get_subs_path_from_video_path == get_subs_path_from_video_path
def get_subs_path_from_video_path(src_video_path:Path):
cwd = os.getcwd()
videos_folder_path = src_video_path.parent
videos_folder_basename = os.path.basename(videos_folder_path)
subs_folder_basename = Path(videos_folder_basename.replace("videos", "subs"))
subs_folder_path = cwd / subs_folder_basename
subsubtitle_file = None
subs_folder_exists = os.path.exists(subs_folder_path)
if subs_folder_exists:
src_video_basename = os.path.basename(src_video_path)
basename_no_ext = os.path.splitext(src_video_basename)[0]
# search for filename in subs directory
files_in_subfolder = os.listdir(subs_folder_path)
for file in files_in_subfolder:
if basename_no_ext in file:
subsubtitle_file = subs_folder_path / Path(file)
return subsubtitle_file
else:
return None
# endregion get_subs_path_from_video_path -- get_subs_path_from_video_path
# region create_converted_folder ======================= create_converted_folder
def create_converted_folder(videos_folder_path:Path):
videos_folder_basename = os.path.basename(videos_folder_path)
videos_folder_parent = videos_folder_path.parent
converted_basename = videos_folder_basename.replace("videos", "converted")
converted_path = videos_folder_parent / Path(converted_basename)
if not os.path.exists(converted_path):
os.mkdir(converted_path)
# endregion create_converted_folder -------------------- create_converted_folder
# region print_entire_line ================================== print_entire_line
def print_entire_line(
text:str,
foreground:str="white",
background:str="black",
):
console_width = CONSOLE.width
remaining_width = console_width - len(text)
output_style = "[" + foreground + " on " + background + "]"
CONSOLE.print(output_style + \
" "*int(remaining_width/2) + \
text + \
" "*int(remaining_width/2) + \
"[/]")
# endregion print_entire_line ----------------------------------- print_entire_line
# region print_info_panel ============================= print_info_panel
def print_info_panel(
video_data,
basename,
src_path,
dst_path,
subs_path,
output_dimension
):
# print info panel
conversion_info = ""
conversion_info += "Paths:\n"
conversion_info += \
f"[yellow]video name[/]: [cyan]{basename}[/]\n"
conversion_info += \
f"[yellow]source path[/]: [cyan]{src_path}[/]\n"
conversion_info += \
f"[yellow]destination path[/]: [cyan]{dst_path}[/]\n"
conversion_info += \
f"[yellow]subs path[/]: [cyan]{subs_path}[/]\n"
conversion_info += "\nData:\n"
for data in video_data:
conversion_info += \
f"[yellow]{data}[/]: [cyan]{video_data[data]}[/]\n"
conversion_info += \
f"[yellow]output dimension[/]: [cyan]{output_dimension}[/]"
CONSOLE.print(Panel(conversion_info, title="video info"))
...
# endregion print_info_panel --------------------------- print_info_panel
# region print_conversion_summary ================== print_conversion_summary
def print_conversion_summary(folder_path,index):
# print panel with conversion summary
current_video_number = index + 1
videos_converted_string = ""
number_of_videos_in_this_folder = \
NUMBER_OF_VIDEOS_IN_EACH_FOLDER[folder_path]["number_of_videos"]
remaining_number_of_videos = \
number_of_videos_in_this_folder - current_video_number
converted_videos_metter = "●" * (index)
current_video_metter = "◐"
remaining_video_metter = "○" * remaining_number_of_videos
videos_converted_string += converted_videos_metter + \
current_video_metter + remaining_video_metter
CONSOLE.print(Panel(videos_converted_string,
title="videos converted"))
# endregion print_conversion_summary --------------- print_conversion_summary
# region convert_video ==================================== convert_video
def convert_video(
index,
videos_data,
folder_path,
src_paths,
dst_paths,
output_dimension
):
max_videos = len(videos_data)
current_video_number = index + 1
src_path = src_paths[index]
subs_path = get_subs_path_from_video_path(src_path)
dst_path = dst_paths[index]
create_converted_folder(folder_path)
command = assemble_command(src_path,
dst_path,
subs_path,
output_dimension)
cmd_string = subprocess.list2cmdline(command)
src_basename = os.path.basename(src_path)
msg = f"converting: {src_basename}"
print_entire_line(msg, "#03014f", "#cccccc")
print_info_panel(videos_data[index],
src_basename,
src_path,
dst_path,
subs_path,
output_dimension)
new_cmd_string = ""
for part in command:
part = add_random_color(part,min=120)
new_cmd_string += " " + part + "\n"
CONSOLE.print(Panel(new_cmd_string, title="command parts"))
CONSOLE.print(Panel(cmd_string, title="command called"))
print_conversion_summary(folder_path,index)
print_progress_bar(
"folder",
CURRENT_FOLDER,
MAX_NUMBER_OF_FOLDERS,
foreground="bold #000000",
background="#aaaaaa")
print_progress_bar(
"video",
current_video_number,
max_videos,
foreground="bold #000000",
background="#aaaaaa")
os.system(cmd_string)
...
# endregion convert_video ------------------------------- convert_video
# region process_folder ================================== process_folder
def process_folder(
folder_path:Path,
debug_function = None
):
"""do the actual logic of the script on a specified folder
Args:
folder_path (Path): the path to the folder
debug_function (bool, optional): Defaults to None.
"""
# debug_function = True # comment to toggle
if debug_function:
print()
print("START === process_folder() === START ")
print()
print("def process_folder(")
print(f"| folder_path:Path = {folder_path}")
print()
csv_file_path = folder_path / ".generated/video_infos.csv"
csv_file_exists = os.path.isfile(csv_file_path)
videos_data = None
if csv_file_exists:
videos_data = load_video_infos_csv(csv_file_path)
if videos_data:
src_data = simplify_data(videos_data)
src_paths = get_src_paths(src_data)
dst_paths = get_dst_paths(src_data)
are_same_ar = are_aspect_ratios_the_same(src_data)
if are_same_ar:
output_dimension = most_common_dimension(src_data)
for index, _ in enumerate(videos_data):
convert_video(
index,
videos_data,
folder_path,
src_paths,
dst_paths,
output_dimension)
else:
CONSOLE.print("[bold red]the aspect ratio of videos are not the same![/]")
with open(folder_path / "_000_different_aspect_ratio", "w") as file:
file.write("different_aspect_ratio")
output_dimension = most_common_dimension(src_data)
for index, _ in enumerate(videos_data):
convert_video(
index,
videos_data,
folder_path,
src_paths,
dst_paths,
output_dimension)
else:
CONSOLE.print(f"[bold red]there are no videos in {folder_path}[/]")
else:
CONSOLE.print("[bold red]no csv file found at[/]: \n{}"\
.format(csv_file_path))
if debug_function:
print()
print("END === process_folder() === END")
print()
# endregion ============= ----------------------------- process_folder
# TODO add to function library
# region add_color =========================================== add_color
def add_color(color, msg):
return "[" + color + "]" + msg + "[/]"
# endregion add_color ---------------------------------------- add_color
# TODO add to function library
# region add_random_color =========================================== add_random_color
def add_random_color(msg, min=0,max=255):
import random
color = ""
r = lambda: random.randint(min,max)
color = '#%02X%02X%02X' % (r(),r(),r())
return "[" + color + "]" + msg + "[/]"
# endregion add_random_color ---------------------------------------- add_random_color
# region print_progress_bar ============================== print_progress_bar
def print_progress_bar(msg, current_value, total_value, background, foreground):
text = f"{msg} : {current_value}/{total_value}"
console_width = CONSOLE.width
remaining_width = console_width - len(text)
output_style = "[" + foreground + " on " + background + "]"
filled = "▰"
empty = "▱"
section_length = console_width / total_value
filled_length = section_length * current_value
empty_length = console_width - filled_length
CONSOLE.print(output_style + \
" "*int(remaining_width/2) + \
text + \
" "*int(remaining_width/2) + \
"[/]")
CONSOLE.print(output_style + \
filled*int(filled_length)+ \
empty*int(empty_length) + \
"[/]")
# endregion print_progress_bar ----------------------------- print_progress_bar
# region videos_converted_in_each_folder ============= videos_converted_in_each_folder
def videos_converted_in_each_folder():
global NUMBER_OF_VIDEOS_IN_EACH_FOLDER
for key,value in NUMBER_OF_VIDEOS_IN_EACH_FOLDER.items():
CONSOLE.print("key:{}\nvalue:{}".format(key,value))
...
...
# endregion videos_converted_in_each_folder --------- videos_converted_in_each_folder
# region get_number_of_videos_in_folders ============= get_number_of_videos_in_folders
def get_number_of_videos_in_folders(videos_folders:list):
global NUMBER_OF_VIDEOS_IN_EACH_FOLDER
for video_folder in videos_folders:
this_folder_items = os.listdir(video_folder)
this_folder_videos = filter_videos(this_folder_items)
number_of_videos_in_this_folder = len(this_folder_videos)
data = {
"number_of_videos": number_of_videos_in_this_folder,
}
NUMBER_OF_VIDEOS_IN_EACH_FOLDER[video_folder] = data
...
# endregion get_number_of_videos_in_folders --------- get_number_of_videos_in_folders
# region main ========================================================= main
def main(
):
"""main function from script
"""
cwd = Path(os.getcwd())
all_items = os.listdir(cwd)
all_videos_folders = []
for item in all_items:
if os.path.isdir(item):
if "videos" in item:
folder_path = cwd / Path(item)
all_videos_folders.append(folder_path)
global MAX_NUMBER_OF_FOLDERS
MAX_NUMBER_OF_FOLDERS = len(all_videos_folders)
# for index, folder in enumerate(all_videos_folders):
from rich.progress import Progress
length = len(all_videos_folders)
# TODO put the amount of videos in each folder into a global variable
# to use in the summary before converting
get_number_of_videos_in_folders(all_videos_folders)
for index, folder in enumerate(all_videos_folders):
global CURRENT_FOLDER
CURRENT_FOLDER = index + 1
print_progress_bar(
"folder",
CURRENT_FOLDER,
MAX_NUMBER_OF_FOLDERS,
foreground="bold #000000",
background="#cccccc")
process_folder(folder)
# progress.update(process_folder_task, advance=1)
# endregion main ------------------------------------------------------ main
# region current_test ================================================= current_test
def current_test():
input = [
(1920, 1080),
(1920, 1080),
(1080, 720),
]
CONSOLE.print("input {}".format(input))
expected_output = (1920, 1080)
CONSOLE.print("expected_output {}".format(expected_output))
actual_output = most_common_dimension(input)
CONSOLE.print("actual_output {}".format(actual_output))
assertion = expected_output == actual_output
CONSOLE.print("assertion {}".format(assertion))
# endregion current_test ---------------------------------------------- current_test
# TODO put on library
# region change_background_of_html ============================ change_background_of_html
def change_background_of_html(html_file):
lines = None
with open(html_file, "r") as file:
lines = file.readlines()
for index, current_line in enumerate(lines):
if " background-color: #ffffff;\n" in current_line:
lines[index] = current_line.replace("#ffffff","#000000")
...
...
with open(html_file, "w") as file:
for line in lines:
file.write(line)
...
# endregion change_background_of_html ------------------------- change_background_of_html
# region if __name__ == "__main__": =============== if __name__ == "__main__":
if __name__ == "__main__":
print()
msg = "START call_command_in_all_folders.py START"
print(msg)
print()
main()
cwd = Path(os.getcwd())
now = datetime.datetime.now()
now = str(now).split(".")[0]
save_path = cwd / ".generated" / f"{now}.html"
CONSOLE.print("saving output to:",save_path)
from rich.terminal_theme import TerminalTheme
CONSOLE.save_html(save_path)
change_background_of_html(save_path)
# current_test()
msg = "END call_command_in_all_folders.py END"
print()
print(msg)
print()
# endregion if __name__ == "__main__": ------------ if __name__ == "__main__":
```
#### File: src/separate_torrent_types/separate_torrent_types.py
```python
import os
def main():
cwd = os.getcwd()
print(cwd)
...
if __name__ == "__main__":
print()
msg = "START separate_torrent_types START"
print(msg)
main()
msg = "END separate_torrent_types END"
print(msg)
print()
...
```
#### File: src/separate_torrent_types/tests_separate_torrent_types.py
```python
import separate_torrent_types
# from separate_torrent_types.separate_torrent_types import main
def main():
# separate_torrent_types.main()
...
if __name__ == "__main__":
print()
msg = "START tests_separate_torrent_types START"
print(msg)
main()
msg = "END tests_separate_torrent_types END"
print(msg)
print()
...
```
#### File: src/separate_videos_12_hours/separate_videos_12_hours.py
```python
import os
import csv
from pathlib import Path
import time
import datetime
from natsort import natsorted, ns
from rich.console import Console
from rich.table import Table
from rich.traceback import install
import snoop
install()
CONSOLE = Console(record=True)
# region load_video_infos_csv
# region load_video_infos_csv header
def load_video_infos_csv(
file_path: str,
debug_function: bool = None
):
# endregion load_video_infos_csv header
# region load_video_infos_csv docstring
"""load the values of a .csv file
containing the info of all videos
in the directory
Args:
file_path (str): the file path to the file
debug_function (bool, optional): Defaults to None.
Returns:
(list(dict)): the values from .csv file parsed into a dictionary
"""
# endregion load_video_infos_csv docstring
# region load_video_infos_csv implementation
# debug_function = True # comment to toggle
video_info_list = []
with open(file_path, "r") as input_file:
csv_reader = csv.DictReader(input_file)
for ordered_dict in csv_reader:
video_info_list.append(ordered_dict)
return video_info_list
# endregion load_video_infos_csv implementation
# endregion load_video_infos_csv
# region separate_videos_in_sections
# region separate_videos_in_sections header
# @snoop(watch_explode=['video_data'])
def separate_videos_in_sections(
video_data_list: list
):
# endregion separate_videos_in_sections header
# region separate_videos_in_sections docstring
"""separate the videos in sections of 12 hours or less duration
43200 seconds == 12 hours
Args:
video_data_list (list): the list of video data
Returns:
(list) : a list of sections
"""
# endregion separate_videos_in_sections docstring
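    # Worked example (hypothetical durations, added for clarity): four videos of
    # 5 hours (18000 s) each yield sections [[0, 1], [2, 3]], since adding a third
    # video to a section would push its accumulated duration past the ~12 h limit.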
# region separate_videos_in_sections implementation
sections = []
    # 12 hours in seconds is actually 43200
# use less to be sure
_12_hours_in_seconds = 43000
_12_hours_in_string = "12:00:00"
acc_duration = 0.0
section = []
for index, video_data in enumerate(video_data_list):
# order = video_data["alphabetical_order"]
duration = float(video_data["duration_seconds"])
# dur_h = video_data["duration_hours"]
# dur_h_obj = time.strptime(dur_h, '%H:%M:%S')
acc_duration += duration
acc_dur_h = datetime.timedelta(seconds=acc_duration)
if acc_duration < _12_hours_in_seconds:
section.append(index)
last_index = len(video_data_list) -1
if index == last_index:
sections.append(section)
...
else:
sections.append(section)
acc_duration = duration
section = [index]
last_index = len(video_data_list) -1
if index == last_index:
sections.append(section)
...
...
return sections
...
# endregion separate_videos_in_sections implementation
# endregion separate_videos_in_sections
# region main
# @snoop
def main():
cwd = Path(os.getcwd())
print(cwd)
all_nested_videos = []
videos_folder_path = cwd / "videos"
csv_file_path = None
move_locations = []
diretories_to_create = []
videos_folder_exists = os.path.isdir(videos_folder_path)
if videos_folder_exists:
all_nested_videos = os.listdir(videos_folder_path)
csv_file_path = \
videos_folder_path / ".generated/video_infos.csv"
csv_file_exists = os.path.isfile(csv_file_path)
if csv_file_exists:
video_data_list = load_video_infos_csv(csv_file_path)
sections = separate_videos_in_sections(video_data_list)
for index, section in enumerate(sections):
videos_folder_numbered_path = \
Path(str(videos_folder_path) + str(index + 1))
diretories_to_create.append(videos_folder_numbered_path)
for index in section:
element_data = video_data_list[index]
full_path = Path(element_data["full_path"])
parent_path = full_path.parent
basename = Path(os.path.basename(full_path))
src = full_path
dst = videos_folder_numbered_path / basename
move_locations.append((src, dst))
...
...
else:
CONSOLE.print()
CONSOLE.print("[red]There is no videos/ folder[/red]")
CONSOLE.print()
for dir in diretories_to_create:
if not os.path.isdir(dir):
CONSOLE.print("creating directory:\n{}"\
.format(dir))
os.mkdir(dir)
...
else:
CONSOLE.print("trying to create directory:\n{}"\
.format(dir))
CONSOLE.print("directory already exists.", style="bold red")
...
...
for loc in move_locations:
src, dst = loc
basename = os.path.basename(src)
print()
CONSOLE.print("moving \"{}\"\nfrom: \"{}\"\nto: \"{}\""\
.format(basename, src, dst))
os.rename(src, dst)
...
# endregion main
# region if __name__ == "__main__":
if __name__ == "__main__":
print()
msg = "START concatenate_videos.py START"
print(msg)
print()
try:
main()
except:
CONSOLE.print_exception()
msg = "END concatenate_videos.py END"
print()
print(msg)
print()
...
# endregion if __name__ == "__main__":
``` |
{
"source": "jolitti/rizoma-utils",
"score": 3
} |
#### File: src/rizomath/interval.py
```python
from dataclasses import dataclass
@dataclass(frozen=True)
class Interval:
start:int
end:int
def __post_init__(self):
"""Assures correct ordering of start and end"""
a,b = self.start,self.end
object.__setattr__(self,"start",min(a,b))
object.__setattr__(self,"end",max(a,b))
def clone(self) -> "Interval":
return Interval(self.start,self.end)
def get_length(self) -> int:
"""
[DEPRECATED]
Length of the interval, ends included
"""
return len(self)
def __len__(self) -> int:
return self.end - self.start + 1
def __lt__(self,other:"Interval") -> bool:
"""
        Is this interval completely contained within other? (no overlapping border)
"""
if not isinstance(other,Interval): return False
a,b,c,d = self.tup() + other.tup()
return a>c and b<d
def __le__(self,other:"Interval") -> bool:
"""
        Is this interval completely contained within other? (allows overlapping border)
"""
if not isinstance(other,Interval): return False
a,b,c,d = self.tup() + other.tup()
return a>=c and b<=d
def tup(self) -> tuple[int,int]:
"""Get tuple of extremes"""
return (self.start,self.end)
def __and__(self,other:"Interval") -> "Interval":
"""
Get the intersection of the two intervals, as an Interval
Returns None if the two intervals don't intersect
"""
if not isinstance(other,Interval): return None
a,b,c,d = self.tup() + other.tup()
if b<c or d<a: return None
new_min, new_max = max(a,c),min(b,d)
return Interval(new_min,new_max)
def __sub__(self,other:"Interval") -> list["Interval"]:
"""
List of 0-2 intervals that belong to this Interval, but not to the other
"""
if not isinstance(other,Interval): return [] # Exclude non-intervals
if self <= other: return [] # Exclude cases when self is completely in other
a,b,c,d = self.tup() + other.tup() # Get quick refs for interval limits
if b<c or d<a: return [self.clone()] # Cover cases where intvs don't overlap
if other < self: # Cover cases where there's exactly two sub-intervals
return [Interval(a,c-1),Interval(d+1,b)]
else:
if d < b: return [Interval(d+1,b)]
else: return [Interval(a,c-1)]
def segment(self,other:"Interval") -> tuple["Interval","Interval","Interval"]:
"""
Get 3-tuple of (Interval|None) representing (leftmost,a&b,rightmost)
"""
if not isinstance(other,Interval): return (None, self.clone(), None)
a,b,c,d = self.tup() + other.tup()
# TODO
def __str__(self) -> str:
"""Common interval representation (e.g. '1..5')"""
return f"{self.start}..{self.end}"
def str_to_iv(s:str) -> Interval:
"""Turn strings of the type "1..5" into an interval"""
_s = s.strip()
a,b,*l = _s.split("..")
a,b = map(int,[a,b])
return Interval(a,b)
``` |
{
"source": "joliva/nlp-nltk-demo",
"score": 2
} |
#### File: nlp-nltk-demo/code/parse.py
```python
import re
from nlputils import *
from udb import *
###############################################################################
### TDT4 internal format:
###
### format = 'TDT4'
### bmeta = { sfile, ... }
### chunks = []
### chunk = [ text, cmeta={ stype,slang,sorg,date,btopic,ntopic, ... } ]
###
### Notes:
### 1. text: text contained in the <TEXT>..</TEXT> element
### 2. cmeta{}: dictionary for storing chunk meta-data
### 3. bmeta{}: dictionary for storing bundle meta-data
###
###############################################################################
### regex templates to extract fields from TDT4 source data
RX_DOC = "<DOC>(.*?)</DOC>"
RX_STYPE = "<SOURCE_TYPE>(.*?)</SOURCE_TYPE>"
RX_SLANG = "<SOURCE_LANG>(.*?)</SOURCE_LANG>"
RX_SORG = "<SOURCE_ORG>(.*?)</SOURCE_ORG>"
RX_DDATE = "<DOC_DATE>(.*?)</DOC_DATE>"
RX_BTOPIC = "<BROAD_TOPIC>(.*?)</BROAD_TOPIC>"
RX_NTOPIC = "<NARROW_TOPIC>(.*?)</NARROW_TOPIC>"
RX_TEXT = "<TEXT>(.*?)</TEXT>"
###############################################################################
### Parse TDT4 source content and return as a UDB encapsulated TDT4 object
###############################################################################
def parse_to_udb (src_data, src_mdata, options):
# options are unused at this time
bundle = udb()
bundle.format = 'TDT4' # TDT4 internal format
bundle.bmeta = src_mdata
### extract chunks and meta data and insert into UDB bundle
### interate to extract DOC elements
rx_doc = re.compile(RX_DOC,re.DOTALL)
iter = rx_doc.finditer(src_data)
for match in iter:
doc = match.group(1)
chunk=[]
cmeta = {} # cmeta dictionary
### find SOURCE_TYPE element
rx_stype = re.compile(RX_STYPE, re.DOTALL)
stype = rx_stype.search(doc)
if stype != None:
cmeta['stype'] = stype.group(1)
else:
cmeta['stype'] = None
print "Warning: SOURCE_TYPE missing in DOC"
### find SOURCE_LANG element
rx_slang = re.compile(RX_SLANG, re.DOTALL)
slang = rx_slang.search(doc)
if slang != None:
cmeta['slang'] = slang.group(1)
else:
cmeta['slang'] = None
print "Warning: SOURCE_LANG missing in DOC"
### find SOURCE_ORG element
rx_sorg = re.compile(RX_SORG, re.DOTALL)
sorg = rx_sorg.search(doc)
if sorg != None:
cmeta['sorg'] = sorg.group(1)
else:
cmeta['sorg'] = None
print "Warning: SOURCE_ORG missing in DOC"
### find DOC_DATE element
rx_ddate = re.compile(RX_DDATE, re.DOTALL)
ddate = rx_ddate.search(doc)
if ddate != None:
cmeta['ddate'] = ddate.group(1)
else:
cmeta['ddate'] = None
print "Warning: DOC_DATE missing in DOC"
### find BROAD_TOPIC element
rx_btopic = re.compile(RX_BTOPIC, re.DOTALL)
btopic = rx_btopic.search(doc)
if btopic != None:
cmeta['btopic'] = btopic.group(1)
else:
cmeta['btopic'] = None
print "Warning: BROAD_TOPIC missing in DOC"
### find NARROW_TOPIC element
rx_ntopic = re.compile(RX_NTOPIC, re.DOTALL)
ntopic = rx_ntopic.search(doc)
if ntopic != None:
cmeta['ntopic'] = ntopic.group(1)
else:
cmeta['ntopic'] = None
print "Warning: NARROW_TOPIC missing in DOC"
### find TEXT element
rx_text = re.compile(RX_TEXT, re.DOTALL)
text = rx_text.search(doc)
if text != None:
chunk_text = text.group(1)
else:
chunk_text = None
print "Warning: TEXT missing in DOC"
chunk.append(chunk_text)
chunk.append(cmeta)
bundle.chunks.append(chunk)
return bundle
``` |
{
"source": "jolivaresc/corpus",
"score": 2
} |
#### File: Corpus/controllers/default.py
```python
import os,sys
def index():
#response.flash = os.getcwd()
return locals()
def other():
'''
    this would go in index
<form action="{{=URL('other')}}">
Your name?
<input name="y_name"/>
<br>
<input type="submit"/>
</form>
x = request.args
y = request.vars
return "totototoo r=%s %s" % (x,SPAN(y))
path = os.getcwd()
response.flash = path
model_tf = tfIdf()
l = model_tf.search(request.vars.word)
probs={}
for i,j in l:
probs[i]=j
    # see how to receive arguments
response.flash = 'Frase: '+request.vars.word
message = 'WORD %s!' % (request.vars.word)
return dict(form=sorted(probs.items(),reverse=True))
'''
response.flash = os.getcwd()+'/applications/Corpus'
path=os.getcwd()
sys.path.append(os.getcwd()+'/applications/Corpus/controllers')
sys.path.append(os.getcwd()+'/applications/Corpus/controllers/')
os.chdir(os.getcwd()+'/applications/Corpus/controllers/')
from tfIdf import tfIdf
model = tfIdf()
os.chdir(path)
l = model.search(request.vars.Palabra)
probs={}
for i,j in l:
if i != 0:
probs[i]=j
os.chdir(path)#
message = 'WORD %s!' % (request.vars.Palabra)
return dict(form=sorted(probs.items(),reverse=True))
def infmutua():
response.flash = os.getcwd()
path=os.getcwd()
sys.path.append(os.getcwd()+'/applications/Corpus/controllers')
os.chdir(os.getcwd()+'/applications/Corpus/controllers')
from MI import MI
model = MI()
mi = model.eval(request.vars.Palabra1,request.vars.Palabra2)
message = 'WORD %s %s' % (request.vars.Palabra1,request.vars.Palabra2)
os.chdir(path)
return dict(mi=mi)
def colocaciones():  # concordances
response.flash = os.getcwd()
message=request.vars.Query
path=os.getcwd()
sys.path.append(os.getcwd()+'/applications/Corpus/controllers')
sys.path.append(os.getcwd()+'/applications/Corpus/controllers/')
os.chdir(os.getcwd()+'/applications/Corpus/controllers')
from concordance import get
res = get(request.vars.Query).split('\n')
os.chdir(path)
return dict(res=res)
def feelings():
response.flash = os.getcwd()
message=request.vars.Palabra
path=os.getcwd()
sys.path.append(os.getcwd()+'/applications/Corpus/controllers')
sys.path.append(os.getcwd()+'/applications/Corpus/controllers/')
os.chdir(os.getcwd()+'/applications/Corpus/controllers')
from Bayes import Bayes
model = Bayes()
tmp = model.evalStr(request.vars.Palabra)
os.chdir(path)
return dict(tmp=tmp)
def btn1():
#response.flash= os.getcwd()+'/Backend'
form = SQLFORM.factory(Field('Palabra',requires=IS_NOT_EMPTY())).process()
if form.accepted:
redirect(URL('other',vars={'Palabra':form.vars.Palabra}))
return locals()
def btn2():
form = SQLFORM.factory(Field('Palabra1',requires=IS_NOT_EMPTY()),
Field('Palabra2',requires=IS_NOT_EMPTY())).process()
if form.accepted:
redirect(URL('infmutua',vars={'Palabra1':form.vars.Palabra1,'Palabra2':form.vars.Palabra2}))
return dict(form=form)
def btn3():
form = SQLFORM.factory(Field('Query',requires=IS_NOT_EMPTY())).process()
if form.accepted:
redirect(URL('colocaciones',vars={'Query':form.vars.Query}))
return dict(form=form)
def btn4():
response.flash= os.getcwd()+'/Backend'
form = SQLFORM.factory(Field('Palabra',requires=IS_NOT_EMPTY())).process()
if form.accepted:
redirect(URL('feelings',vars={'Palabra':form.vars.Palabra}))
return locals()
def btn5():
return locals()
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/bulk_register
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
```
#### File: Corpus/controllers/MI.py
```python
from numpy import log2
from pickle import load
"""
* Class in charge of computing the mutual information between two tokens;
* it is used to determine whether the pair is a collocation or not.
"""
class MI:
def __init__(self):
self.words = load(open("./models/words.d",'r'))
self.ngrams = load(open("./models/ngrams.d","r"))
self.count = self.count()
def count(self):
cnt = 0
for i in self.words:
cnt += self.words[i]
return cnt
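# eval() below estimates pointwise mutual information from the raw counts:
#   PMI(w1, w2) = log2( P(w1_w2) / (P(w1) * P(w2)) )
# with P(w1_w2) = count(w1_w2) / N and P(w) = count(w) / N, where N is the
# total token count; a high PMI suggests the bigram behaves like a collocation.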
def eval(self,str1,str2):
try:
sup = float(self.ngrams[str1+"_"+str2])/float(self.count)
inf = float(self.words[str1]) * float(self.words[str2])
if inf <= 0 or sup <= 0:
return 0
else:
inf = inf/(float(self.count)*float(self.count))
return log2(sup/inf)
except:
return 0
```
#### File: Corpus/controllers/trainer.py
```python
from collections import defaultdict
from collections import Counter
from numpy import log10
from pickle import dump
def toFile(name,obj):
file = open(name,'w')
dump(obj,file)
file.close()
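# p() applies additive (Lidstone) smoothing to a dict of raw class counts:
# each count becomes (count + l) / (classFrec + lenWords * l), i.e. a smoothed
# conditional probability that never collapses to zero for unseen words.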
def p(di,lenWords,classFrec=50,l=0.01):
for i in di:
di[i] = (float(di[i])+l)/(float(classFrec)+(float(lenWords)*l))
return di
corpus = open("./corpus/fullcorpus.vrt",'r').read()
corpus = corpus.split("\n")
N = 100.0
dtf = defaultdict(set)
sarcFrec = set()
positivo = {}
negativo = {}
sarcasmo = {}
words = []
doc = 0
tf = {}
idf = {}
docs = {}
for i in corpus:
if i.startswith('<doc'):
doc +=1
tmp = i.split("\"")
docs[doc] = tmp[1]
tf[str(doc)] = 0
elif i.startswith('</doc'):
nothing = ""
else:
tmp = i.split("\t")
tf[str(doc)] += 1
words.append(tmp[0])
if doc < 51:
if tmp[0] in negativo:
negativo[tmp[0]] += 1
else:
negativo[tmp[0]] = 1
else:
if tmp[0] in positivo:
positivo[tmp[0]] += 1
else:
positivo[tmp[0]] = 1
if tmp[3].__eq__("Sarcasmo"):
sarcFrec.add(doc)
if tmp[0] in sarcasmo:
sarcasmo[tmp[0]] += 1
else:
sarcasmo[tmp[0]] = 1
dtf[tmp[0]].add(doc)
if tmp[0]+'_'+str(doc) in tf:
tf[tmp[0]+'_'+str(doc)] += 1
else:
tf[tmp[0]+'_'+str(doc)] = 1
ngrams = []
for i in range(0,len(words)-1):
ngrams.append(words[i]+"_"+words[i+1])
for i in dtf:
tmp = len(list(dtf[i]))
idf[i] = log10(N/float(tmp))
l = len(list(set(words)))
ngrams = Counter(ngrams)
words = Counter(words)
positivo = p(positivo,l)
negativo = p(negativo,l)
sarcasmo = p(sarcasmo,l,classFrec=len(list(sarcFrec)))
toFile("./models/ngrams.d",ngrams)
toFile("./models/words.d",words)
toFile("./models/tf.d",tf)
toFile("./models/idf.d",idf)
toFile("./models/index.d",docs)
toFile("./models/pos.d",positivo)
toFile("./models/neg.d",negativo)
toFile("./models/sar.d",sarcasmo)
``` |
{
"source": "jolivaresc/Flask_newbie",
"score": 2
} |
#### File: jolivaresc/Flask_newbie/data.py
```python
def Articles():
articles = [
{
'id':1,
'title':'Article one',
'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'author':'jloc',
'create':'15-06-2017'
},
{
'id':2,
'title':'Article two',
'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'author':'colj',
'create':'15-02-2017'
},
{
'id':3,
'title':'Article Three',
'body':'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'author':'jloc',
'create':'10-06-2017'
}
]
return articles
```
#### File: jolivaresc/Flask_newbie/Logger.py
```python
from time import strftime
def Logger(msg,app):
logger = open('logger.dat','a+')
logger.write('['+strftime("%c")+'] '+ msg +'\n')
app.logger.info(msg)
logger.close()
``` |
{
"source": "jolivaresc/simple_perceptron",
"score": 3
} |
#### File: jolivaresc/simple_perceptron/perceptron.py
```python
from random import choice,random
import numpy as np
import matplotlib.pyplot as plt
def set_data(target):
x = [np.array([0,0]),
np.array([0,1]),
np.array([1,0]),
np.array([1,1])]
bias = np.array([1 for _ in range(4)])
# inputs: [x1 | x0 | bias]
inputs = np.column_stack((x,bias))
# return data: [x1 | x0 | bias | target]
return [(np.array(i),j) for i,j in zip(inputs,target)]
heaviside = lambda x: 1 if x >= 0 else -1
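# train() below applies the classic perceptron update rule on one randomly
# chosen sample per epoch: w <- w + eta * (expected - heaviside(w . x)) * x.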
def train(target,w,eta=0.1,epochs=40):
# Activation function
errors = []
#w_tmp = []
# Updating weights
for _ in range(epochs):
x,expected = choice(target)
result = np.dot(w,x)
error = expected - heaviside(result)
errors.append(error)
w += eta*error*x
return [w,error]
def predict(inputs,w):
# inputs: X + bias
return 1 if np.dot(inputs,w) >= 0 else -1
def run():
# output for a nand gate
target = np.array([1,1,1,-1])
# Random weights
w = np.array([random() for _ in range(3)])
print("random weights: {0}".format(w))
nand = set_data(target)
w,error = train(nand,w,eta=0.1,epochs=65)
print("weights updated: {0}".format(w))
print("Predicting\tAproximation\tResult")
print("{0}\t\t{1:.5f}\t\t{2}".format([0,0],np.dot([0,0,1],w),predict([0,0,1],w)))
print("{0}\t\t{1:.5f}\t\t{2}".format([0,1],np.dot([0,1,1],w),predict([0,1,1],w)))
print("{0}\t\t{1:.5f}\t\t{2}".format([1,0],np.dot([1,0,1],w),predict([1,0,1],w)))
print("{0}\t\t{1:.5f}\t{2}".format([1,1],np.dot([1,1,1],w),predict([1,1,1],w)))
if __name__ == '__main__':
run()
``` |
{
"source": "joliveros/gdax-websocket",
"score": 2
} |
#### File: gdax-websocket/gdax_websocket/instrument.py
```python
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
from builtins import *
from gdax_websocket import constants
from gdax_websocket.websocket import GdaxWebsocket
from pyee import EventEmitter
import json
import alog
__all__ = ['Instrument']
class Instrument(EventEmitter):
def __init__(self,
symbol='BTC-USD',
channels=[],
shouldAuth=False,
max_table_length=constants.MAX_TABLE_LEN,
websocket=None):
EventEmitter.__init__(self)
self.channels = channels
if max_table_length > 0:
self.max_table_length = max_table_length
else:
self.max_table_length = constants.MAX_TABLE_LEN
self.shouldAuth = shouldAuth
self.symbol = symbol
self.websocket = websocket
self.data = {
'orderBookL2': [],
'instrument': []
}
channels = self.channels
symbol = self.symbol
shouldAuth = self.shouldAuth
websocket = self.websocket
self.websocket = GdaxWebsocket()
self.websocket.connect(
shouldAuth=shouldAuth,
websocket=websocket
)
self.websocket.on('subscribe', self.on_subscribe)
self.websocket.on('latency', self.on_latency)
self.channels = []
self.subscribe_to_channels(channels)
self.subscribe_to_instrument_channels(symbol, channels)
self.secureChannels = []
if shouldAuth:
self.subscribe_to_secure_instrument_channels(symbol, channels)
def on_latency(self, message):
alog.debug("# on_latency")
alog.debug(message)
latency = self.data.get('latency', [])
if 'latency' not in self.data:
self.data['latency'] = []
if len(self.data['latency']) > self.max_table_length - 1:
self.data['latency'].pop()
latency.append(message)
self.data['latency'] = latency
# calculate average latency
avg_latency = sum(latency)/len(latency)
self.emit('latency', avg_latency)
alog.debug("## avg latency: %s" % (avg_latency))
def get_latency(self):
return self.data['latency']
def subscribe_to_channels(self, channels):
# Subscribe to all channels by default
for channel in constants.CHANNELS:
if len(channels) > 0 and channel not in channels:
channel = None
if channel:
handler_name = "on_%s" % (channel)
handler = {}
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
else:
handler = self.on_channel
self.websocket.subscribe(channel, handler)
def subscribe_to_secure_channels(self, channels):
# Subscribe to all channels by default
for channel in constants.SECURE_CHANNELS:
if len(channels) > 0 and channel not in channels:
channel = None
if channel:
handler_name = "on_%s" % (channel)
handler = {}
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
else:
handler = self.on_channel
self.websocket.subscribe(channel, handler)
def subscribe_to_instrument_channels(self, symbol, channels):
# Subscribe to all channels by default
for channel in constants.INSTRUMENT_CHANNELS:
if len(channels) > 0 and channel not in channels:
channel = None
if channel:
self.subscribe_actions_for_channel(channel, symbol)
def subscribe_to_secure_instrument_channels(self, symbol, channels):
# Subscribe to all channels by default
for channel in constants.SECURE_INSTRUMENT_CHANNELS:
if len(channels) > 0 and channel not in channels:
channel = None
if channel:
self.subscribe_actions_for_channel(channel, symbol)
def subscribe_actions_for_channel(self, channel, symbol):
for action in constants.ACTIONS:
handler_name = "on_%s" % (channel)
handler = {}
if hasattr(self, handler_name):
handler = getattr(self, handler_name)
else:
handler = self.on_action
self.websocket.subscribe_action(action,
channel,
symbol,
handler)
def on_subscribe(self, channel):
self.channels.append(channel)
def all_channels(self):
allChannels = []
for channel in self.channels:
allChannels.append(channel)
for channel in self.secureChannels:
allChannels.append(channel)
return allChannels
def on_channel(self, message):
alog.debug("#on_channel")
alog.debug(message)
for item in message['data']:
self.prepend_to_table(message['table'], item)
def on_action(self, message):
self.emit('action', message)
return
table = message['table']
data = message['data']
alog.debug("on_action")
action = message['action']
if action == 'delete':
for item in data:
self.delete_from_table(table, item)
elif action == 'update' and 'id' in data[0]:
for item in data:
self.update_item_in_table(table, item)
elif action == 'partial' and 'id' not in data[0]:
self.data[table] = data[0]
elif action == 'insert' and 'id' not in data[0]:
self.update_keys_in_table(table, data[0])
elif action == 'partial' or action == 'insert':
for item in data:
self.prepend_to_table(table, item)
else:
self.update_keys_in_table(table, data[0])
self.emit(table, table, self.get_table(table))
def update_keys_in_table(self, table, update):
self.data[table].update(update)
def delete_from_table(self, table, item):
alog.debug('#delete_from_table:%s' % (table))
alog.debug(item)
if table not in self.data:
self.data[table] = []
delete_item = next(_item for _item in self.data['orderBookL2']
if _item['id'] == item['id'])
if delete_item:
self.data[table].remove(delete_item)
def prepend_to_table(self, table, item):
if table not in self.data:
self.data[table] = []
isMaxLength = len(self.data[table]) == self.max_table_length
if isMaxLength and 'orderBook' not in table:
self.data[table].pop()
self.data[table].insert(0, item)
alog.debug('#prepend_to_table')
alog.debug(self.data[table])
def update_item_in_table(self, table, update):
alog.debug("# update_item_in_table")
alog.debug(json.dumps(update))
item_to_update = next(item for item in self.data[table]
if item['id'] == update['id'])
item_to_update.update(update)
def get_table(self, table):
return self.data[table]
def update_instrument(self, action, data):
alog.debug(data)
self.data['instrument'] = data[0]
``` |
{
"source": "joliveros/keras-rl2",
"score": 3
} |
#### File: examples/dqn_orderbook/processor.py
```python
import numpy as np
from rl.core import Processor
class OrderBookFrameProcessor(Processor):
# def process_observation(self, observation):
# assert observation.ndim == 3 # (height, width, channel)
# img = Image.fromarray(observation)
# img = img.resize(INPUT_SHAPE).convert('L') # resize and convert to grayscale
# processed_observation = np.array(img)
# assert processed_observation.shape == INPUT_SHAPE
# return processed_observation.astype('uint8') # saves storage in experience memory
#
# def process_state_batch(self, batch):
# # We could perform this processing step in `process_observation`. In this case, however,
# # we would need to store a `float32` array instead, which is 4x more memory intensive than
# # an `uint8` array. This matters if we store 1M observations.
# processed_batch = batch.astype('float32') / 255.
# return processed_batch
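# Clipping rewards to [-1, 1] (the usual DQN convention) keeps the scale of
# the TD error comparable across environments with very different score ranges.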
def process_reward(self, reward):
return np.clip(reward, -1., 1.)
``` |
{
"source": "jolks/tfx",
"score": 2
} |
#### File: components/bulk_inferrer/executor_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from google.protobuf import json_format
from tensorflow_serving.apis import prediction_log_pb2
from tfx.components.bulk_inferrer import executor
from tfx.proto import bulk_inferrer_pb2
from tfx.types import standard_artifacts
class ExecutorTest(tf.test.TestCase):
def setUp(self):
super(ExecutorTest, self).setUp()
self._source_data_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'testdata')
self._output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
self.component_id = 'test_component'
# Create input dict.
self._examples = standard_artifacts.Examples(split='unlabelled')
self._examples.uri = os.path.join(self._source_data_dir,
'csv_example_gen/unlabelled/')
self._model = standard_artifacts.Model()
self._model.uri = os.path.join(self._source_data_dir, 'trainer/current/')
self._model_blessing = standard_artifacts.ModelBlessing()
self._model_blessing.uri = os.path.join(self._source_data_dir,
'model_validator/blessed')
self._model_blessing.set_int_custom_property('blessed', 1)
self._inference_result = standard_artifacts.InferenceResult()
self._prediction_log_dir = os.path.join(self._output_data_dir,
'prediction_logs')
self._inference_result.uri = self._prediction_log_dir
# Create context
self._tmp_dir = os.path.join(self._output_data_dir, '.temp')
self._context = executor.Executor.Context(
tmp_dir=self._tmp_dir, unique_id='2')
def _get_results(self, prediction_log_path):
results = []
filepattern = os.path.join(
prediction_log_path,
executor._PREDICTION_LOGS_DIR_NAME) + '-?????-of-?????.gz'
for f in tf.io.gfile.glob(filepattern):
record_iterator = tf.compat.v1.python_io.tf_record_iterator(
path=f,
options=tf.compat.v1.python_io.TFRecordOptions(
tf.compat.v1.python_io.TFRecordCompressionType.GZIP))
for record_string in record_iterator:
prediction_log = prediction_log_pb2.PredictionLog()
prediction_log.MergeFromString(record_string)
results.append(prediction_log)
return results
def testDoWithBlessedModel(self):
input_dict = {
'examples': [self._examples],
'model': [self._model],
'model_blessing': [self._model_blessing],
}
output_dict = {
'inference_result': [self._inference_result],
}
# Create exe properties.
exec_properties = {
'data_spec':
json_format.MessageToJson(bulk_inferrer_pb2.DataSpec()),
'model_spec':
json_format.MessageToJson(bulk_inferrer_pb2.ModelSpec()),
'component_id':
self.component_id,
}
# Run executor.
bulk_inferrer = executor.Executor(self._context)
bulk_inferrer.Do(input_dict, output_dict, exec_properties)
# Check outputs.
self.assertTrue(tf.io.gfile.exists(self._prediction_log_dir))
results = self._get_results(self._prediction_log_dir)
self.assertTrue(results)
self.assertEqual(
len(results[0].classify_log.response.result.classifications), 1)
self.assertEqual(
len(results[0].classify_log.response.result.classifications[0].classes),
2)
if __name__ == '__main__':
tf.test.main()
``` |
{
"source": "joll05/AdventOfCode2019",
"score": 3
} |
#### File: AdventOfCode2019/Day 11/solution1.py
```python
import computer
import numpy as np
import time
Position = (0, 0)
Canvas = np.full([200, 200], -1, dtype=int)
Canvas[0, 0] = 1
Corners = [(0, 0), (0, 0)]
TileCount = 0
Direction = 0
def AddVectors(vec1, vec2):
if(len(vec1) != len(vec2)):
return None
out = []
for v in range(len(vec1)):
out += [vec1[v] + vec2[v]]
return tuple(out)
def SendInput():
global Canvas
global Position
if(Canvas[Position] == 1):
return 1
else:
return 0
def MoveRobot():
global Direction
global Position
global Corners
if(Direction == 0):
Position = AddVectors(Position, (0, 1))
elif(Direction == 1):
Position = AddVectors(Position, (1, 0))
elif(Direction == 2):
Position = AddVectors(Position, (0, -1))
elif(Direction == 3):
Position = AddVectors(Position, (-1, 0))
print(Position)
if(Position[0] < Corners[0][0] or Position[1] < Corners[0][1]):
Corners[0] = Position
elif(Position[0] > Corners[1][0] or Position[1] > Corners[1][1]):
Corners[1] = Position
Turning = False
def RecieveOutput(out):
global Turning
global Direction
global Canvas
global Position
global TileCount
if(not Turning):
if(Canvas[Position] == -1):
TileCount += 1
Canvas[Position] = out
else:
if(out == 0):
Direction -= 1
else:
Direction += 1
if(Direction < 0):
Direction += 4
elif(Direction > 3):
Direction -= 4
MoveRobot()
Turning = not Turning
computer.Run(RecieveOutput, SendInput)
blackChar = u"\u25A0"
whiteChar = u"\u25A1"
for x in range(Corners[0][0] - 1, Corners[1][0] + 2):
out = ""
for y in range(Corners[0][1] - 1, Corners[1][1] + 2):
if(Canvas[x, y] == 1):
out += whiteChar
else:
out += blackChar
print(out)
time.sleep(0.2)
```
#### File: AdventOfCode2019/Day 12/soultion1.py
```python
class Moon:
position = [None, None, None]
velocity = [None, None, None]
def __init__(self, position):
self.position = list(position)
self.velocity = [0] * len(position)
def ApplyGravity(self, moons):
for moon in moons:
for coord in range(len(moon.position)):
if(self.position[coord] < moon.position[coord]):
self.velocity[coord] += 1
elif(self.position[coord] > moon.position[coord]):
self.velocity[coord] -= 1
def ApplyVelocity(self):
for coord in range(len(self.velocity)):
self.position[coord] += self.velocity[coord]
def GetEnergy(self):
pot = 0
for pos in self.position:
pot += abs(pos)
kin = 0
for vel in self.velocity:
kin += abs(vel)
return pot * kin
f = open("input.txt")
data = f.readlines()
Moons = []
for i in data:
coords = i[1:-2]
coords = coords.split(", ")
for coord in range(len(coords)):
coords[coord] = int(coords[coord][2:])
Moons += [Moon(coords)]
def TimeStep():
global Moons
for moon in Moons:
moon.ApplyGravity(Moons)
for moon in Moons:
moon.ApplyVelocity()
for i in range(1000):
TimeStep()
totalEnergy = 0
for moon in Moons:
totalEnergy += moon.GetEnergy()
print(totalEnergy)
```
#### File: AdventOfCode2019/Day 19/solution1.py
```python
import computer
output = ""
total = 0
x = 0
y = 0
def RecieveOutput(out):
global output
global total
if(out == 0):
output += "."
return
if(out == 1):
output += "#"
total += 1
return
inputIndex = 0
def SendInput():
global inputIndex
inputIndex += 1
if(inputIndex % 2 == 1):
return x
else:
return y
for X in range(200):
for Y in range(200):
x = X
y = Y
computer.Run(RecieveOutput, SendInput)
output += "\n"
print(output)
print(total)
```
#### File: AdventOfCode2019/Day 25/solution1.py
```python
import computer
def GetCommand():
userInput = input()
return userInput + "\n"
def RecieveOutput(out):
print(chr(out), end="")
command = ""
def SendInput():
global command
if(command == ""):
command = GetCommand()
currentChar = command[0]
command = command[1:]
return ord(currentChar)
computer.Run(RecieveOutput, SendInput)
```
#### File: AdventOfCode2019/Day 5/solution2.py
```python
import inspect
f = open("input.txt", "r")
values = f.read().split(",")
pointer = 0
def opcode1(p1, p2, out):
values[out] = str(p1 + p2)
return pointer + 4
def opcode2(p1, p2, out):
values[out] = str(p1 * p2)
return pointer + 4
def opcode3(out):
values[out] = input("Input required: ")
return pointer + 2
def opcode4(p1):
print(p1)
return pointer + 2
def opcode5(p1, p2):
if(p1 != 0):
return p2
return pointer + 3
def opcode6(p1, p2):
if(p1 == 0):
return p2
return pointer + 3
def opcode7(p1, p2, out):
if(p1 < p2):
values[out] = "1"
else:
values[out] = "0"
return pointer + 4
def opcode8(p1, p2, out):
if(p1 == p2):
values[out] = "1"
else:
values[out] = "0"
return pointer + 4
def opcode99():
exit()
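# Parameter-mode dispatch (a sketch of the convention this loop implements):
# mode 0 (position) dereferences values[param], mode 1 (immediate) uses the
# literal value, and output parameters are always passed as raw addresses.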
while True:
opcode = int(values[pointer][-2:])
func = "opcode%d" % opcode
modes = list(reversed(values[pointer][:-2]))
params = inspect.getfullargspec(eval(func))[0]
args = ""
for i in range(len(params)):
currentValue = values[pointer + 1 + i]
mode = 0
if(params[i] != "out"):
try:
mode = int(modes[i])
except IndexError:
pass
else:
mode = 1
if(i != 0):
args += ","
if(mode == 0):
args += values[int(currentValue)]
elif(mode == 1):
args += currentValue
pointer = eval(func + "(%s)" % args)
```
#### File: AdventOfCode2019/Day 6/solution1.py
```python
f = open("input.txt", "r")
puzzleInput = f.read()
totalOrbitCount = 0
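# Each object contributes its depth in the orbit tree (its count of direct and
# indirect orbits), so the answer is the sum of depths over all objects.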
def FindOrbiters(name, orbitCount):
global totalOrbitCount
totalOrbitCount += orbitCount
checkPos = 0
while True:
out = puzzleInput.find(name + ")", checkPos)
if(out == -1):
break
FindOrbiters(puzzleInput[out + 4:out + 7], orbitCount + 1)
checkPos = out + 4
FindOrbiters("COM", 0)
print(totalOrbitCount)
```
#### File: AdventOfCode2019/Day 7/solution2.py
```python
import computer
import itertools
possibleOrders = list(itertools.permutations(range(5)))
inputs = [0, 0]
bestResult = 0
def RecieveOutput(output):
global inputs
inputs[1] = output
index = 0
def SendInput():
global index
index += 1
return(inputs[(index - 1) % len(inputs)])
for i in possibleOrders:
inputs = [0, 0]
index = 0
for j in i:
inputs[0] = j
computer.Run(RecieveOutput, SendInput)
result = inputs[1]
if(result > bestResult):
bestResult = result
print(bestResult)
``` |
{
"source": "Jolle08/Number-Dash",
"score": 2
} |
#### File: Jolle08/Number-Dash/main.py
```python
import sys, os, subprocess, time
from random import randint
def clear():
if os.name == "nt":
os.system('cls')
else:
subprocess.call(['clear'])
pass
def startscreen():
clear()
print " . . "
print "b. 8 8 8888 88 ,8. ,8. 8 888888888o 8 8888888888 8 888888888o. "
print "888o. 8 8 8888 88 ,888. ,888. 8 8888 `88. 8 8888 8 8888 `88. "
print "Y88888o. 8 8 8888 88 .`8888. .`8888. 8 8888 `88 8 8888 8 8888 `88 "
print ".`Y888888o. 8 8 8888 88 ,8.`8888. ,8.`8888. 8 8888 ,88 8 8888 8 8888 ,88 "
print "8o. `Y888888o. 8 8 8888 88 ,8'8.`8888,8^8.`8888. 8 8888. ,88' 8 888888888888 8 8888. ,88' "
print "8`Y8o. `Y88888o8 8 8888 88 ,8' `8.`8888' `8.`8888. 8 8888888888 8 8888 8 888888888P' "
print "8 `Y8o. `Y8888 8 8888 88 ,8' `8.`88' `8.`8888. 8 8888 `88. 8 8888 8 8888`8b "
print "8 `Y8o. `Y8 ` 8888 ,8P ,8' `8.`' `8.`8888. 8 8888 88 8 8888 8 8888 `8b. "
print "8 `Y8o.` 8888 ,d8P ,8' `8 `8.`8888. 8 8888 ,88' 8 8888 8 8888 `8b. "
print "8 `Yo `Y88888P' ,8' ` `8.`8888. 8 888888888P 8 888888888888 8 8888 `88. "
print ""
print "8 888888888o. .8. d888888o. 8 8888 8 "
print "8 8888 `^888. .888. .`8888:' `88. 8 8888 8 "
print "8 8888 `88. :88888. 8.`8888. Y8 8 8888 8 "
print "8 8888 `88 . `88888. `8.`8888. 8 8888 8 "
print "8 8888 88 .8. `88888. `8.`8888. 8 8888 8 "
print "8 8888 88 .8`8. `88888. `8.`8888. 8 8888 8 "
print "8 8888 ,88 .8' `8. `88888. `8.`8888. 8 8888888888888 "
print "8 8888 ,88'.8' `8. `88888. 8b `8.`8888. 8 8888 8 "
print "8 8888 ,o88P' .888888888. `88888. `8b. ;8.`8888 8 8888 8 "
print "8 888888888P' .8' `8. `88888. `Y8888P ,88P' 8 8888 8 "
print ""
print ""
print ""
print " _ _ ____ "
print " / \ __ _ __ _ _ __ ___ ___ | |__ _ _ | _ \ __ _ _ __ ___ ___ _ __ __ _ _ __ __ _ "
print " / _ \ / _` |/ _` | '_ ` _ \ / _ \ | '_ \| | | | | |_) / _` | '_ ` _ \ / _ \| '__/ _` | '_ \ / _` |"
print " / ___ \ | (_| | (_| | | | | | | __/ | |_) | |_| | | __/ (_| | | | | | | (_) | | | (_| | | | | (_| |"
print "/_/ \_\ \__, |\__,_|_| |_| |_|\___| |_.__/ \__, | |_| \__,_|_| |_| |_|\___/|_| \__,_|_| |_|\__,_|"
print " |___/ |___/ "
print ""
print "Coded by <NAME> at Pamorana (<EMAIL>)."
print "Copyright (C) 2015 <NAME>. Licensed under The MIT License (MIT)"
print ""
time.sleep(3)
clear()
start(0,0)
def start(rand,maxguess):
maxrand = 1000
if rand == 0:
rand = randint(1, maxrand)
maxguess = 12
print "Guess a number between 1 and %s.\nYou have %s guesses to get it right." % (maxrand,maxguess)
print "====================================================================="
guess = raw_input("Your guess:> ")
while not guess.isdigit():
print "====================================================================="
print "\"%s\" is not an integer." % (guess)
print "====================================================================="
guess = raw_input("Try again:> ")
guess = int(guess)
testinput(rand,guess,maxrand,maxguess)
def testinput(rand,guess,maxrand,maxguess):
if guess > 0 and guess < maxrand + 1:
gameif(rand,guess,maxrand,maxguess)
else:
clear()
error = "Please guess a number which is between 1 and %s!" % (maxrand)
print error.upper()
time.sleep(3)
clear()
start(rand,maxguess)
def gameif(rand,guess,maxrand,maxguess):
if guess == rand:
maxguess = maxguess - 1
clear()
print "You won!"
print "Guesses left: " + str(maxguess)
print "====================================================================="
time.sleep(3)
clear()
start(0,0)
elif guess > rand:
maxguess = maxguess - 1
clear()
print "Smaller than %s!" % (guess)
print "Guesses left: " + str(maxguess)
print "====================================================================="
gamelose(rand,guess,maxrand,maxguess)
elif guess < rand:
maxguess = maxguess - 1
clear()
print "Bigger than %s!" % (guess)
print "Guesses left: " + str(maxguess)
print "====================================================================="
gamelose(rand,guess,maxrand,maxguess)
def gamelose(rand,guess,maxrand,maxguess):
if maxguess == 0:
clear()
print "Game over!"
print "The random number was %s." % (rand)
print "====================================================================="
time.sleep(3)
clear()
start(0,0)
else:
guess = raw_input("Your guess:> ")
while not guess.isdigit():
print "====================================================================="
print "\"%s\" is not an integer." % (guess)
print "====================================================================="
guess = raw_input("Try again:> ")
guess = int(guess)
testinput(rand,guess,maxrand,maxguess)
startscreen()
``` |
{
"source": "JolleJolles/pyutilspack",
"score": 3
} |
#### File: pyutilspack/pythutils/mathutils.py
```python
from __future__ import division
import math
import numpy as np
def uneven(value):
"""Returns the closest uneven value equal to or lower than provided"""
if value == 0:
newvalue = 1
else:
newvalue = value -1 if value % 2 == 0 else value
return newvalue
def closenr(n, m) :
"""Find the number closest to n and divisible by m"""
q = int(n / m)
n1 = m * q
if((n * m) > 0) :
n2 = (m * (q + 1))
else :
n2 = (m * (q - 1))
if (abs(n - n1) < abs(n - n2)) :
return n1
return n2
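# Illustrative examples (not part of the original module):
#   closenr(100, 7) -> 98 (98 = 7 * 14 is nearer to 100 than 105)
#   closenr(1920, 32) -> 1920 (already divisible)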
def seqcount(start, stop, steplength):
"""
Returns a sequence of evenly spaced numbers between a start and stop value;
`steplength` sets (approximately) how many steps the range is split into
"""
step = (stop - start) / steplength
step = int(np.ceil(step))
sequence = range(start, stop, step)
return sequence
def maxsteps(value, maxval = 500):
"""
Returns the maximum number of steps and stepsize for a value that may be
divided up to a maximum value, all being rounded value. For example, if one
wants to get the maximum number of steps to get to a value of 100 where it
can be maximally divided 7 times, it will return nsteps=5, stepsize=20.
"""
valrange = [value-3,value-2,value-1,value]
nsteps = 1
for val in valrange:
for _,n in enumerate(reversed(list(range(1, maxval)))):
if val % n == 0:
if n > nsteps:
nsteps = int(n)
stepsize = int(val/nsteps)
break
return(nsteps, stepsize)
def get_weights(w = 1.7, length = 20):
"""Returns a list of weights, based on quadratic function"""
return [w**i for i in range(length, 0, -1)]
def sort_twoPoint(coords):
"""Fixes the coordinates of a two-point to be LT to BR"""
x1 = min(coords[0][0], coords[1][0])
y1 = min(coords[0][1], coords[1][1])
x2 = max(coords[0][0], coords[1][0])
y2 = max(coords[0][1], coords[1][1])
return ((x1,y1),(x2,y2))
def sort_points(pts):
"""
Initializes a list of coordinates that will be ordered from top-left to
bottom-left in clockwise order
"""
rect = np.zeros((4, 2), dtype = "float32")
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def points_to_vec(pt1, pt2, flip = False):
"""
Converts the coordinate of two points (pt1 > pt2) to a vector
flip: bool, default = False
If the coordinate system should be flipped such that higher y-coords
are lower (e.g. needed when working with images in opencv).
"""
vx = pt2[0] - pt1[0]
vy = pt1[1] - pt2[1] if flip else pt2[1] - pt1[1]
return vx, vy
def angle_to_vec(angle):
"""
Converts an angle in degrees to a vector. Uses a coordinate system that
points north and ranges from -180 to 180 degrees.
"""
vx = np.round(np.sin(np.radians(angle)), 3)
vy = np.round(np.cos(np.radians(angle)), 3)
return vx, vy
def points_to_angle(pt1, pt2 = None, flip = False):
"""
Returns the angle of a vector from the origin to a single point or the angle
between two points. Uses a coordinate system that points north and ranges
from -180 to 180 degrees.
flip: bool, default = False
If the coordinate system should be flipped such that higher y-coords
are lower (e.g. needed when working with images in opencv).
"""
vx, vy = pt1 if pt2 is None else points_to_vec(pt1, pt2, flip)
angle = np.round(np.arctan2(vx, vy) * 180 / np.pi,2)
return angle
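# Quick sanity checks for the angle convention used above (illustrative):
#   angle_to_vec(90) -> (1.0, 0.0), i.e. due east is +90 degrees
#   points_to_angle((0, 0), (0, 1)) -> 0.0 (straight north)
#   points_to_angle((0, 0), (1, 1)) -> 45.0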
def midpoint(pt1, pt2):
"""Computes the midpoint between two points"""
x = pt2[0]+int((pt1[0]-pt2[0])/2)
y = pt2[1]+int((pt1[1]-pt2[1])/2)
return (x,y)
def ptsToDist(pt1, pt2):
"""Computes the distance between two points"""
if None in pt1 or None in pt2:
dist = None
else:
vx, vy = points_to_vec(pt1, pt2)
dist = np.linalg.norm([(vx, vy)])
return dist
def diff_series(series, period = 1):
"""Shifts a pandas series upwards with a given period"""
series2 = series.shift(periods = -period)
return series2 - series
def dist_to_segment(pt, segment):
"""
Calculates the distance between a point and a non-infinite line segment
Explanation
----------
To calculate the closest distance to a line segment, we first need to check
if the point projects onto the line segment. If it does, then we calculate
the orthogonal distance from the point to the line. If the point does not
project to the line segment, we calculate the distance to both endpoints
and take the shortest distance.
Input
----------
pt : tuple with the point's coordinates
segment : tuple with the segment endpoints' coordinates
Returns
-------
dist : minimum distance from the point to the segment
coords : coordinates of the closest point on the segment
"""
# unit vector
segment = np.array(segment)
uline = segment[1] - segment[0]
norm_uline = uline / np.linalg.norm(uline)
# compute the perpendicular distance to the theoretical infinite line
dot_product = np.cross(segment[1] - segment[0], segment[0] - pt)
dist_infline = (np.linalg.norm(dot_product / np.linalg.norm(uline)))
diff = (norm_uline[0] * (pt[0] - segment[0][0])) + \
(norm_uline[1] * (pt[1] - segment[0][1]))
x_seg = np.round((norm_uline[0] * diff) + segment[0][0])
y_seg = np.round((norm_uline[1] * diff) + segment[0][1])
linept1dis = np.linalg.norm(segment[0] - pt)
linept2dis = np.linalg.norm(segment[1] - pt)
endpoint_dist = min(linept1dis, linept2dis)
# decide if the intersection point falls on the line segment
lp1_x = segment[0][0]
lp1_y = segment[0][1]
lp2_x = segment[1][0]
lp2_y = segment[1][1]
is_betw_x = lp1_x <= x_seg <= lp2_x or lp2_x <= x_seg <= lp1_x
is_betw_y = lp1_y <= y_seg <= lp2_y or lp2_y <= y_seg <= lp1_y
if is_betw_x and is_betw_y:
dist = dist_infline
coords = (x_seg, y_seg)
else:
# if not, then return the minimum distance to the segment endpoints
dist = endpoint_dist
coords = (segment[0][0],segment[0][1]) if linept1dis<=linept2dis else (segment[1][0],segment[1][1])
return dist, coords
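# Illustrative example: the point (0, 5) projects onto the segment
# ((0, 0), (10, 0)), so the perpendicular distance and the foot of the
# perpendicular are returned:
#   dist, coords = dist_to_segment((0, 5), ((0, 0), (10, 0)))  # 5.0, (0.0, 0.0)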
def maxrect(dims, maxdims = (1640,1232), decimals = 2):
"""
Computes the maximal-sized rectangle with the same radio of dimensions as
the rectangle provided, within the maximum dimensions
"""
ratio = float(dims[1])/dims[0]
maxpix = maxdims[0]*maxdims[1]
w = round(math.sqrt(maxpix/ratio),decimals)
h = round(w*ratio,decimals)
return (w,h)
```
#### File: pyutilspack/pythutils/mediautils.py
```python
from __future__ import division
from __future__ import print_function
import os
import cv2
import numpy as np
from pythutils.fileutils import get_ext
from pythutils.mathutils import closenr, sort_points
def check_media(source, internal=False):
"""Runs some basic checks on a mediafile or stream"""
ext = get_ext(str(source))
ftype = None
if ext in [".mov",".mp4",".avi"]:
ftype = "vid"
if ext in [".jpg", ".png", ".jpeg", ".bmp"]:
ftype = "img"
if type(source) == int:
ftype = "stream"
if ftype == None:
print("File neither video or image file..")
return False
if ftype == "img" or ftype == "vid":
filedir = os.path.dirname(source)
if filedir != "":
if not os.path.isdir(filedir):
print("File directory does not exist..")
return False
if not os.path.isfile(source):
print("File does not exist..")
return False
if ftype == "vid" or ftype == "stream":
cap = cv2.VideoCapture(source)
flag, frame = cap.read()
if not flag:
print("Video source opened but failed to read images..")
return False
if not internal:
print("Mediafile okay.. ", end = "")
return True
def getimg(mediafile):
"""Acquires a numpy array from a video or image"""
try:
cap = cv2.VideoCapture(mediafile)
_, img = cap.read()
except:
img = cv2.imread(mediafile)
return img
def get_vid_params(mediafile):
"""Gets video parameters from file or video instance"""
if type(mediafile) is str:
if get_ext(mediafile) not in [".mov",".mp4",".avi"]:
raise TypeError("File not a video..")
mediafile = cv2.VideoCapture(mediafile)
if not mediafile.read()[0]:
raise RuntimeError("Video could not be read..")
fps = int(mediafile.get(cv2.CAP_PROP_FPS))
width = int(mediafile.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(mediafile.get(cv2.CAP_PROP_FRAME_HEIGHT))
fcount = int(mediafile.get(cv2.CAP_PROP_FRAME_COUNT))
return fps, width, height, fcount
def videowriter(filein, w, h, fps, resizeval = 1):
"""Creates a vidout instance using the opencv VideoWriter class"""
ext = get_ext(filein)
fileout = filein[:-len(ext)]+".mp4" if ext!="" else filein+".mp4"
viddims = (w, h) if resizeval == 1 else (int(w*resizeval), int(h*resizeval))
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
vidout = cv2.VideoWriter(fileout, fourcc, fps, viddims)
return vidout
def safe_framecount(vidfile):
"""Saves video frame counter that counts frame-by-frame"""
cap = cv2.VideoCapture(vidfile)
vidlength = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
count = 0
while True:
ret, frame = cap.read()
if not ret:
break
count += 1
print("video had", vidlength-count, "non-existing frames.. ", end = "")
return count
def crop(image, pt1, pt2=None):
"""Crops image based on based on top left and bottom right corner"""
if pt2 == None:
pt2 = pt1[1]
pt1 = pt1[0]
cropped = image[pt1[1]:pt2[1], pt1[0]:pt2[0]]
return cropped
def fourpt_transform(image, pts):
"""
Perspective transform a section of an image based on four coordinates
to obtain a top-down view
"""
rect = sort_points(pts)
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
dst = np.array([[0, 0], [maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warped
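# Minimal usage sketch (the file name and corner coordinates are made up):
#   img = cv2.imread("frame.jpg")
#   corners = np.array([(10, 10), (200, 15), (210, 180), (5, 190)], dtype="float32")
#   topdown = fourpt_transform(img, corners)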
def checkroi(roi, resolution):
"""Make sure roi coordinates are within resolution"""
x1 = max(roi[0][0],1)
y1 = max(roi[0][1],1)
x2 = min(roi[1][0],resolution[0])
y2 = min(roi[1][1],resolution[1])
return ((x1,y1),(x2,y2))
def zoom_to_roi(zoom, resolution):
"""Gets region of interest coordinates from x,y,w,h zoom parameters"""
x1 = int(zoom[0] * resolution[0])
x2 = int((zoom[0]+zoom[2]) * resolution[0])
y1 = int(zoom[1] * resolution[1])
y2 = int((zoom[1]+zoom[3]) * resolution[1])
return ((x1,y1),(x2,y2))
def roi_to_zoom(roi, resolution):
"""Gets x,y,w,h zoom parameters from region of interest coordinates"""
((x1,y1),(x2,y2)) = roi
z0 = round(x1 / resolution[0],2)
z1 = round(y1 / resolution[1],2)
z2 = round((x2-x1) / resolution[0],2)
z3 = round((y2-y1) / resolution[1],2)
return (z0, z1, z2, z3)
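# Round-trip example for an illustrative 1640x1232 frame:
#   zoom_to_roi((0.25, 0.25, 0.5, 0.5), (1640, 1232)) -> ((410, 308), (1230, 924))
#   roi_to_zoom(((410, 308), (1230, 924)), (1640, 1232)) -> (0.25, 0.25, 0.5, 0.5)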
def picamconv(resolution, maxres = (1632, 1232)):
"""Adapts video resolution to work with raspberry pi camera"""
width = min(closenr(resolution[0],32), maxres[0])
height = min(closenr(resolution[1],16), maxres[1])
return (width, height)
def fix_vidshape(res1,res2):
"""Compares two resolutions and get missing x and y coords"""
xmin,ymin = 0,0
xmult = (res2[0]/res1[0])
ymult = (res2[1]/res1[1])
if xmult > ymult:
xmin = int((res2[0]-(res1[0]*ymult))/2)
if ymult > xmult:
ymin = int((res2[1]-(res1[1]*xmult))/2)
return xmin, ymin
def newdims(img = None, resize = 1, dims = None):
"""Returns new dimensions of an image array based on resize value"""
if dims is None:
if img is None:
print("No img or dims provided..")
return
else:
dims = (img.shape[1],img.shape[0])
width = int(dims[0] * resize)
height = int(dims[1] * resize)
return (width, height)
def imgresize(img, resize = 1, dims = None, back = False):
"""
Returns resized image based on resizevalue or provided dimensions
Parameters
----------
img : numpy array
resize : float, default = 1
Multiplier for image size
dims : tuple, default = None
Dimensions of the to-be returned image
back : bool, default = False
If the inverse of the resize value should be used
"""
if dims is None:
resize = 1/resize if back else resize
dims = newdims(img, resize)
interpol = cv2.INTER_CUBIC if resize > 1 else cv2.INTER_AREA
img = cv2.resize(img, dims, interpolation = interpol)
return img
def add_transimg(bgimg, transimg, offsets):
"""
Adds a semi-transparent (4-channel) image to a 3-channel background
image. Images need to be arrays.
"""
h, w, c = transimg.shape
fix = np.zeros((h, w, 3), np.uint8)
a = transimg[:, :, 3] / 255 #alpha
o = offsets
fix[:,:,0] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 0]+a*transimg[:,:,0]
fix[:,:,1] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 1]+a*transimg[:,:,1]
fix[:,:,2] = (1.-a)*bgimg[o[1]:o[1]+h, o[0]:o[0]+w, 2]+a*transimg[:,:,2]
bgimg[o[1]:o[1]+h, o[0]:o[0]+w] = fix
return bgimg
``` |
{
"source": "Jollerprutt/sam_common",
"score": 2
} |
#### File: sam_actions/scripts/gps_fix_server.py
```python
import rospy
from rospy import ROSException
from std_msgs.msg import Header, Bool
from std_srvs.srv import SetBool
from geometry_msgs.msg import PoseWithCovarianceStamped, Point, Quaternion
from sensor_msgs.msg import NavSatFix, NavSatStatus
from sam_msgs.msg import GetGPSFixAction, GetGPSFixFeedback, GetGPSFixResult
from sam_msgs.msg import PercentStamped
import actionlib
import tf_conversions
import tf
from tf.transformations import quaternion_from_euler, quaternion_multiply
from geodesy import utm
import math
import numpy as np
class GPSFixServer(object):
_feedback = GetGPSFixFeedback()
_result = GetGPSFixResult()
def __init__(self, name):
self.last_gps_pos = None
self.last_dr_pos = None
self._action_name = name
self._as = actionlib.SimpleActionServer(self._action_name, GetGPSFixAction, execute_cb=self.execute_cb, auto_start=False)
self.pose_pub = rospy.Publisher('/initialpose', PoseWithCovarianceStamped, queue_size=10)
self.lcg_disable_pub = rospy.Publisher('/sam/ctrl/lcg/pid_enable', Bool, queue_size=10)
self.vbs_disable_pub = rospy.Publisher('/sam/ctrl/vbs/pid_enable', Bool, queue_size=10)
self.lcg_pub = rospy.Publisher('/sam/core/lcg_cmd', PercentStamped, queue_size=10)
self.vbs_pub = rospy.Publisher('/sam/core/vbs_cmd', PercentStamped, queue_size=10)
self.listener = tf.TransformListener()
self._as.start()
def start_stop_dvl(self, value, value_string):
try:
rospy.wait_for_service('/sam/core/start_stop_dvl', timeout=3.)
start_stop_dvl = rospy.ServiceProxy('/sam/core/start_stop_dvl', SetBool)
resp = start_stop_dvl(value)
if not resp.success:
self._feedback.status = "Service call returned false, failed to %s dvl" % value_string
rospy.loginfo("Service call returned false, failed to %s dvl", value_string)
except (rospy.ServiceException, ROSException), e:
self._feedback.status = "Service call failed, failed to %s dvl" % value_string
rospy.loginfo("Service call failed: %s, failed to %s dvl", e, value_string)
#finally:
# self._feedback.status = "Did %s dvl" % (value_string)
self._as.publish_feedback(self._feedback)
def estimate_position(self, fixes, covars):
try:
now = rospy.Time(0)
(world_trans, world_rot) = self.listener.lookupTransform("world_utm", "world_local", now)
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_utm", "world_local")
rospy.loginfo("Could not get transform between %s and %s" % ("world_utm", "world_local"))
self._as.publish_feedback(self._feedback)
# easting, northing is in world_utm coordinate system,
# we need to transform it to world or world_local
pos = np.zeros((len(fixes), 3))
for i, fix in enumerate(fixes):
utm_point = utm.fromLatLong(fix[0], fix[1])
easting = utm_point.easting
northing = utm_point.northing
utm_zone = utm_point.zone
pos[i, :] = np.array([easting-world_trans[0], northing-world_trans[1], 0.])
# use the cov to weight the means in the future
estimate = np.mean(pos, axis=0)
return estimate
def execute_cb(self, goal):
rospy.loginfo("Got action callback...")
self._feedback.status = "Shutting down controllers and DVL"
self._as.publish_feedback(self._feedback)
header = Header()
timeout = goal.timeout
required_gps_msgs = goal.required_gps_msgs
self.start_stop_dvl(False, "stop")
# Disable controllers
self.vbs_disable_pub.publish(False)
self.lcg_disable_pub.publish(False)
# Sleep to make sure controllers are down
rospy.sleep(0.1)
# Set VBS to 0
self.vbs_pub.publish(0., header)
# Set LCG to 0
self.lcg_pub.publish(0., header)
good_fixes = []
good_vars = [] # NOTE: covariances are in m^2
# Get GPS fixes until we are in a good place
gps_topic = "/sam/core/gps"
start_time = rospy.get_time()
while rospy.get_time() - start_time < timeout and len(good_fixes) < required_gps_msgs:
try:
gps_msg = rospy.wait_for_message(gps_topic, NavSatFix, 3.)
except rospy.ROSException:
rospy.loginfo("Could not get gps message on %s, aborting...", gps_topic)
self._feedback.status = "Could not get gps message on %s..." % gps_topic
self._as.publish_feedback(self._feedback)
continue
if gps_msg.status.status != NavSatStatus.STATUS_NO_FIX:
self._feedback.status = "Good fix, now has %d msgs" % len(good_fixes)
good_fixes.append(np.array([gps_msg.latitude, gps_msg.longitude]))
good_vars.append(np.array([gps_msg.position_covariance[:2], gps_msg.position_covariance[3:5]]))
else:
self._feedback.status = "No fix, now has %d msgs" % len(good_fixes)
self._as.publish_feedback(self._feedback)
if len(good_fixes) < required_gps_msgs:
self._result.status = "Timeout, not enough msgs"
self._as.set_aborted(self._result)
return
else:
self._feedback.status = "Done listening, got %d msgs" % len(good_fixes)
self._as.publish_feedback(self._feedback)
self.start_stop_dvl(True, "start")
gps_pos = self.estimate_position(good_fixes, good_vars)
corrected_rot = [0., 0., 0., 1.] # Start with 0 yaw
if self.last_dr_pos is not None and self.last_gps_pos is not None:
self._feedback.status = "Found previous positions, doing heading estimation"
self._as.publish_feedback(self._feedback)
try:
now = rospy.Time(0)
(dr_trans, dr_rot) = self.listener.lookupTransform("world_local", "sam/base_link", now)
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_local", "sam/base_link")
rospy.loginfo("Could not get transform between %s and %s" % ("world_local", "sam/base_link"))
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
gps_diff = gps_pos - self.last_gps_pos
#gps_diff = 1./np.linalg.norm(gps_diff)*gps_diff
gps_trajectory_yaw = math.atan2(gps_diff[1], gps_diff[0])
dr_diff = np.array((dr_trans[0] - self.last_dr_pos[0], dr_trans[1] - self.last_dr_pos[1]))
#dr_diff = 1./np.linalg.norm(dr_diff)*dr_diff
dr_trajectory_yaw = math.atan2(dr_diff[1], dr_diff[0])
yaw_correction = gps_trajectory_yaw - dr_trajectory_yaw
# to get the actual yaw, we need to look at the
# the difference in odom between last time and this time
# note that we need to get the new estimated yaw
# after publishing this to get the corrected one
self._feedback.status = "Estimated GPS yaw: %f, DR yaw: %f, Yaw corr: %f" % (gps_trajectory_yaw, dr_trajectory_yaw, yaw_correction)
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
corrected_rot = quaternion_multiply(quaternion_from_euler(0., 0., yaw_correction), dr_rot)
self._feedback.status = "Waiting for filter to update"
self._as.publish_feedback(self._feedback)
pose_msg = PoseWithCovarianceStamped()
pose_msg.header = header
pose_msg.header.frame_id = "world_local"
pose_msg.pose.pose.position = Point(*gps_pos.tolist())
pose_msg.pose.pose.orientation = Quaternion(*corrected_rot)
self.pose_pub.publish(pose_msg)
rospy.sleep(.5)
self._feedback.status = "Getting updated pose"
self._as.publish_feedback(self._feedback)
try:
now = rospy.Time(0)
(trans, rot) = self.listener.lookupTransform("world_local", "sam/base_link", now)
self.last_dr_pos = trans
except (tf.LookupException, tf.ConnectivityException):
self._feedback.status = "Could not get transform between %s and %s" % ("world_local", "sam/base_link")
rospy.loginfo("Could not get transform between %s and %s" % ("world_local", "sam/base_link"))
self._as.publish_feedback(self._feedback)
rospy.sleep(0.3)
self.last_gps_pos = gps_pos
self._result.status = "Finished setting position"
self._as.set_succeeded(self._result)
if __name__ == "__main__":
rospy.init_node('gps_fix_server', anonymous=False) #True)
check_server = GPSFixServer(rospy.get_name())
rospy.spin()
``` |
{
"source": "jollescott/hawking-radiation",
"score": 3
} |
#### File: src/manim/scenes.py
```python
from manimlib.imports import *
class VirtualParticles(Scene):
def get_sine_wave(self, dx=0):
return FunctionGraph(
lambda x: np.sin((x+dx)-(PI/2)),
x_min=-4, x_max=4, color=RED
)
def get_cosine_wave(self, dx=0):
return FunctionGraph(
lambda x: np.cos(x+dx),
x_min=-4, x_max=4, color=BLUE
)
def construct(self):
# Waves
sine_function = self.get_sine_wave()
cosine_function = self.get_cosine_wave()
d_theta = ValueTracker(0)
def update_sine_wave(func):
func.become(
self.get_sine_wave(dx=d_theta.get_value())
)
return func
def update_cosine_wave(func):
func.become(
self.get_cosine_wave(dx=d_theta.get_value())
)
return func
sine_function.add_updater(update_sine_wave)
cosine_function.add_updater(update_cosine_wave)
# Particles
anti_particle = Circle(color=RED, fill_color=RED, fill_opacity=1)
anti_particle.scale(0.2)
anti_particle.move_to(DOWN)
particle = Circle(color=BLUE, fill_color=BLUE, fill_opacity=1)
particle.scale(0.2)
particle.move_to(UP)
self.add(sine_function)
self.add(anti_particle)
self.add(cosine_function)
self.add(particle)
self.play(d_theta.increment_value, PI, ApplyMethod(anti_particle.shift, [0, 2, 0]), ApplyMethod(
particle.shift, [0, -2, 0]), rate_func=linear)
self.play(d_theta.increment_value, PI, ApplyMethod(anti_particle.shift, [0, -2, 0]), ApplyMethod(
particle.shift, [0, 2, 0]), rate_func=linear)
```
#### File: pygame-simulation/animation/trail.py
```python
from pygame import draw
from animation import vector
import numpy as np
class trail:
color = (255, 255, 255)
fade_color = (0, 0, 0)
thickness = 1
max_length = 40
min_point_dist = 1
def __init__(self):
self.points = []
self.__current_length = 0
def __insert_point(self, point, add_length):
self.points.insert(0, point)
self.__current_length += add_length
def add_point(self, point):
# add point
if (len(self.points) == 0):
self.__insert_point(point, 0)
else:
add_dist = vector.get_distance(point, self.points[0])
if add_dist >= self.min_point_dist:
self.__insert_point(point, add_dist)
# remove points if trail is too long
while self.__current_length > self.max_length and len(self.points) > 1:
self.__current_length -= vector.get_distance(
self.points[-1],
self.points[-2]
)
self.points.pop(-1)
def get_length(self):
return self.__current_length
def draw(self, surface):
n = len(self.points)
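# fade linearly from self.color at the newest point to fade_color at the tail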
for i in range(n - 1):
point = self.points[i]
next_point = self.points[i + 1]
p = i / n
draw.line(
surface,
(
(1 - p) * self.color[0] + p * self.fade_color[0],
(1 - p) * self.color[1] + p * self.fade_color[1],
(1 - p) * self.color[2] + p * self.fade_color[2],
),
np.array(point, dtype=int),
next_point,
self.thickness
)
```
#### File: animation/world/universe.py
```python
import time
from animation import vector
import random
import math
from animation.world.particle import particle
from animation.world.black_hole import black_hole
import numpy as np
class universe:
min_spawn_time = 0.10
max_spawn_time = 0.5
spawn_on_horizon_chance = 0.15
particle_spawn_velocity = 100
def __init__(self, size, scale):
self._size = size
self.black_hole = black_hole(np.array(size) * 0.5, 0, scale)
print(self.black_hole.get_radius())
self._particles = []
self.__particle_spawn_timer = time.perf_counter()
def update(self, elapsed_time):
# loop thorugh all particles
index = 0
while index < len(self._particles):
p = self._particles[index]
cp = p.connected_particle
# remove if collided
if cp and particle.collide(p, cp):
self._particles.pop(index)
self._particles.remove(cp)
# update particle
p.update(
elapsed_time,
(0, 0) # self.black_hole.calculate_particle_force(p)
)
index += 1
self.__particle_spawn_timer -= elapsed_time
# check if it is time to spawn a particle pair
if self.__particle_spawn_timer <= 0 and self.black_hole.mass > 0:
self._spawn_particle_pair()
self.__particle_spawn_timer = random.uniform(self.min_spawn_time,
self.max_spawn_time)
if self.black_hole.mass > 0:
self.black_hole.update(elapsed_time, self._particles)
def _spawn_particle_pair(self):
"""
Spawns a pair of particles with opposite charge.
"""
on_horizon = random.uniform(0, 1) < self.spawn_on_horizon_chance
particles = (
self._create_on_event_horizon() if on_horizon
else self._create_outside_event_horizon()
)
self._particles.extend(particles)
def _create_on_event_horizon(self):
"""
Returns a pair of particles with opposite charge positioned on the
event horizon.
"""
# spawn center relative to the center of the black hole
spawn_offset = vector.rand_vector_rot(self.black_hole.get_radius())
spawn_center = vector.add(self.black_hole.position, spawn_offset)
rand_rot = random.choice((-1, 1)) * random.uniform(math.pi / 4,
math.pi / 2)
reg_par = particle(
# charge
True,
# position
vector.add(
spawn_center,
vector.change_length(spawn_offset, particle.radius)
),
# velocity
vector.new_vector(
self.particle_spawn_velocity,
vector.get_rotation(spawn_offset) + rand_rot
)
)
neg_par = particle(
# charge
False,
# position
vector.subtract(
spawn_center,
vector.change_length(spawn_offset, particle.radius)
),
# velocity
vector.new_vector(
self.particle_spawn_velocity,
vector.get_rotation(spawn_offset) + math.pi - rand_rot
)
)
reg_par.connected_particle = neg_par
neg_par.connected_particle = reg_par
return (reg_par, neg_par)
def _create_outside_event_horizon(self):
"""
Returns a pair of particles with opposite charge positioned outside the
event horizon.
"""
min_horizon_dist = 40
spawn_center = vector.add(
self.black_hole.position,
vector.rand_vector_rot(
random.uniform(
self.black_hole.get_radius() + min_horizon_dist,
max(self._size) / 2
)
)
)
direction_rot = random.uniform(0, 2 * math.pi)
max_side_velocity = self.particle_spawn_velocity / 2
side_velocity = vector.new_vector(
random.uniform(-max_side_velocity, max_side_velocity),
direction_rot + math.pi / 2
)
reg_par = particle(
# charge
True,
# position
vector.add(
spawn_center,
vector.new_vector(particle.radius + 1, direction_rot)
),
# velocity
vector.add(
vector.new_vector(self.particle_spawn_velocity, direction_rot),
side_velocity
)
)
neg_par = particle(
# charge
False,
# position
vector.add(
spawn_center,
vector.new_vector(particle.radius, direction_rot + math.pi)
),
# velocity
vector.add(
vector.new_vector(
self.particle_spawn_velocity,
direction_rot + math.pi
),
side_velocity
)
)
reg_par.connected_particle = neg_par
neg_par.connected_particle = reg_par
return (reg_par, neg_par)
def draw(self, surface):
self.black_hole.draw(surface)
# draw all particles
for p in self._particles:
p.draw(surface)
``` |
{
"source": "jollescott/mimer",
"score": 2
} |
#### File: management/commands/remind.py
```python
from django.core.management.base import BaseCommand, CommandError
from quiz.models import QuizUser
from webpush import send_user_notification
class Command(BaseCommand):
help = 'Syncs Database with Sana Assets.'
def add_arguments(self, parser):
parser.add_argument('-d', '--debug', action='store_true')
def handle(self, *args, **options):
users = QuizUser.objects.all()
payload = {'head': 'Ditt dagliga test på Grönländska glosor!',
'body': 'Klicka för att påbörja ett nytt test. Tack igen för att du deltar i vårt gymnasiearbete :)'}
for user in users:
try:
send_user_notification(user, payload, 1000)
except BaseException as ex:
print(ex)
``` |
{
"source": "jollierfinbean/bleak_ruuvitag",
"score": 3
} |
#### File: examples/influxdb_collector/main.py
```python
import asyncio
import datetime
from typing import NamedTuple
from bleak_ruuvitag.core import scan
from aioinflux import (
InfluxDBClient,
TAGENUM,
TAG,
FLOAT,
INT,
STR,
lineprotocol
)
influxdb_client_configuration = {
'host': 'influxdb.lan',
'port': 80,
'db': 'mydb',
'username': 'myuser',
'password': '<PASSWORD>'
}
@lineprotocol
class RuuviTagMeasurementTest1(NamedTuple):
name: TAG
temperature: FLOAT
humidity: FLOAT
pressure: INT
voltage: INT
async def main():
client = InfluxDBClient(**influxdb_client_configuration)
async for result in scan(5):
measurement = RuuviTagMeasurementTest1(
name=result.name,
temperature=result.temperature,
humidity=result.humidity,
pressure=result.pressure,
voltage=result.voltage,
)
asyncio.create_task(client.write(measurement))
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
``` |
{
"source": "jollitu/pythonkeeper",
"score": 3
} |
#### File: pythonkeeper/app/app.py
```python
from flask import Flask, request
pythonkeeper = Flask(__name__)
@pythonkeeper.route("/register", methods=["POST"])
def register():
    # Flask cannot inject credentials as positional view arguments; read them from the submitted form data.
    username = request.form.get("username")
    password = request.form.get("password")
    email = request.form.get("email")
    return "ABC"
@pythonkeeper.route("/login", methods=["POST"])
def login():
    usernameOrEmail = request.form.get("usernameOrEmail")
    password = request.form.get("password")
    return "<PASSWORD>"
``` |
{
"source": "j-ollivier/showcase_site",
"score": 3
} |
#### File: showcase_site/main/models.py
```python
from django.db import models
from django.utils import timezone
# Create your models here.
###############################################################################
class ContactMessage(models.Model):
'''
If a customer wants to message the shop.
'''
class Meta:
verbose_name = 'Message privé'
        verbose_name_plural = 'Messages privés'
# Attributes
uid = models.AutoField(
primary_key= True, db_index= True)
nom = models.CharField(
max_length = 100)
contenu = models.TextField(
)
remote_addr = models.CharField(
max_length = 400)
timestamp = models.DateTimeField(
default = timezone.now)
# Methods
def __str__(self):
return str(self.uid)
``` |
{
"source": "j-ollivier/sonov-main",
"score": 2
} |
#### File: sonov-main/scripts/test.py
```python
# check the GET method. If GET
# give context
# generate template
# return
# check the GET method. If POST
# check the source site through regular expression matching.
# if youtube is found, source_site is 'youtube'
# if soundcloud is found, source_site is 'soundcloud'
    # if vimeo is found, source_site is 'vimeo'
# if nothing is found
# add a message instance
# return redirect to frontpage
# initiate the form object with POST data
# if the form is not valid
# add a message instance
# return redirect to frontpage
# if the form is valid
# continue
# initiate a new Son
# populate with manually added title, thumbnail,
# populate with automatic source_site, source_id_string
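# --- Hypothetical sketch of the flow outlined above (not part of the original file) ---
# Assumes the Son model, UploadSonForm and the GetYoutubeID helper exist as in the
# project's views.py; import paths, template names and field names are illustrative only.
import re
from django.contrib import messages
from django.shortcuts import redirect, render
from main.models import Son
from main.forms import UploadSonForm
from main.scripts import GetYoutubeID
def submit_son_sketch(request):
    if request.method == 'GET':
        return render(request, 'main/upload_son.html', {'upload_son_form': UploadSonForm()})
    # POST: detect the source site by matching the submitted URL
    url = request.POST.get('source_url', '')
    if re.search(r'youtube|youtu\.be', url):
        source_site = 'youtube'
    elif re.search(r'soundcloud', url):
        source_site = 'soundcloud'
    elif re.search(r'vimeo', url):
        source_site = 'vimeo'
    else:
        messages.error(request, 'Unrecognised source site.')
        return redirect('/')
    form = UploadSonForm(request.POST, request.FILES)
    if not form.is_valid():
        messages.error(request, 'Invalid form.')
        return redirect('/')
    # populate the new Son with manually entered and automatically derived fields
    new_son = Son()
    new_son.title = form.cleaned_data['title']
    new_son.thumbnail = form.cleaned_data['thumbnail']
    new_son.source_site = source_site
    new_son.source_id_string = GetYoutubeID(url)
    new_son.save()
    return redirect('/')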
```
#### File: j-ollivier/sonov-main/views.py
```python
import time
import random
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .models import *
from forum.models import ForumMember
import operator #for sorting objects from different tables in one aggregated list
from .forms import UploadSonForm, SubscribeForm
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from subprocess import call
from django.shortcuts import redirect
import youtube_dl
from os import chdir
import re
from .scripts import GetNextPostTime, GetYoutubeID
#####################################################################
list_of_contributors = [ i.username for i in User.objects.filter( is_staff = True ) ]
#####################################################################
def FrontPage(request):
'''
Landing page for the whole blog. It has a search form which
looks up titles, tags, article texts.
'''
all_tags = Tag.objects.all().order_by('title')
# registered users get to see all sons before they're posted
if request.user.is_authenticated:
last_sons = [i for i in Son.objects.select_related(
).all().order_by(
'created_date').reverse()]
else:
last_sons = [i for i in Son.objects.select_related(
).filter(is_visible=True).order_by(
'created_date').reverse()]
paginator = Paginator(last_sons, 12)
page = request.GET.get('page')
sons_to_display = paginator.get_page(page)
    # April 1st (April Fools') easter egg
    if time.strftime("%m") == "04" and time.strftime("%d") == "01":
for son in sons_to_display:
if random.randint( 1,10 ) >= 7:
son.source_id_string = 'eJuhX8LoH1s'
son.short_desc = '1er Avril :^) Recharge la page pour retenter le son'
context={
'all_tags': all_tags,
'sons_to_display': sons_to_display,
'player_enabled' : False,
'colorbox_enabled' : True,
}
template = loader.get_template('main/frontpage.html')
return HttpResponse(template.render(context, request))
#####################################################################
def TagList(request):
'''
List of all tags and categories
'''
all_tags = Tag.objects.all().order_by('category' , 'title')
contributor_category = ForumMember.objects.filter( rank = 9999 )
context={
'contributor_category' : contributor_category,
'categories' : Tag.category_choices,
'tags_to_display': all_tags,
'player_enabled' : False,
'colorbox_enabled' : True,
}
template = loader.get_template('main/taglist.html')
return HttpResponse(template.render(context, request))
#####################################################################
def Playlist(request, tag_title):
'''
Displays all sons from a tag list and allows playing.
If the special tag 'shuffle' is passed, we pick 20 random sons
in the list and play them all.
'''
if tag_title == 'shuffle':
playlist_content = Son.objects.filter(
is_visible = True).order_by(
'?')[:20]
page_title = 'Shuffle 20 !'
elif tag_title == 'timeline':
playlist_content = Son.objects.filter(
is_visible = True).order_by(
'-created_date')
page_title = 'La totale !'
elif tag_title in list_of_contributors:
page_title = 'Sons postés par {}'.format( tag_title )
playlist_content = Son.objects.filter(
posted_by__username = tag_title )
else:
tag = Tag.objects.get(title = tag_title)
playlist_content = Son.objects.filter(
tags = tag, is_visible = True)
page_title = tag.title
context={
'playlist_content': playlist_content,
'page_title': page_title,
'player_enabled' : True,
'colorbox_enabled' : False,
}
template = loader.get_template('main/playlist.html')
return HttpResponse(template.render(context, request))
#####################################################################
def ClipList(request):
'''
    List all visible sons tagged 'Super-Clip', paginated twelve per page.
'''
all_clips = Son.objects.filter(
tags__title = 'Super-Clip', is_visible = True).order_by(
'created_date').reverse()
paginator = Paginator(all_clips, 12)
page = request.GET.get('page')
clips_to_display = paginator.get_page(page)
context={
'clips_to_display': clips_to_display,
'player_enabled' : False,
'colorbox_enabled' : True,
}
template = loader.get_template('main/clip_list.html')
return HttpResponse(template.render(context, request))
#####################################################################
def UploadSon(request):
'''
Tool to upload a son and get the corresponding mp3
'''
if request.user.is_staff:
if request.method == 'GET' and not request.user.username.startswith('BE_'):
context = {
'upload_son_form': UploadSonForm(),
}
template = loader.get_template('main/upload_son.html')
return HttpResponse(template.render(context, request))
elif request.method == "POST":
form = UploadSonForm(request.POST, request.FILES)
if form.is_valid():
new_son = Son()
new_son.title = form.cleaned_data['title']
new_son.source_site = form.cleaned_data['source_site']
new_son.thumbnail = form.cleaned_data['thumbnail']
new_son.source_url = form.cleaned_data['source_url']
new_son.source_id_string = GetYoutubeID(form.cleaned_data['source_url'])
# after the audio is DLd from the source site
# we rename the file as its source_id_string
new_son.audio_file = 'static/main/audio/{}.mp3'.format(
new_son.source_id_string)
new_son.is_visible = False
new_son.created_date = GetNextPostTime()
new_son.short_desc = form.cleaned_data['short_desc']
new_son.posted_by = form.cleaned_data['posted_by']
new_son.save()
if form.cleaned_data['tags']:
for tag in form.cleaned_data['tags']:
new_son.tags.add(tag)
else:
return HttpResponseRedirect('/')
# we put the mp3 in the DL list
with open( '/home/common/sonov_django/dl_list.txt' , 'a' ) as ofi:
ofi.write( new_son.source_url )
return HttpResponseRedirect('/')
else:
context = {
'upload_son_form': UploadSonForm(),
'errors' : [i for i in form.errors],
'player_enabled' : False,
'colorbox_enabled' : False,
}
template = loader.get_template('main/upload_son.html')
return HttpResponse(template.render(context, request))
else:
return HttpResponseRedirect('/')
else:
return HttpResponseRedirect('/')
#####################################################################
def Subscribe( request ):
'''
Page to subscribe to the newsletter.
'''
if request.method == 'GET':
        # prototype cap to limit subscription spam
if Subscriber.objects.all().count() < 50:
subscribe_form = SubscribeForm()
sub_message = "Nous envoyons un petit mail toutes les \
semaines pour vous tenir au courant des sons qui ont été \
postés, ainsi que les nouvelles du site. "
else:
subscribe_form = None
sub_message = "Notre boîte d'envoi de newsletter est \
arrivé à capacité maximale ! Revenez bientôt pour \
vous inscrire :) désolé pour le contretemps."
context = {
'subscribe_form': subscribe_form,
'sub_message' : sub_message,
}
template = loader.get_template('main/subscribe.html')
return HttpResponse(template.render(context, request))
elif request.method == "POST":
form = SubscribeForm( request.POST )
if form.is_valid():
new_subscriber = Subscriber()
new_subscriber.email = form.cleaned_data[ 'email' ]
new_subscriber.save()
template = loader.get_template('main/subscribe_ok.html')
context={}
return HttpResponse(template.render(context, request))
else:
return HttpResponseRedirect('/')
#####################################################################
def SoundcloudIframe(request, soundcloud_id):
'''
Test for embedding soundcloud within colorbox
'''
context={
'soundcloud_id': soundcloud_id,
}
template = loader.get_template('main/soundcloud_iframe.html')
return HttpResponse(template.render(context, request))
#####################################################################
def YoutubeIframe(request, youtube_id):
'''
    Test for embedding youtube within colorbox
'''
context={
'youtube_id': youtube_id,
}
template = loader.get_template('main/youtube_iframe.html')
return HttpResponse(template.render(context, request))
#####################################################################
def VimeoIframe(request, vimeo_id):
'''
    Test for embedding vimeo within colorbox
'''
context={
'vimeo_id': vimeo_id,
}
template = loader.get_template('main/vimeo_iframe.html')
return HttpResponse(template.render(context, request))
``` |
{
"source": "j-ollivier/vaste-roadmap",
"score": 2
} |
#### File: j-ollivier/vaste-roadmap/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from precise_bbcode.fields import BBCodeTextField
#####################################################################
class Theme(models.Model):
'''
Todo items are listed under a theme. Aka category, directory,
etc.
'''
# Attributes
uid = models.AutoField(
primary_key = True, db_index = True)
name = models.CharField(
max_length = 100)
authorized_user = models.ManyToManyField(
User,
related_name= 'theme_authorized_user')
author = models.ForeignKey(
User,
models.SET_NULL,
null = True,
related_name = 'theme_author')
created_date = models.DateTimeField(
default = timezone.now)
# Methods
def __str__(self):
return str(self.name)
def SubThemeActivity(self):
# fetching all subthemes
subthemes = SubTheme.objects.filter(theme = self)
subtheme_activity = list(EventLog.objects.filter(
entity_type = 'sous-thème',
entity_uid__in = [i.uid for i in subthemes]))
return subtheme_activity
#####################################################################
class SubTheme(models.Model):
'''
Todo subthemes are listed under a theme. Aka category,
directory, etc.
'''
# Attributes
uid = models.AutoField(
primary_key = True, db_index = True)
name = models.CharField(
max_length = 100)
author = models.ForeignKey(
User,
models.SET_NULL,
null = True,
related_name = 'subtheme_author')
theme = models.ForeignKey(
Theme,
on_delete = models.CASCADE,
related_name= 'subtheme_theme')
order = models.PositiveIntegerField(
)
created_date = models.DateTimeField(
default = timezone.now)
# Methods
def __str__(self):
return str(self.name)
def ItemCount(self):
item_count = Item.objects.filter(
subtheme = self).count()
return item_count
def ItemCountActive(self):
item_count_active = Item.objects.filter(
subtheme = self, is_active = True).count()
return item_count_active
def ItemActivity(self):
# fetching all subthemes
items = Item.objects.filter(subtheme = self)
item_activity = EventLog.objects.filter(
entity_type = 'item',
entity_uid__in = [i.uid for i in items])
return item_activity
#####################################################################
class Item(models.Model):
'''
A todo item, possibily having a related Item to nest in.
'''
class Meta:
ordering = ['-created_date']
# Attributes
uid = models.AutoField(
primary_key = True, db_index = True)
name = BBCodeTextField(
)
subtheme = models.ForeignKey(
SubTheme,
on_delete = models.CASCADE,
related_name= 'item_subtheme')
is_active = models.BooleanField(
default = True)
is_important = models.BooleanField(
default = False)
created_date = models.DateTimeField(
default = timezone.now)
completed_date = models.DateTimeField(
null = True)
attributed_to = models.ForeignKey(
User,
null = True,
blank = True,
on_delete = models.SET_NULL,
related_name = 'item_user')
# Methods
def __str__(self):
return str(self.name)
def ItemCommentActivity(self):
# fetching all subthemes
comments = ItemComment.objects.filter(item = self)
item_comment_activity = EventLog.objects.filter(
entity_type = 'item',
entity_uid__in = [i.uid for i in comments])
return item_comment_activity
#####################################################################
class ItemComment(models.Model):
'''
Comments and logging utilities for each item
'''
class Meta:
ordering = ['created_date']
# Attributes
uid = models.AutoField(
primary_key = True, db_index = True)
name = models.CharField(
max_length = 200
)
item = models.ForeignKey(
Item,
on_delete = models.CASCADE,
related_name= 'item_comment_item')
author = models.ForeignKey(
User,
on_delete = models.CASCADE,
related_name = 'item_comment_author')
created_date = models.DateTimeField(
default = timezone.now)
# Methods
def __str__(self):
return str(self.name)
#####################################################################
class EventLog(models.Model):
'''
To keep track of every event, a log entry is available each
time an event happens in
'''
class Meta:
ordering = ['-created_date']
# Attributes
uid = models.AutoField(
primary_key = True, db_index = True)
author = models.ForeignKey(
User,
on_delete = models.CASCADE,
related_name = 'log_author')
theme = models.ForeignKey(
Theme,
on_delete = models.CASCADE,
related_name = 'log_theme')
entity_uid = models.PositiveIntegerField(
)
entity_type = models.CharField(
max_length = 100)
value = models.TextField(
null = True)
action = models.CharField(
max_length = 200)
created_date = models.DateTimeField(
default = timezone.now)
# Methods
def __str__(self):
return str(self.uid)
```
#### File: j-ollivier/vaste-roadmap/views.py
```python
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import loader
from .models import *
from .forms import *
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.contrib import messages
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
@login_required
#####################################################################
def Home(request):
'''
    Show the authenticated user every theme they are authorized to access.
'''
themes = Theme.objects.filter(
authorized_user = request.user).order_by(
'name')
context = {
        'themes': themes,
        'page_title': 'Feuilles de route',
}
template = loader.get_template('roadmap/home.html')
return HttpResponse(template.render(context, request))
@login_required
#####################################################################
def ThemeView(request, theme_uid):
'''
    Display a theme and its ordered subthemes to an authorized user.
'''
theme = Theme.objects.get(pk = theme_uid)
if request.user in theme.authorized_user.all():
context = {
'theme': theme,
'new_sub_theme_form' : NewSubThemeForm(),
'subthemes': SubTheme.objects.filter(
theme = theme).order_by('order').select_related(),
'page_title' : theme.name,
}
template = loader.get_template('roadmap/theme_view.html')
return HttpResponse(template.render(context, request))
else:
return HttpResponseRedirect(
'/nope')
@login_required
#####################################################################
def AddItem(request, subtheme_uid):
'''
    Create a new todo item in a subtheme (POST) or show the creation form (GET).
'''
subtheme = SubTheme.objects.get(pk = subtheme_uid)
if request.method == "POST" and request.user in subtheme.theme.authorized_user.all():
form = NewItemForm(request.POST)
if form.is_valid():
new_item = Item()
new_item.name = form.cleaned_data['name']
new_item.subtheme = subtheme
new_item.is_active = True
new_item.created_date = timezone.now()
theme = subtheme.theme
new_item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'item'
log.value = str(new_item.name)[0:40]
log.entity_uid = Item.objects.all().order_by('created_date').last().uid
log.action = 'Création'
log.theme = theme
log.save()
return HttpResponseRedirect(
'/roadmap/view/{}'.format(subtheme.theme.uid))
else:
return HttpResponseRedirect(
'/nope')
else:
context = {
'subtheme' : subtheme,
'new_item_form' : NewItemForm(),
'page_title' : 'Nouvel item',
}
template = loader.get_template('roadmap/add_item.html')
return HttpResponse(template.render(context, request))
@login_required
#####################################################################
def AddItemComment(request, item_uid):
'''
    Add a comment to a todo item (POST) or show the comment form (GET).
'''
item = Item.objects.get(pk = item_uid)
subtheme = item.subtheme
if request.method == "POST" and request.user in subtheme.theme.authorized_user.all():
form = NewItemCommentForm(request.POST)
if form.is_valid():
new_item = ItemComment()
new_item.name = form.cleaned_data['name']
new_item.item = item
new_item.author = User.objects.get(pk = request.user.id)
new_item.timestamp = timezone.now()
new_item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'commentaire'
log.entity_uid = ItemComment.objects.all().order_by('created_date').last().uid
log.value = str(new_item.name)[0:40]
log.action = 'Création'
log.theme = subtheme.theme
log.save()
return HttpResponseRedirect(
'/roadmap/view/{}'.format(item.subtheme.theme.uid))
else:
return HttpResponseRedirect(
'/nope')
else:
context = {
'item': item,
'new_item_comment_form' : NewItemCommentForm(),
'page_title' : 'Nouveau commentaire',
}
template = loader.get_template('roadmap/add_item_comment.html')
return HttpResponse(template.render(context, request))
@login_required
#####################################################################
def AddSubTheme(request, theme_uid):
'''
    Create a new subtheme inside a theme from the submitted form.
'''
theme = Theme.objects.get(pk = theme_uid)
if request.method == "POST" and request.user in theme.authorized_user.all():
form = NewSubThemeForm(request.POST)
if form.is_valid():
new_subtheme = SubTheme()
new_subtheme.name = form.cleaned_data['name']
new_subtheme.order = 0
new_subtheme.author = User.objects.get(pk = request.user.id)
new_subtheme.theme = theme
new_subtheme.timestamp = timezone.now()
new_subtheme.theme = theme
new_subtheme.save()
# Give it the right order
for subtheme in SubTheme.objects.filter(theme = theme):
subtheme.order += 1
subtheme.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'sous-thème'
log.entity_uid = SubTheme.objects.all().order_by('created_date').last().uid
log.action = 'Création'
log.theme = theme
log.value = str(new_subtheme.name)[0:40]
log.save()
return HttpResponseRedirect(
'/roadmap/view/{}'.format(theme.uid))
else:
return HttpResponseRedirect(
'/nope')
else:
return HttpResponseRedirect('/roadmap/view/{}'.format(theme.uid))
@login_required
#####################################################################
def ItemStatusSwitch(request, item_uid, item_action):
'''
An todo item is_active status can be switched with this view.
'''
item = Item.objects.get(pk=item_uid)
subtheme = item.subtheme
theme = subtheme.theme
if item_action == 'active_switch' and request.user in theme.authorized_user.all():
if item.is_active == True:
item.is_active = False
item.is_important = False
item.completed_date = timezone.now()
item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'item'
log.entity_uid = Item.objects.all().order_by('created_date').last().uid
log.action = 'Complétion'
log.theme = theme
log.save()
else:
item.is_active = True
item.completed_date = None
item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'item'
log.entity_uid = Item.objects.all().order_by('created_date').last().uid
log.action = 'Réactivation'
log.theme = theme
log.save()
elif item_action == 'importance_switch' and request.user in theme.authorized_user.all():
if item.is_important == True:
item.is_important = False
item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'item'
log.entity_uid = Item.objects.all().order_by('created_date').last().uid
log.action = 'Priorité abaissée'
log.theme = theme
log.save()
else:
item.is_important = True
item.save()
# log it
log = EventLog()
log.author = request.user
log.entity_type = 'item'
log.entity_uid = Item.objects.all().order_by('created_date').last().uid
log.action = 'Priorité élevée'
log.theme = theme
log.save()
else:
return HttpResponseRedirect(
'/nope')
return HttpResponseRedirect('/roadmap/view/{}'.format(theme.uid))
@login_required
#####################################################################
def SubThemeOrderChange(request, subtheme_uid, subtheme_action):
'''
Users are allowed to change the order of subthemes.
This view handles the ordrer change and the order change
of the other subthemes to adapt to the new order value of
the changed subtheme.
'''
subtheme = SubTheme.objects.get(pk = subtheme_uid)
if subtheme_action == 'to_up':
order_modificator = -1
elif subtheme_action == 'to_down':
order_modificator = 1
else:
return HttpResponseRedirect('/nope')
if request.user in subtheme.theme.authorized_user.all():
# check if order is already at minimum value
if subtheme.order <= 1:
return HttpResponseRedirect(
'/roadmap/view/{}'.format(subtheme.theme.uid))
else:
pass
# Do the modification
try:
subtheme.order += order_modificator
subtheme_to_swap = SubTheme.objects.get(
theme = subtheme.theme, order = subtheme.order)
subtheme.save()
except ObjectDoesNotExist:
return HttpResponseRedirect(
'/roadmap/view/{}'.format(subtheme.theme.uid))
# rearrange the item
subtheme_to_swap.order += -order_modificator
subtheme_to_swap.save()
return HttpResponseRedirect(
'/roadmap/view/{}'.format(subtheme.theme.uid))
else:
return HttpResponseRedirect('/nope')
``` |
{
"source": "Jollokim/Snake",
"score": 3
} |
#### File: Jollokim/Snake/main.py
```python
import pygame
import snake
import apple
pygame.init()
pygame.font.init()
canvasLength = 480
canvasHeight = 480
win = pygame.display.set_mode((canvasLength, canvasHeight))
pygame.display.set_caption("Slange")
x = 50
y = 50
width = 20
height = 20
snake = snake.Snake(win)
apple1 = apple.Apple(win, snake)
apple2 = apple.Apple(win, snake)
def drawGrid():
for k in range(2):
for i in range(0, canvasLength, 20):
if k == 0:
pygame.draw.line(win, (50, 50, 50), (i, 0), (i, canvasLength), 1)
else:
pygame.draw.line(win, (50, 50, 50), (0, i), (canvasHeight, i), 1)
def drawGameOver():
myfont = pygame.font.SysFont('Sans', 75)
myfont2 = pygame.font.SysFont('Sans', 30)
win.blit(myfont.render('Game Over!', False, (255, 255, 255)), (100, 100))
win.blit(myfont2.render('Press \'SPACE\' to try again', False, (255, 255, 255)), (120, 170))
win.blit(myfont2.render('Apples eaten: ' + str(snake.appleEaten), False, (255, 255, 255)), (150, 200))
def drawGameWin():
myfont = pygame.font.SysFont('Sans', 50)
win.blit(myfont.render('You have a big snake!', False, (255, 255, 255)), (65, 100))
win.blit(myfont.render('Thanks for playing!', False, (255, 255, 255)), (65, 150))
myfont2 = pygame.font.SysFont('Sans', 30)
win.blit(myfont2.render('Apples eaten: ' + str(snake.appleEaten), False, (255, 255, 255)), (150, 200))
run = True
while run:
pygame.time.delay(50)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
snake.setDirection(pygame.key.get_pressed())
win.fill((0, 0, 0))
drawGrid()
apple1.render()
apple2.render()
snake.render()
if not snake.snakeAlive:
drawGameOver()
if snake.victory:
drawGameWin()
pygame.display.update()
```
#### File: Jollokim/Snake/snake.py
```python
import pygame
class Snake:
dim = 20
vel = 5
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
STARTX = 240
STARTY = 240
def __init__(self, win):
self.snakeX = []
self.snakeY = []
self.snakeDirection = []
for i in range(5):
self.snakeX.append(Snake.STARTX)
self.snakeY.append(Snake.STARTY + (25 * i))
self.snakeDirection.append(Snake.UP)
self.currentLength = len(self.snakeX)
self.snakeCorner = []
self.snakeMove = True
self.snakeAlive = True
self.re_to_cha_Dir = True
self.win = win
self.lastWait = 6
self.lastDirection = None
self.waitTails = 0
self.growthRate = 2
self.victory = False
self.appleEaten = 0
self.cheatCode = [0, 0, 0]
def _forward(self):
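        # Move every segment one velocity step. A segment changes direction when it
        # reaches a stored corner; freshly grown tail blocks (direction None) are
        # activated one at a time after a short delay controlled by lastWait.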
for i in range(len(self.snakeX)):
for ci in range(len(self.snakeCorner)):
# when snake part passes corner
if self.snakeY[i] == self.snakeCorner[ci][1] and self.snakeX[i] == self.snakeCorner[ci][0]:
self.snakeDirection[i] = self.snakeCorner[ci][2]
if i == 0:
self.re_to_cha_Dir = True
if i == len(self.snakeX) - 1:
del self.snakeCorner[0]
break
if self.snakeDirection[i] == Snake.UP:
self.snakeY[i] -= Snake.vel
elif self.snakeDirection[i] == Snake.DOWN:
self.snakeY[i] += Snake.vel
elif self.snakeDirection[i] == Snake.LEFT:
self.snakeX[i] -= Snake.vel
elif self.snakeDirection[i] == Snake.RIGHT:
self.snakeX[i] += Snake.vel
# runs twice when multiple extra snake blocks are added
elif self.lastWait < 4:
self.lastWait += 1
break
elif self.lastWait == 4:
self.snakeDirection[self.currentLength] = self.lastDirection
self.currentLength += 1
self.lastWait += 1
self.waitTails -= 1
if self.waitTails > 0:
self.lastWait = 0
break
def _calcNextCorner(self, pos, rounding):
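        # Snap the turning point onto the 20px grid: even direction codes (UP, LEFT)
        # round the coordinate down, odd codes (DOWN, RIGHT) round it up.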
if rounding % 2 == 0:
diff = pos % 20
nextCornerPos = pos - diff
return nextCornerPos
else:
modulodiff = pos % 20
diff = 20 - modulodiff
nextCornerPos = pos + diff
return nextCornerPos
def setDirection(self, keys):
if keys[pygame.K_UP] & self.re_to_cha_Dir:
if self.snakeDirection[0] != Snake.UP and self.snakeDirection[0] != Snake.DOWN:
self.re_to_cha_Dir = False
self.snakeCorner.append(
[self._calcNextCorner(self.snakeX[0], self.snakeDirection[0]), self.snakeY[0], Snake.UP])
elif keys[pygame.K_DOWN] & self.re_to_cha_Dir:
if self.snakeDirection[0] != Snake.UP and self.snakeDirection[0] != Snake.DOWN:
self.re_to_cha_Dir = False
self.snakeCorner.append(
[self._calcNextCorner(self.snakeX[0], self.snakeDirection[0]), self.snakeY[0], Snake.DOWN])
elif keys[pygame.K_RIGHT] & self.re_to_cha_Dir:
if self.snakeDirection[0] != Snake.RIGHT and self.snakeDirection[0] != Snake.LEFT:
self.re_to_cha_Dir = False
self.snakeCorner.append(
[self.snakeX[0], self._calcNextCorner(self.snakeY[0], self.snakeDirection[0]), Snake.RIGHT])
elif keys[pygame.K_LEFT] & self.re_to_cha_Dir:
if self.snakeDirection[0] != Snake.RIGHT and self.snakeDirection[0] != Snake.LEFT:
self.re_to_cha_Dir = False
self.snakeCorner.append(
[self.snakeX[0], self._calcNextCorner(self.snakeY[0], self.snakeDirection[0]), Snake.LEFT])
elif keys[pygame.K_SPACE] and (not self.snakeAlive or self.victory):
self.__init__(self.win)
elif keys[pygame.K_w]:
self.cheatCode[0] = "W"
elif keys[pygame.K_i]:
self.cheatCode[1] = "I"
elif keys[pygame.K_n]:
self.cheatCode[2] = "N"
cheat = ""
for ch in self.cheatCode:
cheat += str(ch)
if cheat == "WIN":
self.victory = True
self.appleEaten = 99999999
def _checkDead(self):
        if (self.snakeX[0] < 0 or self.snakeX[0] > 480 - Snake.dim
                or self.snakeY[0] < 0 or self.snakeY[0] > 480 - Snake.dim):
self.snakeAlive = False
return
headx = self.snakeX[0]
heady = self.snakeY[0]
headDir = self.snakeDirection[0]
for i in range(1, len(self.snakeX)):
if (headx + Snake.dim > self.snakeX[i] and headx + Snake.dim < self.snakeX[
i] + Snake.dim) and headDir == Snake.RIGHT and (
heady > self.snakeY[i] - Snake.dim and heady < self.snakeY[i] + Snake.dim):
self.snakeAlive = False
return
elif (headx < self.snakeX[i] + Snake.dim and headx > self.snakeX[i]) and headDir == Snake.LEFT and (
heady > self.snakeY[i] - Snake.dim and heady < self.snakeY[i] + Snake.dim):
self.snakeAlive = False
return
elif (heady + Snake.dim > self.snakeY[i] and heady + Snake.dim < self.snakeY[
i] + Snake.dim) and headDir == Snake.DOWN and (
headx > self.snakeX[i] - Snake.dim and headx < self.snakeX[i] + Snake.dim):
self.snakeAlive = False
return
elif (heady < self.snakeY[i] + Snake.dim and heady > self.snakeY[i]) and headDir == Snake.UP and (
headx > self.snakeX[i] - Snake.dim and headx < self.snakeX[i] + Snake.dim):
self.snakeAlive = False
return
def snakeGrow(self):
print("snake grows!")
lastSnakeX = self.snakeX[-1]
lastSnakeY = self.snakeY[-1]
if self.waitTails == 0:
self.lastWait = 0
self.lastDirection = self.snakeDirection[-1]
for i in range(self.growthRate):
self.snakeX.append(lastSnakeX)
self.snakeY.append(lastSnakeY)
self.snakeDirection.append(None)
self.waitTails += self.growthRate
def render(self):
if self.snakeMove & self.snakeAlive & (not self.victory):
self._forward()
self._checkDead()
for i in range(len(self.snakeX)):
if i == 0:
pygame.draw.rect(self.win, (0, 100, 0), (self.snakeX[i], self.snakeY[i], Snake.dim, Snake.dim))
else:
pygame.draw.rect(self.win, (0, 255, 0), (self.snakeX[i], self.snakeY[i], Snake.dim, Snake.dim))
cheat = ""
for ch in self.cheatCode:
cheat += str(ch)
if cheat == "WIN":
self.victory = True
def __str__(self):
return "ssssssssss"
``` |
{
"source": "Jollokim/SudokuSolver",
"score": 2
} |
#### File: Jollokim/SudokuSolver/main.py
```python
from kivy.app import App
from kivy.config import Config
from controller.controller import Controller
from view.gui import Gui
class MyApp(App):
title = "Sudoku Solver"
icon = "sudoku.png"
def build(self):
Config.set('graphics', 'width', '700')
Config.set('graphics', 'height', '700')
gui = Gui()
global controller
controller = Controller(gui)
return gui
if __name__ == '__main__':
MyApp().run()
```
#### File: SudokuSolver/model/spot.py
```python
class Spot:
def __init__(self):
self.num = ""
self.possible_nums = []
for num in range(1,10):
self.possible_nums.append(num)
self.possible_counter = 0
self.unchangeable = False
self.legal_number = True
def pluss_one(self):
try:
number = int(self.num)
        except (TypeError, ValueError):
self.num = 1
self.unchangeable = True
return
self.unchangeable = True
number += 1
if number > 9:
number = ""
self.num = number
if self.num == "":
self.unchangeable = False
def __str__(self):
if self.num == "":
return "empty"
return str(self.num)
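# --- Hypothetical quick check (not part of the original file) ---
# pluss_one() cycles a spot through 1..9 and then back to empty.
if __name__ == '__main__':
    s = Spot()
    for _ in range(11):
        s.pluss_one()
        print(s)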
``` |
{
"source": "Jolly23/wx_pay_python",
"score": 2
} |
#### File: Jolly23/wx_pay_python/example.py
```python
from hashlib import sha1
from time import time
from flask import jsonify
from wx_pay import WxPay, WxPayError
def wx_js_config():
"""
    Generate the configuration parameters for the WeChat JS-SDK on the front-end page
"""
config_args = {
'noncestr': WxPay.nonce_str(),
'jsapi_ticket': 'xxxxxx',
# jsapi_ticket 一个类似ACCESS_TOKEN的参数,
# 详见 https://mp.weixin.qq.com/wiki?action=doc&id=mp1421141115&t=0.6103989146089088#jssdkshiyongbuzhou
'timestamp': int(time()),
'url': 'http://www.example.com/pay/goods=3' # 使用js_api的网页网址
}
raw = [(k, str(config_args[k]) if isinstance(config_args[k], (int, float)) else config_args[k])
for k in sorted(config_args.keys())]
s = "&".join("=".join(kv) for kv in raw if kv[1])
return {
        'signature': sha1(s.encode('utf-8')).hexdigest(),
'timestamp': config_args['timestamp'],
'nonce_str': config_args['noncestr']
}
def create_pay_example():
"""
    Example: generate the parameters needed by the WeChat JS payment API to create an order
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID', # 微信平台appid
wx_mch_id='WX_MCH_ID', # 微信支付商户号
wx_mch_key='WX_MCH_KEY',
# wx_mch_key 微信支付重要密钥,请登录微信支付商户平台,在 账户中心-API安全-设置API密钥设置
wx_notify_url='http://www.example.com/pay/weixin/notify'
# wx_notify_url 接受微信付款消息通知地址(通常比自己把支付成功信号写在js里要安全得多,推荐使用这个来接收微信支付成功通知)
# wx_notify_url 开发详见https://pay.weixin.qq.com/wiki/doc/api/jsapi.php?chapter=9_7
)
try:
pay_data = wx_pay.js_pay_api(
openid=u'***user_openid***', # 付款用户openid
body=u'***商品名称/付款显示名称***', # 例如:饭卡充值100元
total_fee=100 # total_fee 单位是 分, 100 = 1元
# spbill_create_ip='172.16.31.10' # 若不使用flask框架,则需要传入调用微信支付的用户ip地址
)
        print(pay_data)
        # After the order is created, pass the returned JSON data to the WeChat payment JS parameters on the front-end page
        return jsonify(pay_data)
    except WxPayError as e:
        return str(e), 400
def order_query_example():
"""
    Order query example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='http://www.example.com/pay/weixin/notify'
)
data = wx_pay.order_query(
# 下面两个参数二选一
out_trade_no=u'***商户订单号***',
# transaction_id=u'***微信订单号***'
)
def close_order_example():
"""
    Close order example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='http://www.example.com/pay/weixin/notify'
)
data = wx_pay.close_order(
out_trade_no=u'***商户订单号***'
)
def refund_example():
"""
    Refund request example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='WX_NOTIFY_URL'
)
data = wx_pay.refund(
# 证书获取方法请阅读:https://pay.weixin.qq.com/wiki/doc/api/tools/cash_coupon.php?chapter=4_3
# api_client_cert_path: 微信支付商户证书(apiclient_cert.pem)的本地保存路径
api_cert_path='/home/xxx/SERVER/ext_file/apiclient_cert.pem',
# api_client_cert_path: 微信支付商户证书(apiclient_key.pem)的本地保存路径
api_key_path='/home/xxx/SERVER/ext_file/apiclient_key.pem',
out_trade_no=u'***商户订单号***',
# out_refund_no=u'***商户退款单号***', 商户退款单号可自动生成,按需使用
total_fee=500, # 支付时下单总金额 单位分
refund_fee=500, # 要退款的金额 单位分
)
def refund_query_example():
"""
    Refund query example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='http://www.example.com/pay/weixin/notify'
)
data = wx_pay.refund_query(
# 以下传入参数四选一即可
out_refund_no=u'***商户退款单号***',
# out_trade_no=u'***商户订单号***',
# transaction_id=u'***微信订单号***',
# refund_id=u'***微信退款单号***',
)
def download_bill_example():
"""
    Download billing statement example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='http://www.example.com/pay/weixin/notify'
)
    print(wx_pay.download_bill(
        bill_date='20161228',  # statement date
        bill_type='ALL'  # statement type (ALL: all orders of the day; SUCCESS [default]: successfully paid orders; REFUND: refund orders of the day)
    ))
def send_red_pack_to_user_example():
"""
    Example: send a red packet to an individual user
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='WX_NOTIFY_URL'
)
wx_pay.send_red_pack(
# 证书获取方法请阅读:https://pay.weixin.qq.com/wiki/doc/api/tools/cash_coupon.php?chapter=4_3
# api_cert_path: 微信支付商户证书(apiclient_cert.pem)的本地保存路径
api_cert_path='/home/xxx/SERVER/ext_file/apiclient_cert.pem',
# api_cert_path: 微信支付商户证书(apiclient_key.pem)的本地保存路径
api_key_path='/home/xxx/SERVER/ext_file/apiclient_key.pem',
send_name=u'微信支付测试', # 红包名称
re_openid=u'***to_user_openid***', # 要接收红包的用户openid
total_amount=100, # total_fee 单位是 分, 100 = 1元, 最大499元
wishing=u'感谢参与测试', # 祝福语
client_ip=u'172.16.17.32', # 调用微信发红包接口服务器公网IP地址
act_name=u'微信支付测试系统', # 活动名称
remark=u'感谢参与' # 备注
)
def enterprise_payment_to_wallet():
"""
    Transfer money directly to a customer's WeChat wallet
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='WX_NOTIFY_URL'
)
wx_pay.enterprise_payment(
# 证书获取方法请阅读:https://pay.weixin.qq.com/wiki/doc/api/tools/cash_coupon.php?chapter=4_3
# api_cert_path: 微信支付商户证书(apiclient_cert.pem)的本地保存路径
api_cert_path='/home/xxx/SERVER/ext_file/apiclient_cert.pem',
# api_cert_path: 微信支付商户证书(apiclient_key.pem)的本地保存路径
api_key_path='/home/xxx/SERVER/ext_file/apiclient_key.pem',
openid=u'***to_user_openid***', # 要接收转账的用户openid
check_name=True, # 是否强制校验收款用户姓名
# 如果check_name为True,下面re_user_name必须传入
# 如果check_name为False,请删除下一行参数re_user_name
re_user_name=u'***客户的真实姓名***', # 校验不成功付款会是失败
amount=100, # total_fee 单位是 分, 100 = 1元, 单用户 单笔上限/当日上限:2W/2W
desc=u'充值失败退款', # 付款原因
spbill_create_ip='172.16.17.32', # 调用微信企业付款接口服务器公网IP地址
)
def swiping_card_example():
"""
    Swiping card (micropay) payment example
"""
wx_pay = WxPay(
wx_app_id='WX_APP_ID',
wx_mch_id='WX_MCH_ID',
wx_mch_key='WX_MCH_KEY',
wx_notify_url='http://www.example.com/pay/weixin/notify'
)
wx_pay.swiping_card_payment(
body=u'***商品名称/付款显示名称***', # 例如:综合超市
total_fee=100, # total_fee 消费金额 单位是 分
auth_code='131336161431593669', # 获取到的客户微信付款码
spbill_create_ip='172.16.17.32', # 调用微信企业付款接口服务器公网IP地址
)
if __name__ == "__main__":
pass
``` |
{
"source": "JollyBanny/sample-django",
"score": 3
} |
#### File: twitter/api/permissions.py
```python
from rest_framework.permissions import BasePermission, SAFE_METHODS
class IsTweetAuthReadOnly(BasePermission):
def has_object_permission(self, request, view, tweet):
if request.method in SAFE_METHODS:
return True
if request.user and \
request.user.is_authenticated and \
tweet.author == request.user:
return True
return False
``` |
{
"source": "jollychang/atlassian-python-api",
"score": 3
} |
#### File: atlassian-python-api/examples/bitbucket-project.py
```python
from atlassian import Bitbucket
def html(project):
html = """<tr>
<td>{project_key}</td>
<td>{project_name}</td>
<td><ul>""".format(**project)
for user in project['project_administrators']:
html += '\n\t<li><a href="mailto:{email}">{name}</a></li>'.format(**user)
return html + '</ul></td></tr>\n'
bitbucket = Bitbucket(
url='http://localhost:7990',
username='admin',
password='<PASSWORD>')
data = bitbucket.project('DEMO')
print(html(data))
``` |
{
"source": "jollycoin/jollycoin",
"score": 3
} |
#### File: jollycoin/jollycoin/merkle.py
```python
import hashlib
import binascii
class Merkle(object):
def __init__(self, hash_type="sha256"):
hash_type = hash_type.lower()
self.hash_function = getattr(hashlib, hash_type)
self.reset_tree()
def reset_tree(self):
self.leaves = list()
self.levels = None
self.is_ready = False
def add_leaf(self, values, do_hash=False):
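        # Values are hashed when do_hash=True; otherwise they are stored as given,
        # so callers are expected to pass already-hashed bytes.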
# check if single leaf
if not isinstance(values, tuple) and not isinstance(values, list):
values = [values]
for v in values:
if do_hash:
v = v.encode('utf-8')
v = self.hash_function(v).hexdigest()
v = bytearray.fromhex(v)
self.leaves.append(v)
def _calculate_next_level(self):
solo_leave = None
# number of leaves on the level
N = len(self.levels[0])
# if odd number of leaves on the level
if N % 2 == 1:
solo_leave = self.levels[0][-1]
N -= 1
new_level = []
for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):
new_level.append(self.hash_function(l+r).digest())
if solo_leave is not None:
new_level.append(solo_leave)
# prepend new level
self.levels = [new_level, ] + self.levels
def make_tree(self):
if self.leaves:
self.levels = [self.leaves]
while len(self.levels[0]) > 1:
self._calculate_next_level()
def get_merkle_root(self):
if self.levels is not None:
return self.levels[0][0].hex()
return None
if __name__ == '__main__':
m = Merkle()
m.add_leaf('123', True)
m.add_leaf('234', True)
    m.add_leaf('345', True)  # hash this leaf too; mixing raw strings with hashed leaves breaks tree construction
m.make_tree()
r = m.get_merkle_root()
print(f'r: {r!r}')
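    # Hypothetical extra check (not in the original): rebuilding the tree with an
    # additional leaf yields a different root, which is what makes the Merkle root
    # useful as a tamper-evident summary of the leaves.
    m2 = Merkle()
    m2.add_leaf(['123', '234', '345', '456'], True)
    m2.make_tree()
    print(f'root with an extra leaf: {m2.get_merkle_root()!r}')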
``` |
{
"source": "Jollyfant/EPOS-TURTLE",
"score": 2
} |
#### File: EPOS-TURTLE/classes/Annotation.py
```python
from EPOS import Node
class Annotation(Node):
def __init__(self, *args):
self.type = self.oa.Annotation
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/Catalog.py
```python
from EPOS import Node
class Catalog(Node):
def __init__(self, *args):
self.type = self.dcat.Catalog
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/Frequency.py
```python
from EPOS import Node
class Frequency(Node):
def __init__(self, *args):
self.type = self.dct.Frequency
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/HydraIriTemplateMapping.py
```python
from EPOS import Node
class HydraIriTemplateMapping(Node):
def __init__(self, *args):
self.type = self.hydra.IriTemplateMapping
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/LinguisticSystem.py
```python
from EPOS import Node
class LinguisticSystem(Node):
def __init__(self, *args):
self.type = self.dct.LinguisticSystem
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/MediaTypeOrExtent.py
```python
from EPOS import Node
class MediaTypeOrExtent(Node):
def __init__(self, *args):
self.type = self.dct.MediaTypeOrExtent
Node.__init__(self, args)
```
#### File: EPOS-TURTLE/classes/ProvenanceStatement.py
```python
from EPOS import Node
class ProvenanceStatement(Node):
def __init__(self, *args):
self.type = self.dct.ProvenanceStatement
Node.__init__(self, args)
``` |
{
"source": "Jollyfant/psd-module",
"score": 2
} |
#### File: Jollyfant/psd-module/psd.py
```python
import numpy as np
from obspy import read, UTCDateTime, Stream
from zlib import adler32
import ctypes
from calc import compressSpectrum, smoothSpectrum, getInstrumentResponse, psdWelch
from constants import (DB_REFERENCE, MINIMUM_PERIOD, NUMBER_OF_FREQUENCIES,
OCTAVE_PERIOD_STEP, SEGMENT_LENGTH)
from sdsfile import SDSFile
class PSDCollector():
"""
Calculates spectra for an ObsPy trace
Settings are internal in calculateSpectrum
"""
def __init__(self, connect_sql=True):
"""Initialize a PSDCollector.
Parameters
----------
connect_sql : `bool`
Whether or not to connect to an SQL database (default `True`).
"""
self.frequencies = self.setupFrequencies()
if connect_sql:
import pymysql.cursors
self.connection = pymysql.connect(
user="root",
password="password",
host="localhost",
db="test"
)
def setupFrequencies(self):
"""
def setupFrequencies
Sets up the frequencies to calculate the PSDs for
"""
# This is going to give us nice frequencies at 1, 2, 4Hz
steps = np.arange(NUMBER_OF_FREQUENCIES) * OCTAVE_PERIOD_STEP
numbers = MINIMUM_PERIOD * 2 ** steps
return np.reciprocal(numbers)
def calculateSpectrum(self, trace, frequencies,
frequency_responses=None, corresponding_frequencies=None):
"""
def calculateSpectrum
Calculates power spectrum of a trace using Welch's method
Can be either psdWelch, psdWelchScipy, psdWelchMlab
"""
fs = trace.stats.sampling_rate
# Number of points in the Welch FFT (13 segments per hour with 75% overlap)
# ObsPy goes to previous power of 2 (small difference)
nfft = int(SEGMENT_LENGTH * fs / 4.0)
# Load the instrument response from cacher if necessary
resp = frequency_responses
freqs = corresponding_frequencies
if resp is None or freqs is None:
resp, freqs = getInstrumentResponse(trace.stats)
# Calculate the spectra: import right function from calc.py
Pxx = psdWelch(
trace.data,
fs,
resp,
nfft=nfft,
overlap=0.75,
reference=DB_REFERENCE
)
# Smooth the spectrum over a full octave
return smoothSpectrum(freqs, Pxx, frequencies)
def readData(self, SDSFile):
"""
def readData
Reads mSEED data from disk from multiple files and creates a single stream
"""
# Create an empty stream to fill
ObspyStream = Stream()
# Read neighbouring files
for neighbour in SDSFile.neighbours:
# Read from 0h of this day, most likely in the previous day file,
# until half an hour in the next day [psd segment end]
st = read(
neighbour.filepath,
starttime=UTCDateTime(SDSFile.start),
endtime=UTCDateTime(SDSFile.end) + 0.5 * SEGMENT_LENGTH,
nearest_sample=False
)
# Concatenate all the traces
for tr in st:
if tr.stats.npts != 0:
ObspyStream.extend([tr])
# No data found
if not ObspyStream:
raise ValueError("No data for processing.")
# Simple clean up to remove overlaps and merge traces
# This does not fill any gaps
ObspyStream.merge(-1)
return ObspyStream
def storeObjects(self, psdObjects):
"""
def storeObject
Stores the prepared object to the database
# Make table
CREATE TABLE PSD (
network CHAR(2) NOT NULL,
station CHAR(5) NOT NULL,
location CHAR(2) NOT NULL,
channel CHAR(3) NOT NULL,
quality CHAR(1) NOT NULL,
start DATETIME NOT NULL,
shift SMALLINT(2) SIGNED NOT NULL,
offset TINYINT(1) SIGNED NOT NULL,
spectrum VARBINARY(255) NOT NULL
) ENGINE = InnoDB;
# Add index
CREATE INDEX idx_network ON PSD (network, station, location, channel, start);
"""
try:
with self.connection.cursor() as cursor:
# Currently store Infrasound / Seismic in same table called PSD
# TODO Need to store something else? E.g. data hash? Can be in another table pointing to daily 48 segments
variables = "network, station, location, channel, quality, start, shift, offset, spectrum"
values = "%s, %s, %s, %s, %s, %s, %s, %s, %s"
sql = "INSERT INTO PSD (%s) VALUES (%s)" % (variables, values)
cursor.executemany(sql, psdObjects)
# TODO check if object already exists
self.connection.commit()
finally:
self.connection.close()
def getTrace(self, data, segmentStart):
"""
def getTrace
Extracts a trace from the data that fits the PSD segment
"""
# TODO TEST TEST TEST
# Cut trace to an hour long segment
stream = data.slice(
segmentStart,
segmentStart + SEGMENT_LENGTH - 1E-6,
nearest_sample=False
)
# Only a single trace may remain
if len(stream) != 1:
stream._cleanup(misalignment_threshold=0.5)
# If _cleanup() doesn't fix it, throw an error
if len(stream) != 1:
raise ValueError("Number of traces is not one.")
# Get the first trace
trace = stream[0]
# Confirm the number of points is as expected
# Otherwise there must be a gap and thus we decide to skip the segment
if trace.stats.npts != int(trace.stats.sampling_rate * SEGMENT_LENGTH):
raise ValueError(
"Number of samples in segment length is not expected.")
return trace
def getResponseFromFDSN(self, sds_file):
"""Get the instrumental response from FDSN.
Taken from responseCacher/cache.py.
Parameters
----------
sds_file : `SDSFile`
Returns
-------
responses : `list` of `dict`
Each element of the list is a dictionary with the following fields:
* "start_time" : `obspy.UTCDateTime`
* "end_time" : `obspy.UTCDateTime`
* "response" : `numpy.ndarray`
* "frequencies" : `numpy.ndarray`
"""
inventory = sds_file.inventory
if inventory is None:
raise Exception("Could not find inventory for this file")
# Verify that only one station is returned
# (different responses are modeled as separate channels with same code)
contents = inventory.get_contents()
if (len(contents["networks"]) != 1 or len(contents["stations"]) != 1):
raise Exception("Inventory not valid for this file")
# Get all responses
response_list = []
for channel in inventory[0][0]:
response_dict = {}
# Store time frame for this response
response_dict["start_time"] = channel.start_date
response_dict["end_time"] = channel.end_date # TODO is end_date None or inexistent?
# Evaluate the response to VEL for infrasound channels
# Seismic stations should go to ACC to be consistent with the NLNM, NHNM
output = "ACC"
if channel.code.endswith("DF"):
output = "VEL"
# Call evalresp to evaluate the response
# NFFT must be same as in Welch's method!
# We use 13 segments with 75% overlap
fs = channel.sample_rate
resp, freqs = channel.response.get_evalresp_response(
t_samp=np.reciprocal(fs),
nfft=int(fs * SEGMENT_LENGTH / 4.0),
output=output
)
response_dict["response"] = resp
response_dict["frequencies"] = freqs
# Drop phase imaginary information
resp = np.abs(resp)
response_list.append(response_dict)
return response_list
def process(self, SDSFile, cache_response=True):
"""Process a given SDSFile to extract PSDs.
Parameters
----------
SDSFile : `SDSFile`
SDS file data/metadata
cache_response : `bool`
Whether to use the `responseCacher` (default `True`).
Returns
-------
`list` of `tuple`
Each element of the list corresponds to a segment of the PPSD computation,
and it contains (network, station, location, channel, quality, start of segment,
shift, offset, and the binary data).
"""
# Load data to an ObsPy trace
data = self.readData(SDSFile)
# Start and end times
segmentStart = UTCDateTime(SDSFile.start)
segmentEnd = UTCDateTime(SDSFile.end) + 0.5 * SEGMENT_LENGTH
psdObjects = list()
# Get instrument response if needed
all_responses = []
if not cache_response:
all_responses = self.getResponseFromFDSN(SDSFile)
# Loop over all the hour long segments until the end is reached
while segmentStart + SEGMENT_LENGTH <= segmentEnd:
# Find the appropriate response for the segment time
resp = None
freqs = None
for response in all_responses:
if (response["start_time"] < segmentStart
and (response["end_time"] is None
or segmentStart + SEGMENT_LENGTH < response["end_time"])):
resp = response["response"]
freqs = response["frequencies"]
# If no response contains segment and is not cached, skip this segment
if not cache_response and (resp is None or freqs is None):
segmentStart = segmentStart + 0.5 * SEGMENT_LENGTH
continue
# Get the trace in this time range
# TODO a lot of testing!
try:
trace = self.getTrace(data, segmentStart)
except ValueError:
segmentStart = segmentStart + 0.5 * SEGMENT_LENGTH
continue
# Internal spectrum calculation: the returned spectrum is smoothed
spectrum = self.calculateSpectrum(trace, self.frequencies, resp, freqs)
# Compress data to a BLOB for MySQL
offset, shift, binary = compressSpectrum(spectrum)
# Compute a short and fast but not very safe checksum from response
resp_checksum = adler32(resp) & 0xffffffff
resp_checksum = ctypes.c_int32(resp_checksum).value
# Save all metadata in a record
psd_record = {
"fileId": SDSFile.filename,
"checksum": SDSFile.checksum,
"checksumInventory": resp_checksum,
"net": SDSFile.net,
"sta": SDSFile.sta,
"loc": SDSFile.loc,
"cha": SDSFile.cha,
"quality": SDSFile.quality,
"ts": segmentStart.datetime,
"te": (segmentStart + SEGMENT_LENGTH).datetime,
"shift": shift,
"offset": offset,
"bin": binary
}
psdObjects.append(psd_record)
# Modify the start time of the trace with 50% overlap
segmentStart = segmentStart + 0.5 * SEGMENT_LENGTH
# Check if at least one document has been processed
if not psdObjects:
raise Exception("Unable to process PSD for any segment")
# Add checksum_prev to first segment and checksum_next to the last one,
# no matter if they are at the beggining of the day or not. This way,
# we can check if the file needs to be processed when previous/next
# files are added/modified, in all possible cases.
psdObjects[0]["checksum_prev"] = SDSFile.previous.checksum
psdObjects[-1]["checksum_next"] = SDSFile.next.checksum
return psdObjects
def processAndStore(self, SDSFile):
"""Process a given SDSFile to extract PSDs and saves the data in the
configured SQL database.
Parameters
----------
SDSFile : `SDSFile`
SDS file data/metadata
"""
psd_objects = self.process(SDSFile)
psd_objects = [(rec["net"],
rec["sta"],
rec["loc"],
rec["cha"],
rec["quality"],
rec["ts"].isoformat(),
rec["shift"],
rec["offset"],
rec["bin"]) for rec in psd_objects]
self.storeObjects(psd_objects)
``` |
{
"source": "jolly-good-toolbelt/jiratools",
"score": 3
} |
#### File: jiratools/jiratools/error_logger.py
```python
from typing import List, Iterable
from jiratools import format_as_code_block
from jiratools.helpers import add_comment
from jiratools.formatting import (
DataArray,
format_autoupdate_jira_msg,
format_as_jira_table,
)
DataHeaders = List[str]
class JiraEntry:
"""Typing mock for Jira Entry data."""
jira_id: str
error_message: str
def add_jira_error_comment(jira_id: str, error_msg: str, **format_kwargs) -> str:
"""
Add a comment to a JIRA with a formatted error message.
Args:
jira_id: the Issue ID of the JIRA to be updated
error_msg: the raw error message to include in the comment
**format_kwargs: formatting keyword args
to be passed to jiratools.formatting.format_jira_msg
Returns:
the Issue ID of the JIRA which received the comment
"""
add_comment(
jira_id,
format_autoupdate_jira_msg(format_as_code_block(error_msg), **format_kwargs),
)
return jira_id
def add_jira_comment_with_table(
jira_id: str,
data_headers: DataHeaders,
data_array: DataArray,
msg_prefix: str = "",
**format_kwargs
) -> str:
"""
Add a comment to a JIRA with a formatted data table.
Args:
jira_id: the Issue ID of the JIRA to be updated
data_headers: a list of header column names
data_array: An array of lists of strings,
representing table rows. e.g.::
[["a", "b", "c"], ["d", "e", "f"]]
**format_kwargs: formatting keyword args
to be passed to jiratools.formatting.format_jira_msg
Returns:
str: the Issue ID of the JIRA which received the comment
"""
message_with_table = "{}{}".format(
msg_prefix, format_as_jira_table(data_headers, data_array)
)
add_comment(
jira_id, format_autoupdate_jira_msg(message_with_table, **format_kwargs)
)
return jira_id
def update_jira_for_errors(
jiras: Iterable[JiraEntry], *errors: str, **format_kwargs
) -> List[str]:
"""
Auto-Update JIRAs if errors are found that match the jira list.
Args:
jiras: an iterable of objects
that each contain two required attributes
- jira_id: the Issue Id of the JIRA to be updated
- error_message: an error message substring which, if found,
will trigger and update of the JIRA with the actual error message.
*errors: an error message to be checked against the ``jiras`` for a match
**format_kwargs: formatting keyword args
to be passed to jiratools.formatting.format_jira_msg
Returns:
A list of JIRA Issue IDs that were updated.
"""
jiras_commented = []
for jira in jiras:
for error in errors:
if jira.error_message and jira.error_message in error:
jiras_commented.append(
add_jira_error_comment(jira.jira_id, error, **format_kwargs)
)
return jiras_commented
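# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a configured JIRA connection (see jiratools' config) and uses a placeholder
# issue ID; the stand-in class only provides the two attributes that
# update_jira_for_errors() expects (jira_id, error_message).
if __name__ == "__main__":
    class _KnownIssue:
        def __init__(self, jira_id: str, error_message: str):
            self.jira_id = jira_id
            self.error_message = error_message
    known_issues = [_KnownIssue("PROJ-123", "ConnectionError")]
    updated = update_jira_for_errors(
        known_issues, "ConnectionError: service unreachable"
    )
    print(updated)  # e.g. ["PROJ-123"] if the error matched and the comment was posted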
```
#### File: jiratools/jiratools/example_config.py
```python
import os
import shutil
import sys
from jgt_common import error_if
from .utils import CONFIG_FILENAME, SAMPLE_CONFIG_FILENAME
def cli_example_config(install: bool, install_if_missing: bool) -> None:
"""Build example config."""
error_if(
not os.path.exists(SAMPLE_CONFIG_FILENAME),
message="Missing example config file: {}".format(SAMPLE_CONFIG_FILENAME),
)
prefix = "Copying" if install else "Would copy"
print("{} {} to {}".format(prefix, SAMPLE_CONFIG_FILENAME, CONFIG_FILENAME))
if install or install_if_missing:
config_exists = os.path.exists(CONFIG_FILENAME)
message = "Not installing, config file already in place: {}".format(
CONFIG_FILENAME
)
if install:
error_if(config_exists, message=message)
elif install_if_missing:
print(message)
exit()
shutil.copy(SAMPLE_CONFIG_FILENAME, CONFIG_FILENAME)
else:
with open(SAMPLE_CONFIG_FILENAME) as in_file:
shutil.copyfileobj(in_file, sys.stdout)
``` |
{
"source": "JollyJavelin/DoryenRogueTest",
"score": 3
} |
#### File: JollyJavelin/DoryenRogueTest/DoryenRogueTest.py
```python
import libtcodpy as libtcod
SCREEN_WIDTH = 80
SCREEN_HEIGHT = 50
MAP_WIDTH = 80
MAP_HEIGHT = 45
LIMIT_FPS = 20
color_dark_wall = libtcod.Color(0, 0, 100)
color_dark_ground = libtcod.Color(50, 50, 150)
class Tile:
def __init__(self, blocked, block_sight = None):
self.blocked = blocked
if block_sight is None: block_sight = blocked
self.block_sight = block_sight
class Rect:
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
class Object:
def __init__(self, x, y, char, color):
self.x = x
self.y = y
self.char = char
self.color = color
def move(self, dx, dy):
if not map[self.x + dx][self.y + dy].blocked:
self.x += dx
self.y += dy
def draw(self):
libtcod.console_set_default_foreground(con, self.color)
libtcod.console_put_char(con, self.x, self.y, self.char, libtcod.BKGND_NONE)
def clear(self):
libtcod.console_put_char(con, self.x, self.y, ' ', libtcod.BKGND_NONE)
def make_map():
global map
map = [[Tile(False)
for y in range(MAP_HEIGHT)]
for x in range(MAP_WIDTH)]
map[30][22].blocked = True
map[30][22].block_sight = True
map[50][22].blocked = True
map[50][22].block_sight = True
def render_all():
for y in range(MAP_HEIGHT):
for x in range(MAP_WIDTH):
wall = map[x][y].block_sight
if wall:
libtcod.console_set_char_background(con, x, y, color_dark_wall, libtcod.BKGND_SET)
else:
libtcod.console_set_char_background(con, x, y, color_dark_ground, libtcod.BKGND_SET)
for object in objects:
object.draw()
libtcod.console_blit(con, 0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 0, 0)
def handle_keys():
key = libtcod.console_wait_for_keypress(True)
if key.vk == libtcod.KEY_ENTER and key.lalt:
libtcod.console_set_fullscreen(not libtcod.console_is_fullscreen())
elif key.vk == libtcod.KEY_ESCAPE:
return True
if libtcod.console_is_key_pressed(libtcod.KEY_UP):
player.move(0, -1)
elif libtcod.console_is_key_pressed(libtcod.KEY_DOWN):
player.move(0, 1)
elif libtcod.console_is_key_pressed(libtcod.KEY_LEFT):
player.move(-1, 0)
elif libtcod.console_is_key_pressed(libtcod.KEY_RIGHT):
player.move(1, 0)
libtcod.console_set_custom_font('arial10x10.png', libtcod.FONT_TYPE_GREYSCALE | libtcod.FONT_LAYOUT_TCOD)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, '<NAME>', False)
libtcod.sys_set_fps(LIMIT_FPS)
con = libtcod.console_new(SCREEN_WIDTH, SCREEN_HEIGHT)
player = Object(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2, '@', libtcod.white)
npc = Object(SCREEN_WIDTH // 2 - 5, SCREEN_HEIGHT // 2, '@', libtcod.yellow)
objects = [npc, player]
make_map()
while not libtcod.console_is_window_closed():
render_all()
libtcod.console_flush()
for object in objects:
object.clear()
exit = handle_keys()
if exit:
break
``` |
{
"source": "jollyjiyoun/DescEmb",
"score": 2
} |
#### File: DescEmb/datasets/dataset.py
```python
import os
import logging
import random
import collections
import torch
import torch.utils.data
import numpy as np
import pandas as pd
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
class BaseDataset(torch.utils.data.Dataset):
def __init__(
self,
input_path,
data,
eval_data,
fold,
split,
value_embed_type,
task,
seed,
ratio,
):
        assert (
            task in ["mlm", "w2v"]
            or not (data == 'pooled' and not eval_data)
        ), "--eval_data should be set if pooled-learning on prediction tasks"
self.data = data
if task in ["mlm", "w2v"]:
eval_data = data
self.input_path = input_path
self.split = split
self.prefix = (
eval_data if (
data == 'pooled' and split != 'train'
) else data
)
self.data_path = os.path.join(self.input_path, self.prefix)
self.label_path = os.path.join(self.input_path, "label")
self.ext = "_" + str(value_embed_type) + ".npy"
self.task = task
self.seed = seed
self.labels = None
self.tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
self.ratio = '' if ratio == '100' else '_' + ratio
if fold:
self.fold = fold
else:
self.fold = os.path.join(
self.input_path, "fold", "{}_{}_fold_split{}.csv".format(
self.prefix, self.seed, self.ratio
)
)
def __len__(self):
raise NotImplementedError()
def __getitem__(self, index):
raise NotImplementedError()
def get_fold_indices(self):
if self.split == 'train':
hit = 1
elif self.split == 'valid':
hit = 2
elif self.split == 'test':
hit = 0
df = pd.read_csv(self.fold)
splits = df[self.task].values
idcs = np.where(splits == hit)[0]
return idcs
def mask_tokens(self, inputs: torch.Tensor, special_tokens_mask: torch.Tensor):
"""
Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
"""
labels = inputs.clone()
# We sample a few tokens in each sequence for MLM training (with probability `self.textencoder_mlm_probability`)
probability_matrix = torch.full(labels.shape, self.mlm_prob)
if special_tokens_mask is None:
special_tokens_mask = torch.tensor(
self.tokenizer.get_special_tokens_mask(
labels, already_has_special_tokens=True
),
dtype=torch.bool
)
else:
special_tokens_mask = special_tokens_mask.bool()
probability_matrix.masked_fill_(special_tokens_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
while torch.equal(masked_indices, torch.zeros(len(masked_indices)).bool()):
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
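# A minimal illustrative sketch of the BERT-style 80/10/10 scheme used in
# mask_tokens above: of the positions selected with probability mlm_prob,
# roughly 80% become the mask token, 10% a random token, and 10% stay
# unchanged, while labels are -100 elsewhere so the loss only covers masked
# positions. The helper name and values below are hypothetical.
def _demo_mlm_masking(seq_len=10, mlm_prob=0.15, vocab_size=100, mask_token_id=0):
    inputs = torch.randint(5, vocab_size, (seq_len,))
    labels = inputs.clone()
    # choose ~mlm_prob of the positions for masking
    masked = torch.bernoulli(torch.full(labels.shape, mlm_prob)).bool()
    labels[~masked] = -100  # ignore unmasked positions in the loss
    # 80% of the chosen positions -> mask token
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    inputs[replaced] = mask_token_id
    # half of the remainder (~10% overall) -> random token; the rest unchanged
    random_ix = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    inputs[random_ix] = torch.randint(vocab_size, labels.shape)[random_ix]
    return inputs, labels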
class Dataset(BaseDataset):
def __init__(
self,
input_path,
data,
eval_data,
fold,
split,
value_embed_type,
task,
seed,
ratio,
):
super().__init__(
input_path=input_path,
data=data,
eval_data=eval_data,
fold=fold,
split=split,
value_embed_type=value_embed_type,
task=task,
seed=seed,
ratio=ratio,
)
hit_idcs = self.get_fold_indices()
self.sequential_lengths = None
self.value = np.load(
file=os.path.join(self.data_path, "value.npy")
)
self.value = self.value[hit_idcs]
if self.data == 'pooled':
self.input_idcs = np.load(
file=os.path.join(self.data_path, "pooled_input_index{}".format(self.ext)),
)
else:
self.input_idcs = np.load(
file=os.path.join(self.data_path, "{}_input_index{}".format(self.prefix, self.ext))
)
self.input_idcs = self.input_idcs[hit_idcs]
self.sequential_lengths = np.load(
file=os.path.join(self.data_path, f"seq_len.npy"),
)
self.sequential_lengths = self.sequential_lengths[hit_idcs]
        self.label = np.load(
            file=os.path.join(
                self.label_path, "{}_{}_label.npy".format(self.prefix, self.task)
            ),
        )
self.label = torch.tensor(self.label[hit_idcs], dtype=torch.long)
logger.info(f"loaded {len(self.input_idcs)} {self.split} samples")
def __len__(self):
return len(self.input_idcs)
def __getitem__(self, index):
input_idcs = torch.LongTensor(self.input_idcs[index])
seq_len = torch.LongTensor(self.sequential_lengths).unsqueeze(-1)[index]
label = torch.LongTensor(self.label).unsqueeze(-1)[index]
value = torch.Tensor(self.value[index])
return {
'input_ids': input_idcs,
'seq_len': seq_len,
'value': value,
'label': label
}
def collator(self, samples):
samples = [s for s in samples if s["input_ids"] is not None]
if len(samples) == 0:
return {}
input = {"input_ids": torch.stack([s["input_ids"] for s in samples])}
input["seq_len"] = torch.stack([s["seq_len"] for s in samples])
input["value"] = torch.stack([s["label"] for s in samples])
out = {"label": torch.stack([s["label"] for s in samples])}
out["net_input"] = input
return out
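# Illustrative note: collator() is meant to be passed as the collate_fn of a
# torch DataLoader (the constructor arguments below are hypothetical), e.g.
#
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=32, shuffle=True, collate_fn=dataset.collator
#     )
#
# so each batch is a dict with "label" and "net_input" (input_ids, seq_len,
# value), matching the keys built above.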
class TokenizedDataset(BaseDataset):
def __init__(
self,
input_path,
data,
eval_data,
fold,
split,
value_embed_type,
task,
seed,
ratio,
):
super().__init__(
input_path=input_path,
data=data,
eval_data=eval_data,
fold=fold,
split=split,
value_embed_type=value_embed_type,
task=task,
seed=seed,
ratio=ratio,
)
hit_idcs = self.get_fold_indices()
col_names = ['input_ids', 'token_type_ids', 'attention_mask']
self.offset_orders = None
self.sequential_lengths = None
self.value = np.load(
file=os.path.join(self.data_path, "value.npy")
)
self.value = self.value[hit_idcs]
self.input_ids, self.token_type_ids, self.attention_mask = (
np.load(
file=os.path.join(self.data_path, f"{col}{self.ext}"),
) for col in col_names
)
self.input_ids = self.input_ids[hit_idcs]
self.token_type_ids = self.token_type_ids[hit_idcs]
self.attention_mask = self.attention_mask[hit_idcs]
self.sequential_lengths = np.load(
file=os.path.join(self.data_path, "seq_len.npy"),
)
self.sequential_lengths = self.sequential_lengths[hit_idcs]
        self.label = np.load(
            file=os.path.join(
                self.label_path, "{}_{}_label.npy".format(self.prefix, self.task)
            ),
        )
self.label = torch.tensor(self.label[hit_idcs], dtype=torch.long)
logger.info(f"loaded {len(self.input_ids)} {self.split} samples")
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
input_ids = torch.LongTensor(self.input_ids[index])
token_type_id = torch.LongTensor(self.token_type_ids[index])
attn_mask = torch.LongTensor(self.attention_mask[index])
seq_len = (torch.LongTensor(self.sequential_lengths).unsqueeze(-1)[index])
label = torch.LongTensor(self.label).unsqueeze(-1)[index]
value = torch.Tensor(self.value[index])
return {
'input_ids': input_ids,
'token_type_ids': token_type_id,
'attention_mask': attn_mask,
'seq_len': seq_len,
'value': value,
'label': label
}
def collator(self, samples):
samples = [s for s in samples if s["input_ids"] is not None]
if len(samples) == 0:
return {}
input = {"input_ids": torch.stack([s["input_ids"] for s in samples])}
input["token_type_ids"] = torch.stack([s["token_type_ids"] for s in samples])
input["attention_mask"] = torch.stack([s["attention_mask"] for s in samples])
input["seq_len"] = torch.stack([s["seq_len"] for s in samples])
input["value"] = torch.stack([s["value"] for s in samples])
out = {"label": torch.stack([s["label"] for s in samples])}
out["net_input"] = input
return out
class MLMTokenizedDataset(BaseDataset):
def __init__(
self,
input_path,
data,
eval_data,
fold,
split,
value_embed_type,
task,
seed,
ratio,
mlm_prob
):
super().__init__(
input_path=input_path,
data=data,
eval_data=eval_data,
fold=fold,
split=split,
value_embed_type=value_embed_type,
task=task,
seed=seed,
ratio=ratio,
)
self.mlm_prob = mlm_prob
col_names = ['input_ids', 'token_type_ids', 'attention_mask']
self.input_ids, self.token_type_ids, self.attention_mask = (
np.load(
file=os.path.join(self.data_path, f"{col}_unique_code.npy")
) for col in col_names
)
logger.info(f"loaded {len(self.input_ids)} {self.split} samples")
def __len__(self):
return len(self.input_ids)
def __getitem__(self, index):
input_ids = torch.LongTensor(self.input_ids[index])
token_type_id = torch.LongTensor(self.token_type_ids[index])
attn_mask = torch.LongTensor(self.attention_mask[index])
input_ids, mlm_labels = self.mask_tokens(input_ids, special_tokens_mask=None)
return {
'input_ids': input_ids,
'token_type_ids': token_type_id,
'attention_mask': attn_mask,
'mlm_labels': mlm_labels,
}
def collator(self, samples):
samples = [s for s in samples if s["input_ids"] is not None]
if len(samples) == 0:
return {}
input = {"input_ids": torch.stack([s["input_ids"] for s in samples])}
input["token_type_ids"] = torch.stack([s["token_type_ids"] for s in samples])
input["attention_mask"] = torch.stack([s["attention_mask"] for s in samples])
out = {"label": torch.stack([s["mlm_labels"] for s in samples])}
out["net_input"] = input
return out
class Word2VecDataset(BaseDataset):
def __init__(
self,
input_path,
data,
eval_data,
fold,
split,
value_embed_type,
task,
seed,
ratio,
):
        super().__init__(
            input_path=input_path,
            data=data,
            eval_data=eval_data,
            fold=fold,
            split=split,
            value_embed_type=value_embed_type,
            task=task,
            seed=seed,
            ratio=ratio,
        )
input_idcs = np.load(
file=os.path.join(self.data_path, "{}_input_index{}".format(self.prefix, self.ext))
)
input_idcs = self.indexing(input_idcs, self.data, self.seed)
self.pos_pair, self.neg_pair = self.preprocess(input_idcs)
self.pos_pair.pop(0)
self.index_dict = {i: k for i, k in enumerate(self.pos_pair.keys())}
def __len__(self):
return len(self.pos_pair)
def __getitem__(self, item):
item = self.index_dict[item]
try:
pos = random.sample(self.pos_pair[item], 5)
except ValueError:
pos = random.choices(self.pos_pair[item], k=5)
try:
neg = random.sample(self.neg_pair[item], 30)
except ValueError:
neg = random.choices(self.neg_pair[item], k=30)
return torch.LongTensor([item]), torch.LongTensor(pos), torch.LongTensor(neg)
def indexing(self, data, dataname, seed):
hit = 1
        df = pd.read_csv(os.path.join(self.input_path, 'fold', f'{dataname}_{seed}_fold_split.csv'))
splits = df[self.task].values
idcs = np.where(splits == hit)[0]
data = data[idcs]
return data
def preprocess(self, mimic):
pos_pair = {}
skip_window=15
for index in range(mimic.shape[0]):
data = mimic[index]
data_index = 0
span = 2 * skip_window + 1 # [ skip_window, target, skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for m in range(skip_window):
try:
pos_pair[buffer[m]].extend(list(set(list(buffer))))
except KeyError:
pos_pair[buffer[m]] = list(set(list(buffer)))
for i in range(np.nonzero(data)[0].max() - skip_window):
key = buffer[skip_window]
if buffer[skip_window] == 0:
continue
try:
pos_pair[key].extend(list(set(list(buffer))))
except KeyError:
pos_pair[key] = list(set(list(buffer)))
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
data_index = (data_index + len(data) - span) % len(data)
pos_pair = {k: list(set(v)) for k, v in pos_pair.items()}
# negative_pair
max_num = mimic.max()
neg_pair = {k:list(set(v) ^ set(list(np.arange(3, max_num)))) for k, v in pos_pair.items()}
return pos_pair, neg_pair
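# Illustrative note on the pair construction above: with skip_window=15 the
# context span is 2*15+1=31 codes; every code co-occurring with a center code
# inside that span becomes one of its positives, and its negatives are derived
# from the code indices in [3, max_num) via a symmetric set difference with the
# positives. __getitem__ then samples 5 positives and 30 negatives per center
# code, falling back to sampling with replacement when fewer are available.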
```
#### File: DescEmb/models/__init__.py
```python
import importlib
import os
import pdb
MODEL_REGISTRY = {}
# pdb.set_trace()
# build model instance
# only execute build_model() if model argument passed by user is defined
# for main model (not embed model, predict model)
def build_model(args):
model = None
model_type = getattr(args, "model", None)
if model_type in MODEL_REGISTRY:
model = MODEL_REGISTRY[model_type]
assert model is not None, (
f"Could not infer model type from {model_type}. "
f"Available models: "
+ str(MODEL_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
return model.build_model(args)
# a function decorator that adds model name, class to MODEL_REGISTRY
def register_model(name):
"""
New model types can be added with the :func:`register_model`
function decorator.
For example:
@register_model('descemb_bert')
class BertTextEncoder(nn.Module):
(...)
Args:
name (str): the name of the model
"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError(f"Cannot register duplicate model ({name})")
MODEL_REGISTRY[name] = cls
return cls
return register_model_cls
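# Illustrative note (the model name below is hypothetical): a typical use of
# the registry is
#
#     @register_model("my_toy_model")
#     class MyToyModel(nn.Module):
#         @classmethod
#         def build_model(cls, args):
#             return cls()
#
# import_models() below imports every module under models/, which executes the
# decorators and fills MODEL_REGISTRY; build_model(args) then looks up
# args.model and delegates to the registered class's build_model(args).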
def import_models(models_dir, namespace):
# for each file in models/ directory
# : __pycache__, __init__.py, codeemb.py, descemb.py, ehr_model.py, rnn.py, word2vec.py
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
# model_name: codeemb, descemb, ehr_model, rnn, word2vec
model_name = file[: file.find(".py")] if file.endswith(".py") else file
# models.codeemb, models.descemb, models.ehr_model, models.rnn, models.word2vec
importlib.import_module(namespace + "." + model_name)
# automatically import any Python files in the models/ directory
# models_dir: models/ directory
models_dir = os.path.dirname(__file__)
# 1. import and define all model classes
# 2. run register_model() for each class, and add model instance to MODEL_REGISTRY
# 3. when model.build_model is executed, check if model name exists in MODEL_REGISTRY
import_models(models_dir, "models")
# MODEL_REGISTRY dictionary should be filled with models
# each model class instance will be created
# pdb.set_trace()
```
#### File: DescEmb/modules/identity_layer.py
```python
import torch.nn as nn
class IdentityLayer(nn.Module):
def __init__(self):
super().__init__()
def forward(self, source):
return source
``` |
{
"source": "jollyjonson/multitask-f0",
"score": 3
} |
#### File: multitask-f0/deepsalience/compute_hcqts.py
```python
from __future__ import print_function
import argparse
import csv
from joblib import Parallel, delayed
import librosa
import numpy as np
import sys
def get_hcqt_params():
"""Static function to store HCQT parameters.
Returns
-------
bins_per_octave : int
Number of bins per octave in HCQT
n_octaves : int
Number of octaves in HCQT
harmonics : list
List of harmonics to compute in HCQT
sr : float
Sample rate to load input audio signal.
fmin : float
Minimum frequency in HCQT (in Hz)
hop_length : int
Hop length (in samples) at sample rate `sr` of the HCQT
"""
bins_per_octave = 60
n_octaves = 6
harmonics = [1, 2, 3, 4, 5]
sr = 22050
fmin = 32.7
hop_length = 256
return bins_per_octave, n_octaves, harmonics, sr, fmin, hop_length
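# Illustrative note: with 60 bins per octave the HCQT has 5 bins per semitone,
# and 6 octaves give 60 * 6 = 360 frequency bins per harmonic. fmin = 32.7 Hz
# corresponds to C1, and hop_length = 256 samples at sr = 22050 Hz is a frame
# hop of roughly 256 / 22050 ~ 11.6 ms.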
def compute_hcqt(audio_fpath):
"""Compute the harmonic CQT from a given audio file
Parameters
----------
audio_fpath : str
Path to input audio file
Returns
-------
hcqt : np.ndarray
HCQT as a numpy array
"""
# get parameters and load audio
(bins_per_octave, n_octaves, harmonics,
sr, f_min, hop_length) = get_hcqt_params()
y, fs = librosa.load(audio_fpath, sr=sr)
# compute cqt for each minimum frequency h*f_min
cqt_list = []
shapes = []
for h in harmonics:
cqt = librosa.cqt(
y, sr=fs, hop_length=hop_length, fmin=f_min*float(h),
n_bins=bins_per_octave*n_octaves,
bins_per_octave=bins_per_octave
)
cqt_list.append(cqt)
shapes.append(cqt.shape)
# adjust number of time frames across harmonics to the minimum length
shapes_equal = [s == shapes[0] for s in shapes]
if not all(shapes_equal):
min_time = np.min([s[1] for s in shapes])
new_cqt_list = []
for i in range(len(cqt_list)):
new_cqt_list.append(cqt_list[i][:, :min_time])
cqt_list = new_cqt_list
# compute log amplitude and normalize between 0 and 1
log_hcqt = ((1.0/80.0) * librosa.core.amplitude_to_db(
np.abs(np.array(cqt_list)), ref=np.max)) + 1.0
return log_hcqt
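# Illustrative note: librosa's amplitude_to_db(..., ref=np.max) returns values
# in [-80, 0] dB (given the default top_db=80), so scaling by 1/80 and adding 1
# maps the log-magnitude HCQT into the range [0, 1].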
def get_hcqt(input_audio, output_npy):
"""Compute and save an HCQT
Parameters
----------
input_audio : str
Path to input audio file
output_npy : str
Path to save HCQT npy file
"""
print("Computing HCQT for {}".format(input_audio))
try:
hcqt = compute_hcqt(input_audio)
np.save(output_npy, hcqt.astype(np.float32))
    except Exception:
print("Something went wrong for input_audio = {}".format(input_audio))
print("Unexpected error:", sys.exc_info()[0])
def main(args):
"""Main method to compute HCQTs in parallel
"""
# expects `args.file_paths` to be a path to a tab delimited file
# with each line containing an `input_audio` `output_npy` pair.
file_pairs = []
with open(args.file_paths, 'r') as fhandle:
reader = csv.reader(fhandle, delimiter='\t')
for line in reader:
file_pairs.append(line)
# compute HCQTs in parallel
Parallel(n_jobs=args.n_jobs, verbose=5)(
delayed(get_hcqt)(input_audio, output_npy) \
for input_audio, output_npy in file_pairs
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Compute and save HCQTs for a set of files.")
parser.add_argument("file_paths",
type=str,
help="Tab delimited file containing "
"input_audio/output_npy filepaths.")
parser.add_argument("n_jobs",
type=int,
help="Number of jobs to run in parallel.")
main(parser.parse_args())
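# Illustrative usage (file names and paths below are hypothetical): each line
# of the input file is a tab-delimited "input_audio<TAB>output_npy" pair, e.g.
#     /data/audio/track01.wav    /data/hcqt/track01.npy
# and the script would be invoked as:
#     python compute_hcqts.py file_pairs.tsv 4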
```
#### File: multitask-f0/deepsalience/compute_multitask_data.py
```python
from __future__ import print_function
import argparse
import csv
import glob
import json
import medleydb as mdb
from medleydb import mix
import mir_eval
import numpy as np
import os
import compute_training_data as C
EXCEPTIONS = {
'AnEndlessSporadic_Anything_STEM_04': 'synthesizer',
'Anberlin_TheFeelGoodDrag_STEM_04': 'synthesizer',
'ArcadeFire_BlackMirror_STEM_02': 'acoustic guitar',
'ArcadeFire_BlackMirror_STEM_07': 'acoustic guitar',
'BillyIdol_WhiteWedding_STEM_05': 'distorted electric guitar',
'Blink182_AllTheSmallThings_STEM_05': 'clean electric guitar',
'Blondie_OneWayOrAnother_STEM_06': 'distorted electric guitar',
'BlueOysterCult_DontFearTheReaper_STEM_05': 'clean electric guitar',
'LilyAllen_NotFair_STEM_04': 'clean electric guitar',
'TheAllAmericanRejects_DirtyLittleSecret_STEM_04': 'distorted electric guitar',
'TheAllmanBrothersBand_RamblinMan_STEM_06': 'clean electric guitar',
    'TheLastGoodnight_PicturesOfYou_STEM_02': 'distorted electric guitar'
}
def save_singlef0_output(times, freqs, output_path):
"""save singlef0 output to a csv file."""
with open(output_path, 'w') as fhandle:
csv_writer = csv.writer(fhandle, delimiter='\t')
for t, f in zip(times, freqs):
csv_writer.writerow([t, f])
def multif0_to_timefreq(times, freqs):
"""Unroll a multif0 annotation of the form (t, [f1, f2, f3])
to a list of (t, f) where t may be repeated.
Parameters
----------
times : list
Time stamps
freqs : list of lists
all frequencies for a given time stamp
Returns
-------
t_unrolled : list
Unrolled time stamps
f_unrolled : list
Unrolled frequency values
"""
t_unrolled = []
f_unrolled = []
for t, f_list in zip(times, freqs):
for f in f_list:
if f == 0:
continue
t_unrolled.append(t)
f_unrolled.append(f)
return t_unrolled, f_unrolled
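# Illustrative example: times=[0.00, 0.01] with freqs=[[220.0, 440.0], [0]]
# unrolls to t_unrolled=[0.00, 0.00] and f_unrolled=[220.0, 440.0]; zero
# frequencies (silence) are dropped.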
def get_replace_info(mtrack, replace_path):
"""Go through repalced stems and get annotation and mixing info.
Parameters
----------
mtrack : MultiTrack
Medleydb multitrack object
replace_path : str
Path to where the resynthesized audio and annotations live
Returns
-------
replace_stem_annotations : dictionary
Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
for any relevant annotations.
replace_altindices : dictionary
Dictionary keyed by stem id mapping to the replaced path.
Used by mix.mix_multitrack for replacing stems.
stem_indices : list
List of stem indices for replaced stems
"""
# glob for all replaced stems for this multitrack
replace_stems = glob.glob(os.path.join(
replace_path, '{}*_replace.wav'.format(mtrack.track_id)
))
# initialize
stem_indices = []
replace_stem_annotations = {}
replace_altindices = {}
# loop over resynthesized stems
for stem_path in replace_stems:
# path where annotation should live
annot_path = os.path.join(
replace_path,
"{}_vamp_pyin_pyin_smoothedpitchtrack.csv".format(
os.path.basename(stem_path).split('.')[0])
)
# if annotation doesn't exist, cry!
if not os.path.exists(annot_path):
print("[Warning] Couldn't find annotation for {}".format(stem_path))
continue
# parse stem index from file name
stem_id = int(os.path.basename(stem_path).split('_')[3].split('.')[0])
# load resynth annotation
times, freqs = mir_eval.io.load_ragged_time_series(
annot_path, delimiter=','
)
annot_t, annot_f = multif0_to_timefreq(times, freqs)
tags = ['multif0', 'vocal', 'melody']
stem_indices.append(stem_id)
# add annotation to dictionary
replace_stem_annotations[stem_id] = {
'times': annot_t, 'freqs': annot_f, 'tags': tags
}
# add resynth file to replacement dictionary
replace_altindices[stem_id] = stem_path
return replace_stem_annotations, replace_altindices, stem_indices
def get_resynth_info(mtrack, resynth_path, stem_indices):
"""Go through resynthesized stems and get annotation and mixing info.
Parameters
----------
mtrack : MultiTrack
Medleydb multitrack object
resynth_path : str
Path to where the resynthesized audio and annotations live
stem_indices : list
List of indices already used
Returns
-------
resynth_stem_annotations : dictionary
Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
for any relevant annotations.
resynth_altindices : dictionary
Dictionary keyed by stem id mapping to the resynthesized path.
Used by mix.mix_multitrack for replacing stems.
stem_indices_guitar : list
List of stem indices containing any kind of resynthesized guitar
stem_indices_piano : list
List of stem indices containing any kind of resynthesized piano
"""
# glob for all resynth stems for this multitrack
resynth_stems = glob.glob(os.path.join(
resynth_path, '{}*_resynth.wav'.format(mtrack.track_id)
))
# initialize
stem_indices_piano = []
stem_indices_guitar = []
resynth_stem_annotations = {}
resynth_altindices = {}
# loop over resynthesized stems
for stem_path in resynth_stems:
# path where annotation should live
annot_path = os.path.join(
resynth_path,
"{}.txt".format(os.path.basename(stem_path).split('.')[0])
)
# parse stem index from file name
stem_id = int(os.path.basename(stem_path).split('_')[3].split('.')[0])
stem = mtrack.stems[stem_id]
if stem_id in stem_indices:
continue
# if annotation doesn't exist, cry!
if not os.path.exists(annot_path):
print("[Warning] Couldn't find annotation for {}".format(stem_path))
continue
# load resynth annotation
times, freqs = mir_eval.io.load_ragged_time_series(annot_path)
annot_t, annot_f = multif0_to_timefreq(times, freqs)
tags = ['multif0']
# apply tags based on whether instrument is piano or guitar
basename = os.path.basename(stem.audio_path.split('.')[0])
if basename in EXCEPTIONS.keys():
instrument = EXCEPTIONS[basename]
else:
instrument = stem.instrument
if 'piano' in instrument:
tags.append('piano')
stem_indices_piano.append(stem_id)
elif ('electric piano' in instrument or
'synthesizer' in instrument):
tags.append('keys')
stem_indices_piano.append(stem_id)
elif ('acoustic guitar' in instrument or
'clean electric guitar' in instrument or
'distorted electric guitar' in instrument):
tags.append('guitar')
stem_indices_guitar.append(stem_id)
else:
print("[Warning] resynth stem is instrument {}! skipping".format(
instrument))
continue
# add annotation to dictionary
resynth_stem_annotations[stem_id] = {
'times': annot_t, 'freqs': annot_f, 'tags': tags
}
# add resynth file to replacement dictionary
resynth_altindices[stem_id] = stem_path
return (resynth_stem_annotations, resynth_altindices,
stem_indices_guitar, stem_indices_piano)
def get_orig_stem_info(mtrack, stem_indices):
"""Go through original stems and get annotation and mixing info.
Parameters
----------
mtrack : MultiTrack
Medleydb multitrack object
stem_indices : list
List of stem indices already included
Returns
-------
orig_stem_annotations : dictionary
Dictionary keyed by stem id mapping to 'times', 'freqs', and 'tags'
for any relevant annotations.
stem_annot_activity : dictionary
Dictionary keyed by stem id mapping to annotation activation information
orig_stem_indices : list
List of stem indices to include in mix.
"""
orig_stem_indices = []
stem_annot_activity = {}
orig_stem_annotations = {}
# go through the rest of the stems
for stem in mtrack.stems.values():
# skip this stem if it was resynthesized
if stem.stem_idx in stem_indices:
continue
# skip this stem if it has more than one instrument
if len(stem.instrument) > 1 or len(stem.f0_type) > 1:
continue
# skip this stem if it is a polyphonic instrument
if stem.f0_type[0] == 'p':
continue
# if stem is unpitched, just add it to the mix
if stem.f0_type[0] == 'u':
orig_stem_indices.append(stem.stem_idx)
# if stem is mono, add it to the mix and get its annotation
if len(stem.instrument) == 1 and stem.f0_type[0] == 'm':
orig_stem_indices.append(stem.stem_idx)
annot_t, annot_f, annot_activation = C.get_stem_annotation(
stem, mtrack
)
stem_annot_activity[stem.stem_idx] = annot_activation
tags = ['multif0']
if stem.instrument[0] in mix.VOCALS:
tags.append('vocal')
if stem.component == 'melody':
tags.append('melody')
if stem.component == 'bass':
tags.append('bass')
orig_stem_annotations[stem.stem_idx] = {
'times': annot_t, 'freqs': annot_f, 'tags': tags
}
return orig_stem_annotations, stem_annot_activity, orig_stem_indices
def save_annotation(times, freqs, save_path):
""" Save an annotation to a filepath or return None if annotation is
empty.
Parameters
----------
times : list
List of times
freqs : list
List of freqs
save_path : str
Path to save file
Returns
-------
output : str or None
If times/freqs are not empty, returns save_path. Otherwise
returns None.
"""
if len(times) > 0:
# using singlef0 save here because we have unwrapped multif0s
save_singlef0_output(times, freqs, save_path)
return save_path
else:
return None
def create_annotations(save_dir, track_id, stem_annotations):
"""Create a dictionary of annotations by type, given a list of annotations
by stem.
Parameters
----------
save_dir : str
Path to eventually save each annotation.
track_id : str
Medleydb trackid
stem_annotations : dictionary
Dictionary keyed by stem id with values 'times', 'freqs' and 'tags'
Returns
-------
annotations : dictionary
Dictionary keyed by annotation type (e.g. 'vocal') with values
'times' 'freqs' and 'path'.
"""
# create initial annotations dictionary
annotations = {
'multif0': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_multif0_annotation.txt".format(track_id))
},
'multif0_noguitar': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_multif0_noguitar_annotation.txt".format(track_id))
},
'multif0_nosynth': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_multif0_nosynth_annotation.txt".format(track_id))
},
'melody': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_melody_annotation.txt".format(track_id))
},
'vocal': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_vocal_annotation.txt".format(track_id))
},
'bass': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_bass_annotation.txt".format(track_id))
},
'piano': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_piano_annotation.txt".format(track_id))
},
'guitar': {
'times': [], 'freqs': [], 'path': os.path.join(
save_dir, "{}_guitar_annotation.txt".format(track_id))
}
}
# loop over each stem annotation and add it to corresponding annotation
# types (e.g. stems with melody tag are added to the melody annotation)
for key in sorted(stem_annotations.keys()):
annot_dict = stem_annotations[key]
tags = annot_dict['tags']
# all stems should have the 'multif0' tag
if 'multif0' in tags and annot_dict['times'] is not None:
annotations['multif0']['times'].extend(annot_dict['times'])
annotations['multif0']['freqs'].extend(annot_dict['freqs'])
# if stem is guitar, add it to guitar
if 'guitar' in tags and annot_dict['times'] is not None:
annotations['guitar']['times'].extend(annot_dict['times'])
annotations['guitar']['freqs'].extend(annot_dict['freqs'])
# if stem is not guitar add it to the multif0 no guitar annotation
elif annot_dict['times'] is not None:
annotations['multif0_noguitar']['times'].extend(annot_dict['times'])
annotations['multif0_noguitar']['freqs'].extend(annot_dict['freqs'])
# if stem is piano add to piano annotation
if 'piano' in tags and annot_dict['times'] is not None:
annotations['piano']['times'].extend(annot_dict['times'])
annotations['piano']['freqs'].extend(annot_dict['freqs'])
# if stem is not synthesized (i.e. not piano or guitar) add it to
# the nosynth annotation
if ('piano' not in tags and
'guitar' not in tags and
'keys' not in tags and
annot_dict['times'] is not None):
annotations['multif0_nosynth']['times'].extend(annot_dict['times'])
annotations['multif0_nosynth']['freqs'].extend(annot_dict['freqs'])
# add melody stems to melody annotation
if 'melody' in tags and annot_dict['times'] is not None:
annotations['melody']['times'].extend(annot_dict['times'])
annotations['melody']['freqs'].extend(annot_dict['freqs'])
# add vocal stems to vocal annotation
if 'vocal' in tags and annot_dict['times'] is not None:
annotations['vocal']['times'].extend(annot_dict['times'])
annotations['vocal']['freqs'].extend(annot_dict['freqs'])
# add bass stems to bass annotation
if 'bass' in tags and annot_dict['times'] is not None:
annotations['bass']['times'].extend(annot_dict['times'])
annotations['bass']['freqs'].extend(annot_dict['freqs'])
return annotations
def create_annotation_save_pairs(annotations, mix_path, mix_path_noguitar,
mix_path_nosynth):
"""Create a dictionary that maps an audio file to its corresponding
multitask annotations.
Parameters
----------
annotations : dictionary
Dictionary mapping annotation type to 'times', 'freqs', and 'path'
to save.
mix_path : str
Path to full multif0 mix
mix_path_noguitar : str
Path to no guitar multif0 mix
mix_path_nosynth : str
Path to no synthesized stems multif0 mix
Returns
-------
audio_annot_pairs : dictionary
Dictionary mapping audio path to annotation paths by type.
"""
audio_annot_pairs = {
mix_path: {},
mix_path_noguitar: {},
mix_path_nosynth: {}
}
for annot_type in annotations.keys():
output = save_annotation(
annotations[annot_type]['times'],
annotations[annot_type]['freqs'],
annotations[annot_type]['path']
)
if annot_type == 'multif0':
audio_annot_pairs[mix_path]['multif0'] = output
elif annot_type == 'multif0_noguitar':
audio_annot_pairs[mix_path_noguitar]['multif0'] = output
elif annot_type == 'multif0_nosynth':
audio_annot_pairs[mix_path_nosynth]['multif0'] = output
elif annot_type == 'guitar':
audio_annot_pairs[mix_path]['guitar'] = output
audio_annot_pairs[mix_path_noguitar]['guitar'] = None
audio_annot_pairs[mix_path_nosynth]['guitar'] = None
elif annot_type == 'piano':
audio_annot_pairs[mix_path]['piano'] = output
audio_annot_pairs[mix_path_noguitar]['piano'] = output
audio_annot_pairs[mix_path_nosynth]['piano'] = None
else:
audio_annot_pairs[mix_path][annot_type] = output
audio_annot_pairs[mix_path_noguitar][annot_type] = output
audio_annot_pairs[mix_path_nosynth][annot_type] = output
return audio_annot_pairs
def generate_filtered_stems(stem_annot_activity, mtrack, save_dir):
"""Create filtered stems for stems with annotation activity info.
Parameters
----------
stem_annot_activity : dictionary
Dictionary mapping stem_id to annotation activity information
mtrack : MultiTrack
medleydb MultiTrack object
save_dir : str
Path to save new stems.
Returns
-------
filtered_altfiles : dictionary
Dictionary mapping stem_id to a path where the new stem is saved.
"""
filtered_altfiles = {}
# for each stem with an annotation filter, create filtered stem
for key in stem_annot_activity.keys():
if stem_annot_activity[key] is None:
continue
new_stem_path = os.path.join(
save_dir, "{}_STEM_{}_alt.wav".format(mtrack.track_id, key)
)
if not os.path.exists(new_stem_path):
C.create_filtered_stem(
mtrack.stems[key].audio_path, new_stem_path,
stem_annot_activity[key]
)
filtered_altfiles[key] = new_stem_path
return filtered_altfiles
def create_mixes(mtrack, mix_path, mix_path_noguitar, mix_path_nosynth,
stem_indices, stem_indices_guitar, stem_indices_piano,
altfiles):
"""Render artificial mixes to `mix_path', `mix_path_noguitar', and
`mix_path_nosynth'.
Parameters
----------
mtrack : MultiTrack
medleydb MultiTrack object
mix_path : str
Path to save full multif0 mix
mix_path_noguitar : str
Path to save no guitar multif0 mix
mix_path_nosynth : str
Path to save no synthesized stems multif0 mix
stem_indices : list
List of stems to include in the full mix
stem_indices_guitar : list
List of guitar stems
stem_indices_piano : list
List of piano stems
altfiles : dict
Dictionary of replacement files mapping stem id to new path.
Returns
-------
mix_filepaths : list
List of filepaths included in the full mix
mix_noguitar_filepaths : list
List of filepaths included in the no guitar mix
mix_nosynth_filepaths : list
List of filepaths included in the no resynth mix
"""
# create resynth mix
if len(stem_indices) > 0:
mix_filepaths, _ = mix.mix_multitrack(
mtrack, mix_path, stem_indices=stem_indices, alternate_files=altfiles
)
else:
mix_filepaths = None
# create no guitar and no synth mixes
stem_indices_noguitar = [
s for s in stem_indices if s not in stem_indices_guitar
]
stem_indices_nosynth = [
s for s in stem_indices_noguitar if s not in stem_indices_piano
]
altfiles_noguitar = {
k: v for k, v in altfiles.items() if k in stem_indices_noguitar
}
altfiles_nosynth = {
k: v for k, v in altfiles.items() if k in stem_indices_nosynth
}
if len(stem_indices_noguitar) > 0:
mix_noguitar_filepaths, _ = mix.mix_multitrack(
mtrack, mix_path_noguitar, stem_indices=stem_indices_noguitar,
alternate_files=altfiles_noguitar
)
else:
mix_noguitar_filepaths = None
if len(stem_indices_nosynth) > 0:
mix_nosynth_filepaths, _ = mix.mix_multitrack(
mtrack, mix_path_nosynth, stem_indices=stem_indices_nosynth,
alternate_files=altfiles_nosynth
)
else:
mix_nosynth_filepaths = None
return mix_filepaths, mix_noguitar_filepaths, mix_nosynth_filepaths
def create_complete_resynth_mix(mtrack, resynth_path, replace_path, save_dir):
"""Create resynthesized mixes and all corresponding annotations
Audio files:
- (A) Multif0 remix with synth guitar + synth piano
- (B) Multif0 remix with synth piano
- (C) Multif0 remix
- (D) Original track
Annotations:
filename : description (corresponding audio file)
- Artist_Track_multif0_annotation.txt : multif0 + synth piano/guitar (A)
    - Artist_Track_multif0_noguitar_annotation.txt : multif0 + synth piano (B)
- Artist_Track_multif0_nosynth_annotation.txt : multif0 (C)
- Artist_Track_melody_annotation.txt : all melody f0s (A,B,C,[D])
- Artist_Track_vocal_annotation.txt : all vocal f0s (A,B,C,[D])
- Artist_Track_bass_annotation.txt : all bass f0s (A,B,C,[D])
- Artist_Track_piano_annotation.txt : all piano f0s (A,B)
- Artist_Track_guitar_annotation.txt : all guitar f0s (A)
Parameters
----------
mtrack : MultiTrack
medleydb MultiTrack object
resynth_path : str
Path where resynthesized files live
replace_path : str
Path where replacement files live
save_dir : str
Path to save output.
Returns
-------
audio_annot_pairs : dictionary
Dictionary mapping audio files to annotation files by type.
"""
# do nothing if track has bleed
if mtrack.has_bleed:
return None
# mix audio save paths
mix_path = os.path.join(
save_dir, "{}_MIX_complete_resynth.wav".format(mtrack.track_id)
)
mix_path_noguitar = os.path.join(
save_dir, "{}_MIX_complete_noguitar.wav".format(mtrack.track_id)
)
mix_path_nosynth = os.path.join(
save_dir, "{}_MIX_complete_nosynth.wav".format(mtrack.track_id)
)
# define common structures
stem_indices = []
altfiles = {}
stem_annotations = {}
# get all annotation and index info from resynthesized stems
(replace_stem_annotations, replace_altindices,
stem_indices_replace) = get_replace_info(
mtrack, replace_path
)
stem_indices.extend(stem_indices_replace)
for key, value in replace_stem_annotations.items():
stem_annotations[key] = value
for key, value in replace_altindices.items():
altfiles[key] = value
# get all annotation and index info from resynthesized stems
(resynth_stem_annotations, resynth_altindices,
stem_indices_guitar, stem_indices_piano) = get_resynth_info(
mtrack, resynth_path, stem_indices
)
stem_indices.extend(stem_indices_piano)
stem_indices.extend(stem_indices_guitar)
for key, value in resynth_stem_annotations.items():
stem_annotations[key] = value
for key, value in resynth_altindices.items():
altfiles[key] = value
# get all annotation and index info from remaining original stems
(orig_stem_annotations, stem_annot_activity,
orig_stem_indices) = get_orig_stem_info(mtrack, stem_indices)
# fill info to common structures
stem_indices.extend(orig_stem_indices)
for key, value in orig_stem_annotations.items():
stem_annotations[key] = value
# create annotation dictionary
annotations = create_annotations(
save_dir, mtrack.track_id, stem_annotations
)
# save annotation and create pairs
audio_annot_pairs = create_annotation_save_pairs(
annotations, mix_path, mix_path_noguitar, mix_path_nosynth
)
# create new versions of stems with annotation filters
filtered_altfiles = generate_filtered_stems(
stem_annot_activity, mtrack, save_dir
)
for key, value in filtered_altfiles.items():
altfiles[key] = value
# make sure there is a least one stem left in the mix
if len(stem_indices) == 0:
print("{} had no stems after filtering :( ".format(mtrack.track_id))
return None
# generate mixes
mix_filepaths, mix_noguitar_filepaths, mix_nosynth_filepaths = create_mixes(
mtrack, mix_path, mix_path_noguitar, mix_path_nosynth,
stem_indices, stem_indices_guitar, stem_indices_piano, altfiles
)
if mix_filepaths is None:
audio_annot_pairs.pop(mix_path)
if mix_noguitar_filepaths is None:
audio_annot_pairs.pop(mix_path_noguitar)
if mix_nosynth_filepaths is None:
audio_annot_pairs.pop(mix_path_nosynth)
return audio_annot_pairs
def get_annotation_mono(mtrack, stem_list, use_estimate=True):
"""Get annotation for a subset of stems if all stems are mono
Parameters
----------
mtrack : MultiTrack
medleydb MultiTrack object
stem_list : list
list of Track objects
Returns
-------
times : list or None
list of times or None
freqs : list or None
list of freqs or None
"""
# if no stems, the annotation is empty
if len(stem_list) == 0:
times = []
freqs = []
# otherwise, check if all stems are mono
else:
all_mono = True
if mtrack.has_bleed:
all_mono = False
for stem in stem_list:
if len(stem.instrument) > 1:
all_mono = False
elif stem.f0_type[0] != 'm':
all_mono = False
# if all stems are mono add the annotation to the mix
if all_mono:
times = []
freqs = []
for stem in stem_list:
annot_t, annot_f, _ = C.get_stem_annotation(
stem, mtrack, use_estimate=use_estimate
)
# if there is no annotation return None
if annot_t is None or annot_f is None:
return None, None
times.extend(annot_t)
freqs.extend(annot_f)
else:
times = None
freqs = None
return times, freqs
def get_fullmix_annotations(mtrack, save_dir):
"""Get annotations corresponding to original medleydb mixes.
Parameters
----------
mtrack : MultiTrack
A medleydb MultiTrack object.
save_dir : str
Path to save annotation files.
Returns
-------
audio_annot_pairs : dictionary
Dictionary mapping audio files to annotation files by type.
"""
audio_annot_pairs = {mtrack.mix_path: {}}
melody_stems = []
vocal_stems = []
bass_stems = []
guitar_stems = []
piano_stems = []
guitars = [
'acoustic guitar',
'clean electric guitar',
'distorted electric guitar'
]
for stem in mtrack.stems.values():
if any(inst in mix.VOCALS for inst in stem.instrument):
vocal_stems.append(stem)
if 'Unlabeled' in stem.instrument and stem.component == 'melody':
vocal_stems.append(stem)
if stem.component == 'bass':
bass_stems.append(stem)
if stem.component == 'melody':
melody_stems.append(stem)
if any(inst in guitars for inst in stem.instrument):
guitar_stems.append(stem)
if any(inst == 'piano' for inst in stem.instrument):
piano_stems.append(stem)
# use melody if there is melody or none
if mtrack.dataset_version == 'V1':
if mtrack.melody3_annotation is not None:
annot = np.array(mtrack.melody3_annotation).T
melody_times, melody_freqs = multif0_to_timefreq(
annot[0], annot[1:].T)
else:
melody_times = []
melody_freqs = []
else:
melody_times, melody_freqs = get_annotation_mono(mtrack, melody_stems)
if melody_times is not None:
output = save_annotation(
melody_times,
melody_freqs,
os.path.join(save_dir, '{}_MIX_melody.txt'.format(mtrack.track_id))
)
audio_annot_pairs[mtrack.mix_path]['melody'] = output
# use vocals if all vocals are mono or there are none
vocal_times, vocal_freqs = get_annotation_mono(mtrack, vocal_stems, use_estimate=False)
if vocal_times is not None:
output = save_annotation(
vocal_times,
vocal_freqs,
os.path.join(save_dir, '{}_MIX_vocal.txt'.format(mtrack.track_id))
)
audio_annot_pairs[mtrack.mix_path]['vocal'] = output
# use bass if all bass is mono or there are none
bass_times, bass_freqs = get_annotation_mono(mtrack, bass_stems, use_estimate=False)
if bass_times is not None:
output = save_annotation(
bass_times,
bass_freqs,
os.path.join(save_dir, '{}_MIX_bass.txt'.format(mtrack.track_id))
)
audio_annot_pairs[mtrack.mix_path]['bass'] = output
# mark that there's no piano/guitar if there are no stems with
# those instruments
if len(piano_stems) == 0:
audio_annot_pairs[mtrack.mix_path]['piano'] = None
if len(guitar_stems) == 0:
audio_annot_pairs[mtrack.mix_path]['guitar'] = None
return audio_annot_pairs
def get_all_audio_annot_pairs(mtrack, save_dir, resynth_path, replace_path):
"""For a given multitrack get all types of mixes and corresponding
annotations, and save a json file with all info.
Parameters
----------
mtrack : MultiTrack
medleydb MultiTrack object.
save_dir : str
Path to save json output file.
resynth_path : str
Path to where resynthesized stems live.
replace_path : str
Path to where replaced stems live
Returns
-------
json_path : str
Path to saved json file
"""
print(" Resynth annotations and mixing...")
resynth_pairs = create_complete_resynth_mix(
mtrack, resynth_path, replace_path, save_dir)
print(" Fullmix annotations")
fullmix_pairs = get_fullmix_annotations(mtrack, save_dir)
all_pairs = {}
if resynth_pairs is not None:
for key, value in resynth_pairs.items():
all_pairs[key] = value
for key, value in fullmix_pairs.items():
all_pairs[key] = value
json_path = os.path.join(
save_dir, "{}_training_pairs.json".format(mtrack.track_id)
)
with open(json_path, 'w') as fhandle:
json.dump(all_pairs, fhandle, indent=2)
return json_path
def main(args):
mtracks = mdb.load_all_multitracks(
dataset_version=['V1', 'V2', 'EXTRA'])
for mtrack in mtracks:
print("Processing {}...".format(mtrack.track_id))
if os.path.exists(os.path.join(args.save_dir,
"{}_training_pairs.json".format(mtrack.track_id))):
print(" already done!")
continue
json_path = get_all_audio_annot_pairs(
mtrack, args.save_dir, args.resynth_path, args.replace_path
)
print("...saved to {}".format(json_path))
print("")
print("done!")
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Generate feature files for multif0 learning.")
parser.add_argument("save_dir",
type=str,
help="Path to save npy files.")
parser.add_argument("resynth_path",
type=str,
help="resynth path")
parser.add_argument("replace_path",
type=str,
help="replace path")
main(parser.parse_args())
``` |
{
"source": "jollysg/xsens_ros_mti_driver",
"score": 2
} |
#### File: xsens_ros_mti_driver/launch/display.launch.py
```python
import os
from launch import LaunchDescription
from launch.actions import SetEnvironmentVariable, IncludeLaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
from pathlib import Path
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import PathJoinSubstitution, LaunchConfiguration
from launch_ros.substitutions import FindPackageShare
def generate_launch_description():
ld = LaunchDescription()
# Set env var to print messages to stdout immediately
arg = SetEnvironmentVariable('RCUTILS_CONSOLE_STDOUT_LINE_BUFFERED', '1')
ld.add_action(arg)
driver_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
launch_file_path=PathJoinSubstitution([
FindPackageShare('xsens_mti_driver'), 'launch', 'xsens_mti_node.launch.py'
]),
)
)
ld.add_action(driver_launch)
# Rviz2 node
rviz_config_path = os.path.join(get_package_share_directory('xsens_mti_driver'), 'rviz', 'display.rviz')
rviz2_node = Node(
package='rviz2',
node_executable='rviz2',
node_name='xsens_rviz2',
output='screen',
arguments=[["-d"],[rviz_config_path]],
)
ld.add_action(rviz2_node)
# Robot State Publisher node
urdf_file_path = os.path.join(get_package_share_directory('xsens_mti_driver'), 'urdf', 'MTi_6xx.urdf')
state_publisher_node = Node(
package='robot_state_publisher',
node_executable='robot_state_publisher',
node_name='xsens_state_publisher',
output='screen',
arguments=[urdf_file_path],
)
ld.add_action(state_publisher_node)
return ld
``` |
{
"source": "jollyshuai/cube-studio",
"score": 2
} |
#### File: pkgs/k8s/k8s_crd.py
```python
import datetime
import time
import traceback
import re
import os
import json
import subprocess
from kubernetes import client as k8s_client
from ..exceptions.k8s_expections import K8SFailedException, K8SJOBTimeoutException
from ..constants import NodeAffnity, PodAffnity
class K8sCRD(object):
"""
k8s custom resource抽象类
"""
def __init__(self, group, plural, version, client):
"""
Args:
            group: group of the custom resource; for kubeflow this is usually kubeflow.org
            plural: plural form of the custom resource kind, e.g. tfjobs
            version: version of the custom resource
            client: k8s client instance
"""
self.group = group
self.plural = plural
self.version = version
self.client = k8s_client.CustomObjectsApi(client)
def _start_trace_worker_logs(self, namespace, name):
try:
subprocess.check_call("which stern", shell=True)
except Exception as e:
print("WARNING: found no stern installed, can not trace worker log: {}".format(e), flush=True)
return None
cmd = "stern {} --namespace {} --kubeconfig /root/.kube/{}-kubeconfig"\
.format(name, namespace, os.getenv("KFJ_ENVIRONMENT", 'tke'))
subproc = subprocess.Popen(cmd, universal_newlines=False, shell=True, bufsize=0)
print("worker log tracer started: {}".format(subproc.pid), flush=True)
return subproc
def _keep_worker_log_tracer(self, namespace, name, trace_proc=None):
if trace_proc is None:
return None
proc_ret = trace_proc.poll()
if proc_ret is not None:
print("worker log tracer {} is terminated with code {}, will restart it"
.format(trace_proc.pid, proc_ret), flush=True)
print("*"*50, flush=True)
return self._start_trace_worker_logs(namespace, name)
return trace_proc
def _stop_trace_worker_logs(self, trace_proc):
if trace_proc is None:
return
proc_ret = trace_proc.poll()
if proc_ret is not None:
return
trace_proc.kill()
print("killed worker log tracer: {}".format(trace_proc.pid), flush=True)
def wait_for_condition(self,
namespace,
name,
expected_conditions=[],
timeout=datetime.timedelta(days=365),
polling_interval=datetime.timedelta(seconds=30),
status_callback=None,
trace_worker_log=False):
"""Waits until any of the specified conditions occur.
Args:
namespace: namespace for the CR.
name: Name of the CR.
expected_conditions: A list of conditions. Function waits until any of the
supplied conditions is reached.
timeout: How long to wait for the CR.
polling_interval: How often to poll for the status of the CR.
status_callback: (Optional): Callable. If supplied this callable is
invoked after we poll the CR. Callable takes a single argument which
is the CR.
trace_worker_log:
"""
end_time = datetime.datetime.now() + timeout
max_retries = 15
retries = 0
trace_proc = self._start_trace_worker_logs(namespace, name) if trace_worker_log else None
trace_st = time.time()
try:
while True:
try:
results = self.client.get_namespaced_custom_object(self.group, self.version, namespace,
self.plural, name)
except Exception as e:
if retries >= max_retries:
raise K8SFailedException("get k8s resource '{}/{}/{}' '{}' in namespace '{}' error: {}"
.format(self.group, self.version, self.plural, name, namespace, e))
print("get k8s resource '{}/{}/{}' '{}' in namespace '{}' error, will retry after 10s({}/{}):"
" {}\n{}".format(self.group, self.version, self.plural, name, namespace, retries,
max_retries, e, traceback.format_exc()), flush=True)
retries += 1
time.sleep(10)
continue
if results:
if status_callback:
status_callback(results)
expected, condition = self.is_expected_conditions(results, expected_conditions)
if expected:
print("k8s resource '{}/{}/{}' '{}' in namespace '{}' has reached the expected condition: '{}'"
.format(self.group, self.version, self.plural, name, namespace, condition),
flush=True)
return condition
else:
if trace_proc is None:
print("waiting k8s resource '{}/{}/{}' '{}' in namespace '{}' to reach conditions '{}',"
" current is '{}'".format(self.group, self.version, self.plural, name, namespace,
expected_conditions, condition if condition else None),
flush=True)
retries = 0
elif retries < max_retries:
print("get k8s resource '{}/{}/{}' '{}' in namespace '{}' return empty, will retry after 10s({}/{})"
.format(self.group, self.version, self.plural, name, namespace, retries, max_retries),
flush=True)
retries += 1
continue
else:
raise K8SFailedException("get k8s resource '{}/{}/{}' '{}' in namespace '{}' return empty"
.format(self.group, self.version, self.plural, name, namespace))
if datetime.datetime.now() + polling_interval > end_time:
raise K8SJOBTimeoutException("wating k8s resource '{}/{}/{}' '{}' in namespace '{}' to reach"
" conditions '{}' timeout, timeout={}, polling_interval={}"
.format(self.group, self.version, self.plural, name, namespace,
expected_conditions, timeout, polling_interval))
time.sleep(polling_interval.total_seconds())
trace_proc = self._keep_worker_log_tracer(namespace, name, trace_proc)
if trace_proc is not None and time.time() - trace_st >= 3600*2:
print("will restart worker logger tracer", flush=True)
trace_proc.kill()
trace_proc = self._start_trace_worker_logs(namespace, name)
trace_st = time.time()
finally:
self._stop_trace_worker_logs(trace_proc)
def is_expected_conditions(self, cr_object, expected_conditions):
"""
        Determine whether the CR has reached the specified conditions; subclasses must implement this method.
        Args:
            cr_object: JSON description of the CR, obtained via the API
            expected_conditions: list of expected conditions
Returns:
tuple: is_expected, condition
"""
conditions = cr_object.get('status', {}).get("conditions")
if not conditions:
return False, ""
if conditions[-1]["type"] in expected_conditions and conditions[-1]["status"] == "True":
return True, conditions[-1]["type"]
else:
return False, conditions[-1]["type"]
def create(self, spec):
"""Create a CR.
Args:
spec: The spec for the CR.
"""
try:
# Create a Resource
namespace = spec["metadata"].get("namespace", "default")
name = spec["metadata"]["name"]
print("creating k8s resource '{}/{}/{}' '{}' in namespace '{}'"
.format(self.group, self.version, self.plural, name, namespace))
api_response = self.client.create_namespaced_custom_object(self.group, self.version, namespace,
self.plural, spec)
print("created k8s resource '{}/{}/{}' '{}' in namespace '{}'\nspec='{}'\nresponse={}"
.format(self.group, self.version, self.plural, name, namespace, spec, api_response))
return api_response
except Exception as e:
print("create k8s resource '{}/{}/{}' error, spec={}: {}\n{}".format(self.group, self.version, self.plural,
spec, e, traceback.format_exc()))
raise K8SFailedException("create k8s resource '{}/{}/{}' error, spec={}: {}"
.format(self.group, self.version, self.plural, spec, e))
def get_crd_status(self, crd_object, plural):
status = ''
        # workflows: use the status of the last node as the actual status
if plural == 'workflows':
if 'status' in crd_object and 'nodes' in crd_object['status']:
keys = list(crd_object['status']['nodes'].keys())
status = crd_object['status']['nodes'][keys[-1]]['phase']
if status != 'Pending':
status = crd_object['status']['phase']
elif plural == 'notebooks':
if 'status' in crd_object and 'conditions' in crd_object['status'] and len(
crd_object['status']['conditions']) > 0:
status = crd_object['status']['conditions'][0]['type']
elif plural == 'inferenceservices':
status = 'unready'
if 'status' in crd_object and 'conditions' in crd_object['status'] and len(
crd_object['status']['conditions']) > 0:
for condition in crd_object['status']['conditions']:
if condition['type'] == 'Ready' and condition['status'] == 'True':
status = 'ready'
else:
if 'status' in crd_object and 'phase' in crd_object['status']:
status = crd_object['status']['phase']
elif 'status' in crd_object and 'conditions' in crd_object['status'] and len(
crd_object['status']['conditions']) > 0:
            status = crd_object['status']['conditions'][-1]['type']  # tfjob and experiment use this structure
return status
# @pysnooper.snoop(watch_explode=('crd_object'))
def get_one_crd(self, namespace, name):
self.crd = k8s_client.CustomObjectsApi()
crd_object = self.crd.get_namespaced_custom_object(group=self.group, version=self.version, namespace=namespace,
plural=self.plural, name=name)
# print(crd_object['status']['conditions'][-1]['type'])
status = self.get_crd_status(crd_object, self.plural)
creat_time = crd_object['metadata']['creationTimestamp'].replace('T', ' ').replace('Z', '')
creat_time = (
datetime.datetime.strptime(creat_time, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=8)).strftime(
'%Y-%m-%d %H:%M:%S')
back_object = {
"name": crd_object['metadata']['name'],
"namespace": crd_object['metadata']['namespace'] if 'namespace' in crd_object['metadata'] else '',
"annotations": json.dumps(crd_object['metadata']['annotations'], indent=4,
ensure_ascii=False) if 'annotations' in crd_object['metadata'] else '',
"labels": json.dumps(crd_object['metadata']['labels'], indent=4, ensure_ascii=False) if 'labels' in
crd_object[
'metadata'] else '',
"spec": json.dumps(crd_object['spec'], indent=4, ensure_ascii=False),
"create_time": creat_time,
"status": status,
"status_more": json.dumps(crd_object['status'], indent=4,
ensure_ascii=False) if 'status' in crd_object else ''
}
# return
return back_object
def get_crd(self, namespace):
self.crd = k8s_client.CustomObjectsApi()
crd_objects = \
self.crd.list_namespaced_custom_object(group=self.group, version=self.version, namespace=namespace,
plural=self.plural)['items']
back_objects = []
for crd_object in crd_objects:
# print(crd_object['status']['conditions'][-1]['type'])
status = self.get_crd_status(crd_object, self.plural)
creat_time = crd_object['metadata']['creationTimestamp'].replace('T', ' ').replace('Z', '')
creat_time = (datetime.datetime.strptime(creat_time, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(
hours=8)).strftime('%Y-%m-%d %H:%M:%S')
back_object = {
"name": crd_object['metadata']['name'],
"namespace": crd_object['metadata']['namespace'] if 'namespace' in crd_object['metadata'] else '',
"annotations": json.dumps(crd_object['metadata']['annotations'], indent=4,
ensure_ascii=False) if 'annotations' in crd_object['metadata'] else '',
"labels": json.dumps(crd_object['metadata']['labels'], indent=4, ensure_ascii=False) if 'labels' in
crd_object[
'metadata'] else '{}',
"spec": json.dumps(crd_object['spec'], indent=4, ensure_ascii=False),
"create_time": creat_time,
"status": status,
"status_more": json.dumps(crd_object['status'], indent=4,
ensure_ascii=False) if 'status' in crd_object else ''
}
back_objects.append(back_object)
return back_objects
def delete(self, name=None, namespace='pipeline', labels=None):
"""
delete a k8s cr
Args:
name: name of cr to be deleted
            namespace: namespace in which the cr is to be deleted
            labels: dict of labels; if name is not given, CRs whose labels match any of these key/value pairs are deleted
Returns:
"""
if name:
try:
from kubernetes.client import V1DeleteOptions
body = V1DeleteOptions(api_version=self.version, propagation_policy="Foreground")
print("deleteing k8s resource '{}/{}/{}' '{}' in namespace '{}'"
.format(self.group, self.version, self.plural, name, namespace))
api_response = self.client.delete_namespaced_custom_object(
self.group,
self.version,
namespace,
self.plural,
name,
body)
print("deleted k8s resource '{}/{}/{}' '{}' in namespace '{}', response={}"
.format(self.group, self.version, self.plural, name, namespace, api_response))
return api_response
except Exception as e:
print("delete k8s resource '{}/{}/{}' '{}' in namespace '{}' error: {}\n{}"
.format(self.group, self.version, self.plural, name, namespace, e, traceback.format_exc()))
raise K8SFailedException("delete k8s resource '{}/{}/{}' '{}' in namespace '{}' error: {}"
.format(self.group, self.version, self.plural, name, namespace, e))
elif labels and type(labels) == dict:
crds = self.get_crd(namespace=namespace)
for crd in crds:
if crd['labels']:
crd_labels = json.loads(crd['labels'])
for key in labels:
if key in crd_labels and labels[key] == crd_labels[key]:
try:
self.crd = k8s_client.CustomObjectsApi()
delete_body = k8s_client.V1DeleteOptions()
self.crd.delete_namespaced_custom_object(group=self.group, version=self.version,
namespace=namespace, plural=self.plural,
name=crd['name'], body=delete_body)
except Exception as e:
print(e)
@staticmethod
def make_affinity_spec(job_name, node_affin=None, pod_affin=None):
affinity = {}
if node_affin and node_affin.strip():
node_affin = node_affin.strip().lower()
if node_affin in [NodeAffnity.PREF_CPU, NodeAffnity.PREF_GPU]:
affinity["nodeAffinity"] = {
"preferredDuringSchedulingIgnoredDuringExecution": [
{
"weight": 100,
"preference": {
"matchExpressions": [
{
"key": "cpu" if node_affin == NodeAffnity.PREF_CPU else "gpu",
"operator": "In",
"values": ["true"]
}
]
}
}
]
}
elif node_affin in [NodeAffnity.ONLY_CPU, NodeAffnity.ONLY_GPU]:
affinity["nodeAffinity"] = {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "cpu" if node_affin == NodeAffnity.ONLY_CPU else "gpu",
"operator": "In",
"values": ["true"]
}
]
}
]
}
}
if pod_affin and pod_affin.strip():
pod_affin = pod_affin.strip().lower()
if pod_affin == PodAffnity.SPREAD:
affinity["podAntiAffinity"] = {
"preferredDuringSchedulingIgnoredDuringExecution": [
{
"weight": 100,
"podAffinityTerm": {
"labelSelector": {
"matchExpressions": [
{
"key": "app",
"operator": "In",
"values": [job_name]
}
]
},
"topologyKey": "kubernetes.io/hostname"
}
}
]
}
elif pod_affin == PodAffnity.CONCENT:
affinity["podAffinity"] = {
"preferredDuringSchedulingIgnoredDuringExecution": [
{
"weight": 100,
"podAffinityTerm": {
"labelSelector": {
"matchExpressions": [
{
"key": "app",
"operator": "In",
"values": [job_name]
}
]
},
"topologyKey": "kubernetes.io/hostname"
}
}
]
}
return affinity
@staticmethod
def make_volume_mount_spec(mount_name, mount_type, mount_point, username):
mount_type = mount_type.lower()
if mount_type == 'pvc':
vol_name = mount_name.replace('_', '-').lower()
volume_spec = {
"name": vol_name,
"persistentVolumeClaim": {
"claimName": mount_name
}
}
mount_spec = {
"name": vol_name,
"mountPath": os.path.join(mount_point, username),
"subPath": username
}
elif mount_type == 'hostpath':
temps = re.split(r'_|\.|/', mount_name)
temps = [temp for temp in temps if temp]
vol_name = '-'.join(temps).lower()
volume_spec = {
"name": vol_name,
"hostPath": {
"path": mount_name
}
}
mount_spec = {
"name": vol_name,
"mountPath": mount_point
}
elif mount_type == 'configmap':
vol_name = mount_name.replace('_', '-').replace('/', '-').replace('.', '-').lower()
volume_spec = {
"name": vol_name,
"configMap": {
"name": mount_name
}
}
mount_spec = {
"name": vol_name,
"mountPath": mount_point
}
elif mount_type == 'memory':
memory_size = mount_name.replace('_', '-').replace('/', '-').replace('.', '-').lower().replace('g', '')
volume_name = 'memory-%s' % memory_size
volume_spec = {
"name": volume_name,
"emptyDir": {
"medium": "Memory",
"sizeLimit": "%sGi" % memory_size
}
}
mount_spec = {
"name": volume_name,
"mountPath": mount_point
}
else:
raise RuntimeError("unknown mount type '{}', only pvc/hostpath/configmap/memory are allowed".format(mount_type))
return volume_spec, mount_spec
```
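A minimal usage sketch of the two static helpers above, assuming the class is importable as `K8sCRD` from `job.pkgs.k8s.k8s_crd` (as the TFJob module below does) and that `NodeAffnity`/`PodAffnity` come from `job.pkgs.constants`; the job, PVC and user names are placeholders.
```python
# Hedged sketch: exercise the static spec builders without touching a cluster.
from job.pkgs.constants import NodeAffnity, PodAffnity
from job.pkgs.k8s.k8s_crd import K8sCRD

# Preferred-GPU node affinity plus spread-style pod anti-affinity for a job named "demo-train".
affinity = K8sCRD.make_affinity_spec("demo-train", node_affin=NodeAffnity.PREF_GPU,
                                     pod_affin=PodAffnity.SPREAD)
print(affinity.keys())  # dict_keys(['nodeAffinity', 'podAntiAffinity'])

# Volume and mount specs for a PVC mounted under /mnt/<username>.
volume_spec, mount_spec = K8sCRD.make_volume_mount_spec(
    mount_name="kubeflow-user-workspace", mount_type="pvc",
    mount_point="/mnt", username="alice")
print(volume_spec)  # {'name': ..., 'persistentVolumeClaim': {'claimName': 'kubeflow-user-workspace'}}
print(mount_spec)   # {'name': ..., 'mountPath': '/mnt/alice', 'subPath': 'alice'}
```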
#### File: pkgs/k8s/tfjob.py
```python
import copy
from .k8s_crd import K8sCRD
from ..constants import NodeAffnity, ComputeResource
from job.pkgs.context import KFJobContext
from job.pkgs.utils import parse_size
class TFJob(K8sCRD):
def __init__(self, version, k8s_api_client):
super(TFJob, self).__init__("kubeflow.org", "tfjobs", version, k8s_api_client)
def create(self, name, namespace, num_workers, num_pss, driver_image, driver_command, driver_args,
driver_envs, driver_mounts, resources, restart_policy, image_pull_secrets=None, node_affin=None,
pod_affin=None, labels={}, backoff_limits=3, node_selector={}, privileged=False, creator='',
ps_resources=None, chief_resources=None):
if node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
ctx = KFJobContext.get_context()
gpu_type = (ctx.gpu_type or '').strip().upper()
resources = resources or {}
limits = resources.get('limits', {})
if gpu_type == ComputeResource.ClusterEnv.TKE:
print("under tke environment")
if ComputeResource.P_GPU in limits:
limits.pop(ComputeResource.V_GPU_CORE, None)
limits.pop(ComputeResource.V_GPU_MEM, None)
print("specified physical gpu, ignore v-gpu settings")
else:
if ComputeResource.V_GPU_CORE not in limits:
limits[ComputeResource.V_GPU_CORE] = 100
print("{} not set, default to 100".format(ComputeResource.V_GPU_CORE))
else:
limits[ComputeResource.V_GPU_CORE] = int(limits[ComputeResource.V_GPU_CORE])
gpu_mem = parse_size(limits.get(ComputeResource.V_GPU_MEM, 0))
if not gpu_mem:
min_gpu_mem = parse_size(ctx.gpu_mem_min)
if not min_gpu_mem:
print("WARNING: {} not set and no minimum gpu memory configured, ignoring v-gpu memory setting"
.format(ComputeResource.V_GPU_MEM))
limits.pop(ComputeResource.V_GPU_MEM, None)
else:
gpu_mem = int((limits[ComputeResource.V_GPU_CORE] / 100 * min_gpu_mem) //
ComputeResource.V_GPU_MEM_UNIT)
limits[ComputeResource.V_GPU_MEM] = gpu_mem
print("{} not set, set to {}".format(ComputeResource.V_GPU_MEM, gpu_mem))
else:
gpu_mem = int(gpu_mem // ComputeResource.V_GPU_MEM_UNIT)
limits[ComputeResource.V_GPU_MEM] = gpu_mem
else:
print("under idc environment")
v_cores = limits.pop(ComputeResource.V_GPU_CORE, 100)
limits.pop(ComputeResource.V_GPU_MEM, None)
if ComputeResource.P_GPU not in limits:
limits[ComputeResource.P_GPU] = v_cores // 100
print("{} not set, default to {}".format(ComputeResource.P_GPU, limits[ComputeResource.P_GPU]))
resources['limits'] = limits
else:
resources = resources or {}
limits = resources.get('limits', {})
if limits:
limits.pop(ComputeResource.P_GPU, None)
limits.pop(ComputeResource.V_GPU_CORE, None)
limits.pop(ComputeResource.V_GPU_MEM, None)
print("cpu job, ignored gpu settings")
print("resource spec: {}".format(resources))
cr_spec = {
"apiVersion": "{}/{}".format(self.group, self.version),
"kind": "TFJob",
"metadata": {
"name": name,
"namespace": namespace,
"labels": labels or {}
},
"spec": {
"cleanPodPolicy": "None",
"backoffLimit": backoff_limits,
"tfReplicaSpecs": {}
}
}
worker_labels = labels or {}
worker_labels['app'] = name
if num_workers > 0:
print("specified {} worker nodes".format(num_workers))
cr_spec["spec"]["tfReplicaSpecs"]["Worker"] = self.make_worker_spec(
name, num_workers, restart_policy, node_affin, pod_affin, driver_image,
driver_command, driver_args, driver_envs, driver_mounts, resources,
image_pull_secrets, node_selector, worker_labels, privileged, creator)
else:
raise RuntimeError("'num_workers' must be > 0, got {}".format(num_workers))
if num_pss > 0:
print("specified {} PS nodes".format(num_pss))
if not ps_resources:
ps_resources = copy.deepcopy(resources)
print("ps node resources not specified, use worker resources as reference")
limits = ps_resources.get('limits')
if limits:
limits.pop(ComputeResource.P_GPU, None)
limits.pop(ComputeResource.V_GPU_CORE, None)
limits.pop(ComputeResource.V_GPU_MEM, None)
print("ps node resources: {}".format(ps_resources))
cr_spec["spec"]["tfReplicaSpecs"]["PS"] = self.make_worker_spec(
name, num_pss, restart_policy, node_affin, pod_affin, driver_image,
driver_command, driver_args, driver_envs, driver_mounts, ps_resources,
image_pull_secrets, node_selector, worker_labels, privileged, creator)
print("auto add chief node under ps training mode")
if not chief_resources:
chief_resources = copy.deepcopy(resources)
print("chief node resources not specified, use worker resources as reference")
limits = chief_resources.get('limits')
if limits:
limits.pop(ComputeResource.P_GPU, None)
limits.pop(ComputeResource.V_GPU_CORE, None)
limits.pop(ComputeResource.V_GPU_MEM, None)
print("chief node resources: {}".format(chief_resources))
cr_spec["spec"]["tfReplicaSpecs"]["Chief"] = self.make_worker_spec(
name, 1, restart_policy, node_affin, pod_affin, driver_image,
driver_command, driver_args, driver_envs, driver_mounts, chief_resources,
image_pull_secrets, node_selector, worker_labels, privileged, creator)
return super(TFJob, self).create(cr_spec)
def make_worker_spec(self, job_name, replicas, restart_policy, node_affin, pod_affin, driver_image,
driver_command, driver_args, driver_envs, driver_mounts, resources=None,
image_pull_secrets=None, node_selector={}, labels={}, privileged=False, creator=''):
worker_spec = {
"replicas": replicas,
"restartPolicy": restart_policy or 'Never',
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
},
"labels": labels or {}
},
"spec": {
"affinity": self.make_affinity_spec(job_name, node_affin, pod_affin),
"containers": [
{
"name": "tensorflow",
"image": driver_image,
"imagePullPolicy": "Always",
"command": driver_command,
"args": driver_args,
"env": driver_envs or [],
"volumeMounts": [
self.make_volume_mount_spec(mn, mt, mp, creator)[1] for mn, mt, mp in driver_mounts
],
"resources": resources if resources else {},
"securityContext": {"privileged": privileged}
}
],
"volumes": [
self.make_volume_mount_spec(mn, mt, mp, creator)[0] for mn, mt, mp in driver_mounts
],
"nodeSelector": node_selector or {}
}
}
}
if image_pull_secrets:
worker_spec['template']['spec']['imagePullSecrets'] = [{'name': sec} for sec in image_pull_secrets]
return worker_spec
```
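For reference, a hedged sketch of calling `TFJob.create()` directly (the launcher component later in this dump wraps the same call); it assumes in-cluster credentials, and the image, PVC and namespace names are placeholders rather than project defaults.
```python
# Hedged sketch: submit a 2-worker TFJob; values below are illustrative only.
from kubernetes import client as k8s_client, config as k8s_config
from job.pkgs.k8s.tfjob import TFJob

k8s_config.load_incluster_config()
tfjob = TFJob("v1", k8s_client.ApiClient())
tfjob.create(
    name="demo-tfjob", namespace="pipeline",
    num_workers=2, num_pss=0,
    driver_image="tensorflow/tensorflow:2.3.0",
    driver_command=["python3", "train.py"], driver_args=[],
    driver_envs=[],
    driver_mounts=[("kubeflow-user-workspace", "pvc", "/mnt")],  # (mount name, type, mount point)
    resources={"limits": {"cpu": "4", "memory": "8G"}},
    restart_policy="Never", creator="alice")
```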
#### File: pkgs/tf/extend_activations.py
```python
import tensorflow as tf
class Dice(tf.keras.layers.Layer):
"""
Dice activation function used by the DIN model.
"""
def __init__(self, name='dice'):
super(Dice, self).__init__(name=name)
self.bn = tf.keras.layers.BatchNormalization(center=False, scale=False)
self.alpha = self.add_weight(shape=(), dtype=tf.float32, name='alpha')
def call(self, x):
x_normed = self.bn(x)
x_p = tf.sigmoid(x_normed)
return self.alpha * (1.0 - x_p) * x + x_p * x
```
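A minimal sketch of using the Dice activation inside a small Keras model, assuming the module is importable as `job.pkgs.tf.extend_activations`; the layer sizes are arbitrary.
```python
# Hedged sketch: apply Dice to the output of a Dense layer.
import tensorflow as tf
from job.pkgs.tf.extend_activations import Dice

inputs = tf.keras.Input(shape=(16,))
hidden = tf.keras.layers.Dense(8)(inputs)        # linear units, activated by Dice below
hidden = Dice(name="dice_1")(hidden)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(hidden)

model = tf.keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="binary_crossentropy")
model.summary()
```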
#### File: pkgs/tf/extend_losses.py
```python
import tensorflow as tf
class BPRLoss(tf.keras.losses.Loss):
def __init__(self, name="BPR_loss"):
super(BPRLoss, self).__init__(name=name)
@tf.function
def call(self, y_true, y_pred):
# [batch, 1]
loss = tf.math.log1p(tf.math.exp(-y_pred))
return loss
class PairHingeLoss(tf.keras.losses.Loss):
def __init__(self, margin, name="pair_hinge_loss"):
super(PairHingeLoss, self).__init__(name=name)
assert margin >= 0, "'margin' must be >= 0, got {}".format(margin)
self.margin = float(margin)
@tf.function
def call(self, y_true, y_pred):
# [batch, 1]
gap = self.margin - y_pred
# [batch, 1]
loss = tf.math.maximum(0., gap)
return loss
```
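Both losses treat `y_pred` as the pairwise score gap `s_pos - s_neg` of shape `[batch, 1]` and ignore `y_true`. A minimal sketch, assuming the module path `job.pkgs.tf.extend_losses`:
```python
# Hedged sketch: evaluate the pairwise losses on a toy batch of score gaps.
import tensorflow as tf
from job.pkgs.tf.extend_losses import BPRLoss, PairHingeLoss

gaps = tf.constant([[0.5], [-0.2], [1.3]])   # positive-minus-negative score gaps
dummy = tf.zeros_like(gaps)                  # y_true placeholder required by the Keras Loss API

print(BPRLoss()(dummy, gaps).numpy())                  # mean of log(1 + exp(-gap))
print(PairHingeLoss(margin=1.0)(dummy, gaps).numpy())  # mean of max(0, margin - gap)
```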
#### File: job/pkgs/utils.py
```python
import inspect
import os
import queue
import re
import threading
import time
import sys
from datetime import datetime, timedelta
from dateutil import relativedelta
from .constants import PipelineParam
def parse_best_parameter(best_param_path: str, job: dict):
import json
best_params = json.load(open(best_param_path, 'r+'))
TRAIN_ARGS_PARAMETERS = {
'batch_size': None,
'epochs': None,
'train_type': None,
'num_samples': None,
'num_val_samples': None,
'optimizer': None,
'losses': None,
'metrics': None,
'early_stopping': None,
}
job_detail = job['job_detail']
job_custom = 'params' in job_detail or 'model_args' not in job_detail
if job_custom:
params = job['job_detail']['params']
params_record = dict()
if params is not None:
for i,arg in enumerate(params):
if i%2==0:
params_record[arg] = i
val = best_params.get(arg[2:])
if val is not None:
job['job_detail']['params'][i+1] = val
for param in best_params:
param_t = '--'+param if param[:2]!='--' else param
if param_t not in params_record:
if param_t in job['job_detail'].keys():
job['job_detail'][param_t] = best_params.get(param)
else:
job['job_detail']['params'] += [param_t, best_params.get(param)]
else:
for train_arg in job['job_detail']['train_args'].keys():
val = best_params.get(train_arg)
if val is not None:
job['job_detail']['train_args'][train_arg] = val
for arg, val in best_params.items():
if arg not in TRAIN_ARGS_PARAMETERS.keys():
job['job_detail']['model_args'][arg] = val
return job
def dynamic_load_class(pkg_path, class_name):
add_path = os.path.dirname(os.path.realpath(pkg_path))
sys.path.append(add_path)
module_name = os.path.basename(pkg_path).split('.')[0]
module = __import__(module_name)
clazz = getattr(module, class_name)
return clazz
def parse_size(size, int_result=True):
if isinstance(size, (int, float)):
return int(size) if int_result else float(size)
if size is None:
return 0 if int_result else 0.
assert isinstance(size, str)
size = size.strip()
if not size:
return 0 if int_result else 0.
size = size.lower()
m = re.match(r'^(\d+[.]?\d*)(k|m|g)?$', size)
if not m:
return None
number = m.group(1)
unit = m.group(2)
number = float(number)
if unit == 'k':
number *= 2**10
elif unit == 'm':
number *= 2**20
elif unit == 'g':
number *= 2**30
return int(number) if int_result else number
def parse_timedelta(delta_str):
assert not delta_str or isinstance(delta_str, str)
import datetime
if not delta_str or not delta_str.strip():
return None
m = re.match(r'^(\d+)(ms|s|m|h|d|w)$', delta_str)
if not m:
return None
number = int(m.group(1))
unit = m.group(2)
if unit == 'ms':
return datetime.timedelta(milliseconds=number)
elif unit == 's':
return datetime.timedelta(seconds=number)
elif unit == 'm':
return datetime.timedelta(minutes=number)
elif unit == 'h':
return datetime.timedelta(hours=number)
elif unit == 'd':
return datetime.timedelta(days=number)
elif unit == 'w':
return datetime.timedelta(weeks=number)
return None
def expand_path(path, run_path=None, pack_path=None, ignore_abs_path=True):
if not path or not path.strip():
return path
path = path.strip()
if ignore_abs_path and os.path.isabs(path):
return path
if run_path:
run_path = run_path.strip()
if pack_path:
pack_path = pack_path.strip()
if not run_path and not pack_path:
return path
if run_path:
path = re.sub(PipelineParam.RUN_PATH_PAT, run_path, path)
if pack_path:
path = re.sub(PipelineParam.PACK_PATH_PAT, pack_path, path)
return path
def make_abs_or_data_path(path, data_path, pack_path):
if not path or not path.strip():
return data_path
path = expand_path(path, run_path=data_path, pack_path=pack_path)
if os.path.isabs(path):
return path
return os.path.join(data_path, path)
def make_abs_or_pack_path(path, data_path, pack_path):
if not path or not path.strip():
return pack_path
path = expand_path(path, run_path=data_path, pack_path=pack_path)
if os.path.isabs(path):
return path
return os.path.join(pack_path, path)
def expand_param(param_val, data_path, pack_path):
if not isinstance(param_val, str):
return param_val
if data_path:
param_val = re.sub(PipelineParam.RUN_PATH_PAT, data_path, param_val)
if pack_path:
param_val = re.sub(PipelineParam.PACK_PATH_PAT, pack_path, param_val)
expanded = param_val
for m in re.finditer(PipelineParam.DATE_PAT, param_val):
date = datetime.now()
f, n, unit, fmt = m.group(2, 3, 4, 6)
if all([f, n, unit]):
delta_num = int(f+n)
if unit == 'd':
date = date + timedelta(days=delta_num)
elif unit == 'w':
date = date + timedelta(weeks=delta_num)
elif unit == 'h':
date = date + timedelta(hours=delta_num)
elif unit == 'm':
date = date + timedelta(minutes=delta_num)
elif unit == 's':
date = date + timedelta(seconds=delta_num)
elif unit == 'M':
date = date + relativedelta.relativedelta(months=delta_num)
elif unit == 'y':
date = date + relativedelta.relativedelta(years=delta_num)
if not fmt:
fmt = "%Y%m%d"
expanded = expanded.replace(m.group(0), date.strftime(fmt))
return expanded
def recur_expand_param(param, data_path, pack_path):
if not param:
return param
if isinstance(param, (list, tuple)):
expaned = []
for i in param:
expaned.append(recur_expand_param(i, data_path, pack_path))
if isinstance(param, tuple):
return tuple(expaned)
return expaned
elif isinstance(param, dict):
expanded = {}
for k, v in param.items():
expanded[k] = recur_expand_param(v, data_path, pack_path)
return expanded
else:
return expand_param(param, data_path, pack_path)
def split_file_name(file_name):
if not file_name:
return '', '', ''
dir_name = os.path.dirname(file_name)
name = os.path.basename(file_name)
base, ext = os.path.splitext(name)
return dir_name, base, ext
def try_archive_by_config(config_json, data_path, pack_path):
if not config_json:
return None
if not isinstance(config_json, list):
config_json = [config_json]
from .archiver import Archiver
archiver = Archiver()
archived = []
for i, cj in enumerate(config_json):
src = cj.get('src', '').strip()
if not src:
print("'src' of {}th archive not set, ignore it".format(i))
continue
src = make_abs_or_data_path(src, data_path, pack_path)
path_name = cj.get('path_name', '').strip()
compress = cj.get('compress', False)
cj_archived = archiver.archive(src, path_name, compress)
if cj_archived:
archived.extend(cj_archived)
return archived
def call_user_module(module, func_name, func_must_exists, nullable, check_return_type,
inject_args: dict = None, **kwargs):
injected_args = {}
if not hasattr(module, func_name):
if func_must_exists:
raise ModuleNotFoundError("user function '{}' not found in module {}".format(func_name, module))
else:
return None, None
func = getattr(module, func_name)
args_spec = inspect.getfullargspec(func)
varkw_args = {}
if inject_args:
for a, v in inject_args.items():
if a in args_spec.args:
kwargs[a] = v
injected_args[a] = v
print("user function '{}' of module {} declared arg '{}', inject value '{}'"
.format(func_name, module, a, v))
elif args_spec.varkw:
varkw_args[a] = v
injected_args[a] = v
print("user function '{}' of module {} declared kw arg '{}', inject '{}'={} into it"
.format(func_name, module, args_spec.varkw, a, v))
not_support_args = kwargs.keys() - args_spec.args
if not_support_args:
for nsa in not_support_args:
v = kwargs.pop(nsa)
if args_spec.varkw:
if nsa not in varkw_args:
varkw_args[nsa] = v
print("'{}'={} in kwargs not declared in user function '{}' of module {},"
" moved it into kw arg '{}'".format(nsa, v, func_name, module, args_spec.varkw))
else:
print("'{}'={} in kwargs not declared in user function '{}' of module {}, will be excluded"
.format(nsa, v, func_name, module))
ret_obj = func(**kwargs, **varkw_args)
if ret_obj is None and not nullable:
raise RuntimeError("user function '{}' of module {} return None, args={}"
.format(func_name, module, kwargs))
if ret_obj is not None and check_return_type is not None and not isinstance(ret_obj, check_return_type):
raise RuntimeError("object '{}' returned by user function '{}' of module {} is of not type '{}'"
.format(ret_obj, func_name, module, check_return_type))
return ret_obj, injected_args
def find_duplicated_entries(seq):
if not seq:
return None
from collections import Counter
cnt = Counter(seq)
duplicated_entries = list(map(lambda i: i[0], filter(lambda x: x[1] > 1, cnt.items())))
return duplicated_entries
class _WriterThread(threading.Thread):
def __init__(self, q: queue.Queue, f, max_batch_len, name='bufferd_text_file_writer_thread'):
super(_WriterThread, self).__init__()
self.q = q
self.f = f
self.setName(name)
self.wrote_lines = 0
self.wrote_times = 0
self.wrote_cost_time = 0
self.max_batch_len = max_batch_len
self.batch = []
self.first_write_time = None
def _flush(self):
if not self.batch:
return
num_lines = len(self.batch)
st = time.perf_counter()
if self.first_write_time is None:
self.first_write_time = st
data = ''.join(self.batch)
self.f.write(data)
self.wrote_lines += num_lines
self.wrote_times += 1
cost = time.perf_counter() - st
self.wrote_cost_time += cost
self.batch.clear()
print("{}: wrote {} lines in {}s; {} writes so far totaling {} lines, write time {}s, elapsed {}s"
.format(self.getName(), num_lines, cost, self.wrote_times, self.wrote_lines,
self.wrote_cost_time, time.perf_counter()-self.first_write_time))
def run(self) -> None:
if not self.q:
raise RuntimeError("{}: no queue specified".format(self.getName()))
print("{}: started".format(self.getName()))
while True:
try:
if len(self.batch) >= self.max_batch_len/2:
line = self.q.get_nowait()
else:
line = self.q.get()
if line == BufferedTextFileWriter.END_MARK:
self._flush()
print("{}: received end mark, exiting; {} writes totaling {} lines, write time {}s, elapsed {}s"
.format(self.getName(), self.wrote_times, self.wrote_lines, self.wrote_cost_time,
time.perf_counter()-self.first_write_time))
return
self.batch.append(line)
if len(self.batch) >= self.max_batch_len:
self._flush()
except queue.Empty:
self._flush()
class BufferedTextFileWriter(object):
END_MARK = '__END_MARK__'
def __init__(self, filename, line_buffer_len, sys_buffer_size=2**25):
self.filename = filename
self.line_buffer_len = line_buffer_len
self.sys_buffer_size = sys_buffer_size
self.opened_fn = None
self.q = None
self._write_thread = None
def __enter__(self):
if self.opened_fn is None:
self.opened_fn = open(self.filename, 'w', buffering=self.sys_buffer_size)
self.q = queue.Queue(self.line_buffer_len)
self._write_thread = _WriterThread(self.q, self.opened_fn, self.line_buffer_len,
"writerthread: {}".format(self.filename))
self._write_thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.opened_fn is not None:
self.q.put(self.END_MARK)
print("begin waiting writer thread to terminate")
self._write_thread.join()
print("writer thread terminated")
self.opened_fn.close()
self.opened_fn = None
self.q = None
self._write_thread = None
def write(self, line):
if self.opened_fn is None:
raise RuntimeError("BufferedTextFileWriter must be used as a context manager (with statement)")
if not line:
return 0
self.q.put(line)
return 1
@property
def wrote_lines(self):
if self._write_thread is None:
return 0
return self._write_thread.wrote_lines
def even_spread_num(n, num_buckets):
d, r = divmod(n, num_buckets)
buckets = [d]*num_buckets
for i in range(r):
buckets[i] += 1
return buckets
def find_files(file_patterns):
if not isinstance(file_patterns, (list, tuple, str)):
print("WARNING: file_patterns should be list/tuple/str, got '{}': {}"
.format(type(file_patterns), file_patterns))
return []
files = []
if not isinstance(file_patterns, (list, tuple)):
file_patterns = [file_patterns]
import glob
for fp in file_patterns:
files.extend(glob.glob(fp))
return files
def get_file_md5(file_path):
import hashlib
if not os.path.isfile(file_path):
return None
md5 = hashlib.md5()
with open(file_path, 'rb') as f:
while True:
d = f.read(4 << 20)
if not d:
break
md5.update(d)
return str(md5.hexdigest())
```
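A short sketch of the size/time helpers and the buffered writer above, assuming the module path `job.pkgs.utils`; the output file path is a placeholder.
```python
# Hedged sketch of the utility helpers defined above.
from job.pkgs.utils import parse_size, parse_timedelta, even_spread_num, BufferedTextFileWriter

print(parse_size("1.5g"))                     # 1610612736 (1.5 * 2**30, as int bytes)
print(parse_size("256m", int_result=False))   # 268435456.0
print(parse_timedelta("30s"))                 # datetime.timedelta(seconds=30)
print(even_spread_num(10, 3))                 # [4, 3, 3]

# The buffered writer hands lines to a background thread and flushes them in batches.
with BufferedTextFileWriter("/tmp/demo.txt", line_buffer_len=1000) as writer:
    for i in range(10):
        writer.write("line {}\n".format(i))
```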
#### File: job/tf_distributed_train/tfjob_launcher.py
```python
import datetime
import json
import subprocess
import time
import uuid
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from job.pkgs.constants import NodeAffnity, JOB_DEF_NAMESPACE, WORKER_DEF_RESOURCE_LIMITS, DEF_IMAGE_PULL_SECRETS, \
ComputeResource, PodAffnity
from job.pkgs.context import JobComponentRunner, KFJobContext
from job.pkgs.k8s.tfjob import TFJob
from job.pkgs.utils import parse_timedelta
TRAINER_TYPE_PLAIN = "plain"
TRAINER_TYPE_RUNNER = "runner"
TRAINER_SPECS = {
TRAINER_TYPE_PLAIN: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_plain_train:latest",
"cmd": ["python3", "-m", "job.tf_plain_train.plain_train"]
},
TRAINER_TYPE_RUNNER: {
"image": "ai.tencentmusic.com/tme-public/tf2.3_keras_train:latest",
"cmd": ["python3", "-m", "job.tf_keras_train.runner_train"]
}
}
class TFJobLauncher(JobComponentRunner):
def job_func(self, jc_entry):
job = jc_entry.job
job_name = job.get('name')
job_namespace = job.get('namespace') or jc_entry.context.namespace or JOB_DEF_NAMESPACE
num_workers = int(job.get('num_workers', 1))
num_pss = int(job.get('num_pss', 0))
node_affin = job.get("node_affin")
pod_affin = job.get("pod_affin")
node_selector = job.get("node_selector", {}) or jc_entry.context.parsed_node_selector()
resources = job.get("resources")
if not isinstance(resources, dict) or 'limits' not in resources:
print("user specified resource spec {} is not valid".format(resources))
resources = jc_entry.context.parsed_resource_spec()
if resources:
print("will use resource spec from tfjob for workers: {}".format(resources))
else:
resources = WORKER_DEF_RESOURCE_LIMITS
ps_resources = job.get("ps_resources")
chief_resources = job.get("chief_resources")
if (ComputeResource.P_GPU in resources['limits'] or ComputeResource.V_GPU_CORE in resources['limits']) \
and not node_affin:
node_affin = NodeAffnity.ONLY_GPU
print("auto set node_affin={}".format(node_affin))
if node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU] and 'cpu' in node_selector:
node_selector.pop('cpu', None)
print("automatically removed 'cpu' from node selector: {}".format(node_selector))
if node_affin in [NodeAffnity.ONLY_CPU, NodeAffnity.PREF_CPU] and 'gpu' in node_selector:
node_selector.pop('gpu', None)
print("automatically removed 'gpu' from node selector: {}".format(node_selector))
restart_policy = job.get("restart_policy", '').strip()
if restart_policy and restart_policy not in ['OnFailure', 'Always', 'ExitCode', 'Never']:
print("WARNING: unrecognized 'restart_policy' '{}', reset to 'Never'".format(restart_policy))
restart_policy = 'Never'
backoff_limits = job.get("backoff_limits", num_workers)
if backoff_limits < 0:
print("WARNING: 'backoff_limits' should be >=0, got {}, defaults to 1".format(backoff_limits))
backoff_limits = 1
job_timeout = parse_timedelta(job.get('timeout', '365d'))
job_polling_interval = parse_timedelta(job.get('polling_interval', '30s'))
trainer_type = job.get("trainer", TRAINER_TYPE_RUNNER).strip().lower()
trainer_spec = TRAINER_SPECS.get(trainer_type)
if not trainer_spec:
raise NotImplementedError("unsupported trainer type '{}', supported are {}"
.format(trainer_type, TRAINER_SPECS.keys()))
print("use trainer '{}', spec={}, num_workers={}, num_pss={}"
.format(trainer_type, trainer_spec, num_workers, num_pss))
driver_job_detail = job.get('job_detail')
driver_args = [
"--job", json.dumps(driver_job_detail),
"--pack-path", jc_entry.pack_path,
"--upstream-output-file", jc_entry.upstream_output_file,
"--export-path", jc_entry.export_path,
"--pipeline-id", jc_entry.pipeline_id,
"--run-id", jc_entry.run_id,
"--creator", jc_entry.creator,
"--output-file", jc_entry.output_file or self.output_file
]
driver_mounts = jc_entry.context.parsed_volumn_mounts() or []
job_labels = {
"run-rtx": jc_entry.runner,
"upload-rtx": jc_entry.creator,
"pipeline-id": jc_entry.pipeline_id,
"run-id": jc_entry.run_id,
"workflow-name": jc_entry.pipeline_name,
'task-id': jc_entry.task_id,
'task-name': jc_entry.task_name
}
user_envs = job.get("envs")
driver_envs = jc_entry.context.to_k8s_env_list()
if isinstance(user_envs, dict):
for k, v in user_envs.items():
driver_envs.append({"name": str(k), "value": str(v)})
if 'profile_batch' in driver_job_detail.get('train_args', {}).get('tensorboard', {}) and \
node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
privileged = True
print("job use gpu and tf profiler, set privileged=True")
else:
privileged = False
self.launch_tfjob(job_name, job_namespace, num_workers, num_pss, trainer_spec.get("image"),
trainer_spec.get("cmd"), driver_args, driver_envs, driver_mounts, resources,
restart_policy, node_affin, pod_affin, job_labels, backoff_limits, job_timeout,
job_polling_interval, False, node_selector, privileged, jc_entry.creator,
ps_resources, chief_resources)
@classmethod
def default_job_name(cls):
import re
ctx = KFJobContext.get_context()
p_name = str(ctx.pipeline_name) or ''
p_name = re.sub(r'[^-a-z0-9]', '-', p_name)
jid = str(uuid.uuid4()).replace('-', '')
return "-".join(["tfjob", p_name, jid])[:54]
# return "tfjob-" + str(uuid.uuid1())
@classmethod
def launch_tfjob(cls, name, namespace, num_workers, num_pss, driver_image, driver_cmd,
driver_args, driver_envs, driver_mounts, resources=None, restart_policy=None,
node_affin=None, pod_affin=None, job_labels={}, backoff_limits=3, job_timeout=None,
job_polling_interval=None, delete_after_finish=False, node_selector={}, privileged=False,
creator='', ps_resources=None, chief_resources=None):
subprocess.check_call("echo '10.101.140.98 cls-g9v4gmm0.ccs.tencent-cloud.com' >> /etc/hosts", shell=True)
k8s_config.load_incluster_config()
k8s_api_client = k8s_client.ApiClient()
tfjob = TFJob("v1", k8s_api_client)
job_name = name.strip() if name and name.strip() else cls.default_job_name()
if node_affin == NodeAffnity.PREF_GPU:
node_affin = NodeAffnity.ONLY_GPU
print("WARNING: 'node_affin' set to 'pref_gpu', changed it to 'only_gpu' to avoid heterogeneity")
if node_affin == NodeAffnity.PREF_CPU:
node_affin = NodeAffnity.ONLY_CPU
print("WARNING: 'node_affin' set to 'pref_cpu', changed it to 'only_cpu' to avoid heterogeneity")
if not pod_affin and node_affin in [NodeAffnity.ONLY_GPU, NodeAffnity.PREF_GPU]:
pod_affin = PodAffnity.CONCENT
print("auto set pod_affin to {}".format(pod_affin))
st = time.perf_counter()
print('begin create new tfjob %s' % job_name)
tfjob.create(job_name, namespace, num_workers, num_pss, driver_image, driver_cmd, driver_args,
driver_envs, driver_mounts, resources, restart_policy, DEF_IMAGE_PULL_SECRETS,
node_affin, pod_affin, job_labels, backoff_limits, node_selector, privileged, creator,
ps_resources, chief_resources)
job_timeout = job_timeout if job_timeout else datetime.timedelta(days=365)
job_polling_interval = job_polling_interval if job_polling_interval else datetime.timedelta(seconds=30)
condition = tfjob.wait_for_condition(namespace, job_name, ["Succeeded", "Failed"], job_timeout,
job_polling_interval, trace_worker_log=True)
print("TFJob '{}' finished in condition '{}', cost {}s".format(job_name, condition, time.perf_counter() - st))
if condition != 'Succeeded':
raise RuntimeError("TFJob '{}' in namespace '{}' failed, num_workers={}, driver_args={}"
.format(job_name, namespace, num_workers, driver_args))
if delete_after_finish:
print("will delete tfjob '{}' in '{}'".format(job_name, namespace))
tfjob.delete(name=job_name, namespace=namespace)
print("deleted tfjob '{}' in '{}'".format(job_name, namespace))
if __name__ == "__main__":
runner = TFJobLauncher("TFJob launcher for train component")
runner.run()
```
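The launcher reads its configuration from the `job` dict passed to `job_func`. The sketch below lists the keys that the code above actually consumes; all concrete values are placeholders, not project defaults.
```python
# Hedged sketch of a job spec as consumed by TFJobLauncher.job_func.
job_spec = {
    "name": "demo-train",
    "namespace": "pipeline",
    "num_workers": 2,
    "num_pss": 1,
    "trainer": "runner",                                    # or "plain"
    "resources": {"limits": {"cpu": "8", "memory": "16G"}},
    "restart_policy": "Never",
    "backoff_limits": 2,
    "timeout": "7d",
    "polling_interval": "30s",
    "envs": {"PYTHONUNBUFFERED": "1"},
    "job_detail": {                                         # forwarded to the trainer via --job
        "script_name": "train.py",
        "model_args": {},
        "train_args": {"epochs": 10, "batch_size": 512},
    },
}
```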
#### File: job/tf_keras_train/runner_train.py
```python
import json
import os
import datetime
from job.pkgs.constants import ComponentOutput
from job.pkgs.context import JobComponentRunner
from job.pkgs.httpclients.model_repo_client import ModelRepoClient
from job.pkgs.tf.model_runner import TFModelRunner
from job.pkgs.utils import make_abs_or_pack_path, split_file_name, try_archive_by_config
class TFRunnerTrainer(JobComponentRunner):
def job_func(self, jc_entry):
tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
print("tf_config={}".format(tf_config))
job = jc_entry.job
user_py_file = job.get('script_name')
model_args = job.get('model_args', {})
train_args = job.get('train_args', {})
train_data_args = job.get('train_data_args', {})
val_data_args = job.get('val_data_args', {})
save_model_args = job.get('save_model_args', {})
if not user_py_file or not user_py_file.strip():
raise RuntimeError("'script_name' not set")
user_py_file = make_abs_or_pack_path(user_py_file, jc_entry.export_path, jc_entry.pack_path)
if not os.path.isfile(user_py_file):
raise RuntimeError("user script '{}' not exist".format(user_py_file))
workdir = os.getcwd()
os.chdir(os.path.dirname(user_py_file))
print("change work dir from '{}' to '{}'".format(workdir, os.getcwd()))
runner = TFModelRunner(user_py_file, jc_entry.export_path, jc_entry.pack_path, tf_config,
model_args=model_args, train_args=train_args,
train_data_args=train_data_args,
val_data_args=val_data_args,
save_model_args=save_model_args)
saved_models = runner.run_train()
if runner.is_chief() and saved_models:
version = train_args.get('version', '').strip()
if not version:
version = datetime.datetime.now().strftime("%Y%m%d.%H%M%S")
save_info_strings = []
for model_save_path, model_name in saved_models:
# if not model_name:
# model_name = split_file_name(user_py_file)[1]
# print("model name not set, will use script file base name '{}' as model name".format(model_name))
archive_config = {
"src": model_save_path,
"path_name": model_name
}
archived = try_archive_by_config(archive_config, jc_entry.export_path, jc_entry.pack_path)
if archived:
model_save_path = archived[0][1]
mrc = ModelRepoClient(jc_entry.creator)
mrc.add_model(jc_entry.pipeline_id, jc_entry.run_id, 'tf', model_name, model_save_path,
version, None, thr_exp=False)
print("added model training record, pipeline_id='{}', run_id='{}', model_name='{}', model_save_path='{}'"
.format(jc_entry.pipeline_id, jc_entry.run_id, model_name, model_save_path))
save_info_strings.append('|'.join([model_save_path, model_name]))
return '\n'.join(save_info_strings)
else:
print("not chief")
if __name__ == '__main__':
trainer = TFRunnerTrainer("TF runner trainer component", ComponentOutput.MODEL_TRAIN_OUTPUT)
trainer.run()
```
#### File: myapp/models/model_job.py
```python
from flask_appbuilder import Model
from sqlalchemy import Column, Integer, String, ForeignKey,Float
from sqlalchemy.orm import relationship
import datetime,time,json
from sqlalchemy import (
Boolean,
Column,
create_engine,
DateTime,
ForeignKey,
Integer,
MetaData,
String,
Table,
Text,
Enum,
)
import numpy
import random
import copy
import logging
from myapp.models.helpers import AuditMixinNullable, ImportMixin
from flask import escape, g, Markup, request
from .model_team import Project
from myapp import app,db
from myapp.models.helpers import ImportMixin
# from myapp.models.base import MyappModel
# add custom models
from sqlalchemy import Column, Integer, String, ForeignKey ,Date,DateTime
from flask_appbuilder.models.decorators import renders
from flask import Markup
from myapp.models.base import MyappModelBase
import datetime
metadata = Model.metadata
conf = app.config
from myapp.utils import core
import re
from myapp.utils.py import py_k8s
import pysnooper
# model definition
class Repository(Model,AuditMixinNullable,MyappModelBase):
__tablename__ = 'repository'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique = True, nullable=False)
server = Column(String(100), nullable=False)
user = Column(String(100), nullable=False)
password = Column(String(100), nullable=False)
hubsecret = Column(String(100))
def __repr__(self):
return self.name
label_columns_spec={
"server":'域名',
"user":"用户名",
"hubsecret": 'k8s hubsecret',
}
label_columns=MyappModelBase.label_columns.copy()
label_columns.update(label_columns_spec)
# model definition
class Images(Model,AuditMixinNullable,MyappModelBase):
__tablename__='images'
id = Column(Integer, primary_key=True)
project_id = Column(Integer, ForeignKey('project.id')) # foreign key
project = relationship(
"Project", foreign_keys=[project_id]
)
name = Column(String(200), nullable=False)
describe = Column(Text)
repository_id = Column(Integer, ForeignKey('repository.id')) # foreign key
repository = relationship(
"Repository", foreign_keys=[repository_id]
)
entrypoint=Column(String(200))
dockerfile=Column(Text)
gitpath=Column(String(200))
label_columns_spec={
"project":'功能分类',
}
label_columns = MyappModelBase.label_columns.copy()
label_columns.update(label_columns_spec)
@property
def images_url(self):
if self.gitpath:
return Markup(f'<a href="{self.gitpath}">{self.name}</a>')
return self.name
def __repr__(self):
return self.name
# model definition
class Job_Template(Model,AuditMixinNullable,MyappModelBase):
__tablename__='job_template'
id = Column(Integer, primary_key=True)
project_id = Column(Integer, ForeignKey('project.id')) # foreign key
project = relationship(
"Project", foreign_keys=[project_id]
)
name = Column(String(100), nullable=False,unique=True)
version = Column(Enum('Release','Alpha'),nullable=False,default='Release')
images_id = Column(Integer, ForeignKey('images.id')) # foreign key
images = relationship(
Images, foreign_keys=[images_id]
)
hostAliases = Column(Text) # extra hosts file entries
describe = Column(Text)
workdir=Column(String(400))
entrypoint=Column(String(200))
args=Column(Text)
env = Column(Text) # default built-in environment variables
volume_mount = Column(String(400),default='') # mounts that are always required
privileged = Column(Boolean, default=False) # whether to run in privileged mode
accounts = Column(String(100)) # service account to use
demo=Column(Text)
expand = Column(Text(65536), default='{}')
label_columns_spec={
"project": "功能分类",
}
label_columns = MyappModelBase.label_columns.copy()
label_columns.update(label_columns_spec)
def __repr__(self):
return self.name # +"(%s)"%self.version
@renders('args')
def args_html(self):
return Markup('<pre><code>' + self.args + '</code></pre>')
@renders('demo')
def demo_html(self):
return Markup('<pre><code>' + self.demo + '</code></pre>')
@renders('expand')
def expand_html(self):
return Markup('<pre><code>' + self.expand + '</code></pre>')
@renders('name')
def name_title(self):
return Markup(f'<a data-toggle="tooltip" rel="tooltip" title data-original-title="{self.describe}">{self.name}</a>')
@property
def images_url(self):
return Markup(f'<a target=_blank href="/images_modelview/show/{self.images.id}">{self.images.name}</a>')
# import pysnooper
# @pysnooper.snoop()
def get_env(self,name):
if self.env and name in self.env:
envs = self.env.split('\n')
for env in envs:
if name in env:
return env[env.index('=')+1:].strip()
else:
return None
def clone(self):
return Job_Template(
name=self.name,
version=self.version,
project_id=self.project_id,
images_id=self.images_id,
describe=self.describe,
args=self.args,
demo=self.demo,
expand=self.expand
)
# model definition
class Pipeline(Model,ImportMixin,AuditMixinNullable,MyappModelBase):
__tablename__ = 'pipeline'
id = Column(Integer, primary_key=True)
name = Column(String(100),nullable=False,unique=True)
describe = Column(String(200),nullable=False)
project_id = Column(Integer, ForeignKey('project.id'),nullable=False) # foreign key
project = relationship(
"Project", foreign_keys=[project_id]
)
dag_json = Column(Text,nullable=False,default='{}')
namespace=Column(String(100),default='pipeline')
global_env = Column(String(500),default='')
schedule_type = Column(Enum('once', 'crontab'),nullable=False,default='once')
cron_time = Column(String(100)) # schedule period (crontab expression)
pipeline_file=Column(Text(65536),default='')
pipeline_argo_id = Column(String(100))
version_id = Column(String(100))
run_id = Column(String(100))
node_selector = Column(String(100), default='cpu=true,train=true') # node selector
image_pull_policy = Column(Enum('Always','IfNotPresent'),nullable=False,default='Always')
parallelism = Column(Integer, nullable=False,default=1) # max number of tasks running in parallel within one pipeline
alert_status = Column(String(100), default='Pending,Running,Succeeded,Failed,Terminated') # statuses that trigger alerts: Pending,Running,Succeeded,Failed,Unknown,Waiting,Terminated
alert_user = Column(String(300), default='')
expand = Column(Text(65536),default='[]')
depends_on_past = Column(Boolean, default=False)
max_active_runs = Column(Integer, nullable=False,default=3) # max number of concurrently running pipeline instances
expired_limit = Column(Integer, nullable=False, default=1) # number of expired runs to retain; when set, it takes precedence over max_active_runs
parameter = Column(Text(65536), default='{}')
def __repr__(self):
return self.name
@property
def pipeline_url(self):
pipeline_url="/pipeline_modelview/web/" +str(self.id)
return Markup(f'<a href="{pipeline_url}">{self.describe}</a>')
@property
def run_pipeline(self):
pipeline_run_url = "/pipeline_modelview/run_pipeline/" +str(self.id)
return Markup(f'<a target=_blank href="{pipeline_run_url}">run</a>')
@property
def cronjob_start_time(self):
cronjob_start_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if self.parameter:
return json.loads(self.parameter).get('cronjob_start_time',cronjob_start_time)
return cronjob_start_time
@property
def log(self):
if self.run_id:
pipeline_url = "/pipeline_modelview/web/log/%s"%self.id
return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')
else:
return Markup(f'日志')
@property
def pod(self):
url = "/pipeline_modelview/web/pod/%s" % self.id
return Markup(f'<a target=_blank href="{url}">pod</a>')
@renders('dag_json')
def dag_json_html(self):
dag_json = self.dag_json or '{}'
return Markup('<pre><code>' + dag_json + '</code></pre>')
@renders('expand')
def expand_html(self):
return Markup('<pre><code>' + self.expand + '</code></pre>')
@renders('parameter')
def parameter_html(self):
return Markup('<pre><code>' + self.parameter + '</code></pre>')
@renders('pipeline_file')
def pipeline_file_html(self):
pipeline_file = self.pipeline_file or ''
return Markup('<pre><code>' + pipeline_file + '</code></pre>')
# @renders('describe')
# def describe_html(self):
# return Markup('<pre><code>' + self.pipeline_file + '</code></pre>')
# get all tasks of this pipeline
def get_tasks(self,dbsession=db.session):
return dbsession.query(Task).filter_by(pipeline_id=self.id).all()
# @pysnooper.snoop()
def delete_old_task(self, dbsession=db.session):
try:
expand_tasks = json.loads(self.expand) if self.expand else []
tasks = dbsession.query(Task).filter_by(pipeline_id=self.id).all()
tasks_id = [int(expand_task['id']) for expand_task in expand_tasks if expand_task.get('id', '').isdecimal()]
for task in tasks:
if task.id not in tasks_id:
db.session.delete(task)
db.session.commit()
except Exception as e:
print(e)
# get the workflows currently running for this pipeline
def get_workflow(self):
back_crds = []
try:
k8s_client = py_k8s.K8s(self.project.cluster['KUBECONFIG'])
crd_info = conf.get("CRD_INFO", {}).get('workflow', {})
if crd_info:
crds = k8s_client.get_crd(group=crd_info['group'], version=crd_info['version'],
plural=crd_info['plural'], namespace=self.namespace,
label_selector="pipeline-id=%s"%str(self.id))
for crd in crds:
if crd.get('labels', '{}'):
labels = json.loads(crd['labels'])
if labels.get('pipeline-id', '') == str(self.id):
back_crds.append(crd)
return back_crds
except Exception as e:
print(e)
return back_crds
@property
def run_instance(self):
# workflow = db.session.query(Workflow).filter_by(foreign_key= str(self.id)).filter_by(status= 'Running').filter_by(create_time > datetime.datetime.now().strftime("%Y-%m-%d")).all()
# workflow_num = len(workflow) if workflow else 0
# url = '/workflow_modelview/list/?_flt_2_name=%s'%self.name.replace("_","-")[:54]
url = r'/workflow_modelview/list/?_flt_2_labels="pipeline-id"%3A+"'+'%s"' % self.id
# print(url)
return Markup(f"<a href='{url}'>{self.schedule_type}</a>") # k8s label values have a length limit
# this dag may be inconsistent, so correct it against the real tasks
def fix_dag_json(self,dbsession=db.session):
if not self.dag_json:
return "{}"
dag = json.loads(self.dag_json)
# if tasks were added but the pipeline was not saved, build the dag automatically
if not dag:
tasks = self.get_tasks(dbsession)
if tasks:
dag = {}
for task in tasks:
dag[task.name] = {}
dag_json = json.dumps(dag, indent=4, ensure_ascii=False)
return dag_json
else:
return "{}"
# remove tasks that no longer exist from the dag
if dag:
tasks = self.get_tasks(dbsession)
all_task_names = [task.name for task in tasks]
# first add tasks that are missing from the dag
for task in tasks:
if task.name not in dag:
dag[task.name] = {}
# remove deleted tasks from the dag
dag_back = copy.deepcopy(dag)
for dag_task_name in dag_back:
if dag_task_name not in all_task_names:
del dag[dag_task_name]
# drop deleted tasks from other tasks' upstream dependencies
for dag_task_name in dag:
upstream_tasks = dag[dag_task_name]['upstream'] if 'upstream' in dag[dag_task_name] else []
new_upstream_tasks = []
for upstream_task in upstream_tasks:
if upstream_task in all_task_names:
new_upstream_tasks.append(upstream_task)
dag[dag_task_name]['upstream'] = new_upstream_tasks
# def get_downstream(dag):
# # 生成下行链路图
# for task_name in dag:
# dag[task_name]['downstream'] = []
# for task_name1 in dag:
# if task_name in dag[task_name1].get("upstream", []):
# dag[task_name]['downstream'].append(task_name1)
# return dag
#
# dag = get_downstream(dag)
dag_json = json.dumps(dag, indent=4, ensure_ascii=False)
return dag_json
# auto-center the graph in the view
# @pysnooper.snoop()
def fix_position(self):
expand_tasks = json.loads(self.expand) if self.expand else []
if not expand_tasks:
expand_tasks = []
x=[]
y=[]
for item in expand_tasks:
if "position" in item:
if item['position'].get('x',0):
x.append(int(item['position'].get('x',0)))
y.append(int(item['position'].get('y', 0)))
x_dist=400- numpy.mean(x) if x else 0
y_dist = 300 -numpy.mean(y) if y else 0
for item in expand_tasks:
if "position" in item:
if item['position'].get('x', 0):
item['position']['x'] = int(item['position']['x'])+x_dist
item['position']['y'] = int(item['position']['y']) + y_dist
return expand_tasks
# build the expand field required by the frontend
def fix_expand(self,dbsession=db.session):
tasks_src = self.get_tasks(dbsession)
tasks = {}
for task in tasks_src:
tasks[str(task.id)] = task
expand_tasks = json.loads(self.expand) if self.expand else []
if not expand_tasks:
expand_tasks=[]
expand_copy = copy.deepcopy(expand_tasks)
# remove tasks that no longer exist
for item in expand_copy:
if "data" in item:
if item['id'] not in tasks:
expand_tasks.remove(item)
else:
# if item['source'] not in tasks or item['target'] not in tasks:
expand_tasks.remove(item) # drop all upstream/downstream edges; they are rebuilt below
# add positions for newly added tasks
for task_id in tasks:
exist=False
for item in expand_tasks:
if "data" in item and item['id']==str(task_id):
exist=True
break
if not exist:
# if task_id not in expand_tasks:
expand_tasks.append({
"id": str(task_id),
"type": "dataSet",
"position": {
"x": random.randint(100,1000),
"y": random.randint(100,1000)
},
"data": {
"taskId": task_id,
"taskName": tasks[task_id].name,
"name": tasks[task_id].name,
"describe": tasks[task_id].label
}
})
# rebuild upstream/downstream relations for all tasks
dag_json = json.loads(self.dag_json)
for task_name in dag_json:
upstreams = dag_json[task_name].get("upstream", [])
if upstreams:
for upstream_name in upstreams:
upstream_task_id = [task_id for task_id in tasks if tasks[task_id].name==upstream_name][0]
task_id = [task_id for task_id in tasks if tasks[task_id].name==task_name][0]
if upstream_task_id and task_id:
expand_tasks.append(
{
"source": str(upstream_task_id),
# "sourceHandle": None,
"target": str(task_id),
# "targetHandle": None,
"id": self.name + "__edge-%snull-%snull" % (upstream_task_id, task_id)
}
)
return expand_tasks
# @pysnooper.snoop()
def clone(self):
return Pipeline(
name=self.name.replace('_','-'),
project_id=self.project_id,
dag_json=self.dag_json,
describe=self.describe,
namespace=self.namespace,
global_env=self.global_env,
schedule_type='once',
cron_time=self.cron_time,
pipeline_file='',
pipeline_argo_id=self.pipeline_argo_id,
node_selector=self.node_selector,
image_pull_policy=self.image_pull_policy,
parallelism=self.parallelism,
alert_status='',
expand=self.expand,
parameter=self.parameter
)
# model definition
class Task(Model,ImportMixin,AuditMixinNullable,MyappModelBase):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
label = Column(String(100), nullable=False) # display name (alias)
job_template_id = Column(Integer, ForeignKey('job_template.id')) # foreign key
job_template = relationship(
"Job_Template", foreign_keys=[job_template_id]
)
pipeline_id = Column(Integer, ForeignKey('pipeline.id')) # foreign key
pipeline = relationship(
"Pipeline", foreign_keys=[pipeline_id]
)
working_dir = Column(String(1000),default='')
command = Column(String(1000),default='')
overwrite_entrypoint = Column(Boolean,default=False) # whether to override the image entrypoint
args = Column(Text)
volume_mount = Column(String(200),default='kubeflow-user-workspace(pvc):/mnt,kubeflow-archives(pvc):/archives') # volume mounts
node_selector = Column(String(100),default='cpu=true,train=true') # node selector
resource_memory = Column(String(100),default='2G')
resource_cpu = Column(String(100), default='2')
resource_gpu= Column(String(100), default='0')
timeout = Column(Integer, nullable=False,default=0)
retry = Column(Integer, nullable=False,default=0)
outputs = Column(Text,default='{}') # task outputs, copied to minio, e.g. {'prediction': '/output.txt'}
monitoring = Column(Text,default='{}') # monitoring info for this task
expand = Column(Text(65536), default='')
# active = Column(Boolean,default=True) # 是否激活,可以先配置再运行跑
export_parent = "pipeline"
def __repr__(self):
return self.name
@property
def debug(self):
return Markup(f'<a target=_blank href="/task_modelview/debug/{self.id}">debug</a>')
@property
def run(self):
return Markup(f'<a target=_blank href="/task_modelview/run/{self.id}">run</a>')
@property
def clear(self):
return Markup(f'<a href="/task_modelview/clear/{self.id}">clear</a>')
@property
def log(self):
return Markup(f'<a target=_blank href="/task_modelview/log/{self.id}">log</a>')
def get_node_selector(self):
project_node_selector = self.get_default_node_selector(self.pipeline.project.node_selector,self.resource_gpu,'train')
gpu_type = core.get_gpu(self.resource_gpu)[1]
if gpu_type:
project_node_selector+=',gpu-type='+gpu_type
return project_node_selector
@renders('args')
def args_html(self):
return Markup('<pre><code>' + self.args + '</code></pre>')
@renders('expand')
def expand_html(self):
return Markup('<pre><code>' + self.expand + '</code></pre>')
@renders('monitoring')
def monitoring_html(self):
try:
monitoring = json.loads(self.monitoring)
monitoring['link']=self.pipeline.project.cluster.get('GRAFANA_HOST','').strip('/')+conf.get('GRAFANA_TASK_PATH')+monitoring.get('pod_name','')
return Markup('<pre><code>' + json.dumps(monitoring,ensure_ascii=False,indent=4) + '</code></pre>')
except Exception as e:
return Markup('<pre><code> 暂无 </code></pre>')
@property
def job_args_demo(self):
return Markup('<pre><code>' + self.job_template.demo + '</code></pre>')
@property
def job_template_url(self):
return Markup(f'<a target=_blank href="/job_template_modelview/show/{self.job_template.id}">{self.job_template.name}</a>')
def clone(self):
return Task(
name=self.name.replace('_','-'),
label=self.label,
job_template_id=self.job_template_id,
pipeline_id=self.pipeline_id,
working_dir=self.working_dir,
command=self.command,
args=self.args,
volume_mount=self.volume_mount,
node_selector=self.node_selector,
resource_memory=self.resource_memory,
resource_cpu=self.resource_cpu,
timeout=self.timeout,
retry=self.retry,
expand=self.expand
)
# one record per uploaded run
class RunHistory(Model,MyappModelBase):
__tablename__ = "run"
id = Column(Integer, primary_key=True)
pipeline_id = Column(Integer, ForeignKey('pipeline.id')) # foreign key
pipeline = relationship(
"Pipeline", foreign_keys=[pipeline_id]
)
pipeline_file = Column(Text(65536), default='')
pipeline_argo_id = Column(String(100)) # id of the uploaded pipeline
version_id = Column(String(100)) # uploaded version id
experiment_id = Column(String(100))
run_id = Column(String(100))
message = Column(Text, default='')
created_on = Column(DateTime, default=datetime.datetime.now, nullable=False)
execution_date=Column(String(100), nullable=False)
status = Column(String(100),default='comed') # 'comed' means the scheduled time has arrived, 'created' means the run has been submitted; deduplicate before operating
@property
def status_url(self):
if self.status=='comed':
return self.status
return Markup(f'<a target=_blank href="/workflow_modelview/list/?_flt_2_labels={self.run_id}">{self.status}</a>')
@property
def creator(self):
return self.pipeline.creator
@property
def pipeline_url(self):
return Markup(f'<a target=_blank href="/pipeline_modelview/web/{self.pipeline.id}">{self.pipeline.describe}</a>')
@property
def history(self):
url = r'/workflow_modelview/list/?_flt_2_labels="pipeline-id"%3A+"' + '%s"' % self.pipeline_id
return Markup(f"<a href='{url}'>运行记录</a>")
@property
def log(self):
if self.run_id:
pipeline_url = self.pipeline.project.cluster.get('PIPELINE_URL')+ "runs/details/" +str(self.run_id)
return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')
else:
return Markup(f'日志')
import sqlalchemy as sa
class Crd:
# __tablename__ = "crd"
id = Column(Integer, primary_key=True)
name = Column(String(100),default='')
namespace = Column(String(100), default='')
create_time=Column(String(100), default='')
change_time = Column(String(100), default='')
status = Column(String(100), default='')
annotations = Column(Text, default='')
labels = Column(Text, default='')
spec = Column(Text(65536), default='')
status_more = Column(Text(65536), default='')
username = Column(String(100), default='')
info_json = Column(Text, default='{}')
add_row_time = Column(DateTime, default=datetime.datetime.now)
# delete = Column(Boolean,default=False)
foreign_key = Column(String(100), default='')
@renders('annotations')
def annotations_html(self):
return Markup('<pre><code>' + self.annotations + '</code></pre>')
@renders('labels')
def labels_html(self):
return Markup('<pre><code>' + self.labels + '</code></pre>')
@property
def final_status(self):
status='未知'
try:
if self.status_more:
status = json.loads(self.status_more).get('phase','未知')
except Exception as e:
print(e)
return status
@renders('spec')
def spec_html(self):
return Markup('<pre><code>' + self.spec + '</code></pre>')
@renders('status_more')
def status_more_html(self):
return Markup('<pre><code>' + self.status_more + '</code></pre>')
@renders('info_json')
def info_json_html(self):
return Markup('<pre><code>' + self.info_json + '</code></pre>')
@renders('namespace')
def namespace_url(self):
# user_roles = [role.name.lower() for role in list(g.user.roles)]
# if "admin" in user_roles:
url = conf.get('K8S_DASHBOARD_CLUSTER', '') + '#/search?namespace=%s&q=%s' % (self.namespace, self.name.replace('_', '-'))
# else:
# url = conf.get('K8S_DASHBOARD_PIPELINE','')+'#/search?namespace=%s&q=%s'%(self.namespace,self.name.replace('_','-'))
return Markup(f'<a target=_blank href="{url}">{self.namespace}</a>')
@property
def stop(self):
return Markup(f'<a href="../stop/{self.id}">停止</a>')
# model definition
class Workflow(Model,Crd,MyappModelBase):
__tablename__ = 'workflow'
@renders('namespace')
def namespace_url(self):
if self.pipeline:
url = conf.get('K8S_DASHBOARD_CLUSTER', '') + '#/search?namespace=%s&q=%s' % (self.namespace, self.pipeline.name.replace('_', '-'))
return Markup(f'<a target=_blank href="{url}">{self.namespace}</a>')
else:
url = conf.get('K8S_DASHBOARD_CLUSTER', '') + '#/search?namespace=%s&q=%s' % (self.namespace, self.name.replace('_', '-'))
return Markup(f'<a target=_blank href="{url}">{self.namespace}</a>')
@property
def run_history(self):
label = json.loads(self.labels) if self.labels else {}
runid = label.get('run-id','')
if runid:
return db.session.query(RunHistory).filter(RunHistory.pipeline_file.contains(runid)).first()
# return db.session.query(RunHistory).filter_by(run_id=runid).first()
else:
return None
@property
def schedule_type(self):
run_history = self.run_history
if run_history:
return 'crontab'
else:
return 'once'
@property
def execution_date(self):
run_history = self.run_history
if run_history:
return run_history.execution_date
else:
return 'once'
@property
def task_status(self):
status_mode = json.loads(self.status_more)
task_status={}
nodes=status_mode.get('nodes',{})
tasks = self.pipeline.get_tasks()
for pod_name in nodes:
pod = nodes[pod_name]
if pod['type']=='Pod':
if pod['phase']=='Succeeded': # ignore retried and failed pods
templateName=pod['templateName']
for task in tasks:
if task.name==templateName:
finish_time = datetime.datetime.strptime(pod['finishedAt'], '%Y-%m-%d %H:%M:%S')
start_time = datetime.datetime.strptime(pod['startedAt'], '%Y-%m-%d %H:%M:%S')
elapsed = (finish_time - start_time).days * 24 + (finish_time - start_time).seconds / 60 / 60
task_status[task.label]= str(round(elapsed,2))+"h"
message=""
for key in task_status:
message += key+": "+task_status[key]+"\n"
return Markup('<pre><code>' + message + '</code></pre>')
@property
def elapsed_time(self):
status_mode = json.loads(self.status_more)
finish_time=status_mode.get('finishedAt',self.change_time)
if not finish_time: finish_time=self.change_time
start_time = status_mode.get('startedAt', '')
try:
if finish_time and start_time:
if 'T' in finish_time:
finish_time = datetime.datetime.strptime(finish_time,'%Y-%m-%dT%H:%M:%S')
else:
finish_time = datetime.datetime.strptime(finish_time, '%Y-%m-%d %H:%M:%S')
if 'T' in start_time:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')
else:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
elapsed = (finish_time-start_time).days*24+(finish_time-start_time).seconds/60/60
return str(round(elapsed,2))+"h"
except Exception as e:
print(e)
return '未知'
@property
def pipeline_url(self):
if self.labels:
try:
labels = json.loads(self.labels)
pipeline_id = labels.get("pipeline-id",'')
if pipeline_id:
pipeline = db.session.query(Pipeline).filter_by(id=int(pipeline_id)).first()
if pipeline:
# return Markup(f'{pipeline.describe}')
return Markup(f'<a href="/pipeline_modelview/web/{pipeline.id}">{pipeline.describe}</a>')
pipeline_name = self.name[:-6]
pipeline = db.session.query(Pipeline).filter_by(name=pipeline_name).first()
if pipeline:
return Markup(f'{pipeline.describe}')
except Exception as e:
print(e)
return Markup(f'未知')
@property
def pipeline(self):
if self.labels:
try:
labels = json.loads(self.labels)
pipeline_id = labels.get("pipeline-id",'')
if pipeline_id:
pipeline = db.session.query(Pipeline).filter_by(id=int(pipeline_id)).first()
if pipeline:
return pipeline
# pipeline_name = self.name[:-6]
# pipeline = db.session.query(Pipeline).filter_by(name=pipeline_name).first()
# return pipeline
except Exception as e:
print(e)
return None
@property
def project(self):
pipeline = self.pipeline
if pipeline:
return pipeline.project.name
else:
return "未知"
@property
def log(self):
if self.labels:
try:
labels = json.loads(self.labels)
run_id = labels.get("pipeline/runid",'')
if run_id:
pipeline_url = conf.get('PIPELINE_URL')+ "runs/details/" +str(run_id)
return Markup(f'<a target=_blank href="{pipeline_url}">日志</a>')
except Exception as e:
print(e)
return Markup(f'日志')
# model definition
class Tfjob(Model,Crd,MyappModelBase):
__tablename__ = 'tfjob'
@property
def pipeline(self):
if self.labels:
try:
labels = json.loads(self.labels)
pipeline_id = labels.get("pipeline-id",'')
if pipeline_id:
pipeline = db.session.query(Pipeline).filter_by(id=int(pipeline_id)).first()
return Markup(f'<a href="/pipeline_modelview/list/?_flt_2_name={pipeline.name}">{pipeline.describe}</a>')
except Exception as e:
print(e)
return Markup(f'未知')
@property
def run_instance(self):
if self.labels:
try:
labels = json.loads(self.labels)
run_id = labels.get("run-id",'')
if run_id:
return Markup(f'<a href="/workflow_modelview/list/?_flt_2_labels={run_id}">运行实例</a>')
except Exception as e:
print(e)
return Markup(f'未知')
# model definition
class Xgbjob(Model,Crd,MyappModelBase):
__tablename__ = 'xgbjob'
# model definition
class Pytorchjob(Model,Crd,MyappModelBase):
__tablename__ = 'pytorchjob'
```
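The `elapsed_time` and `task_status` properties above both turn `startedAt`/`finishedAt` timestamps into a duration in hours. A minimal, self-contained sketch of that calculation (names are illustrative, not from the source; it assumes timestamps without a timezone suffix, in either the ISO 'T' form or the space-separated form):
```python
import datetime

def parse_ts(value):
    # pick the format the way elapsed_time does: ISO with 'T', otherwise space-separated
    fmt = '%Y-%m-%dT%H:%M:%S' if 'T' in value else '%Y-%m-%d %H:%M:%S'
    return datetime.datetime.strptime(value, fmt)

def elapsed_hours(started_at, finished_at):
    delta = parse_ts(finished_at) - parse_ts(started_at)
    return round(delta.days * 24 + delta.seconds / 60 / 60, 2)

# elapsed_hours('2022-01-01T08:00:00', '2022-01-01 09:30:00') -> 1.5
```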
#### File: cube-studio/myapp/security.py
```python
from flask_login import current_user, LoginManager
import logging
import json
import jwt
from sqlalchemy.ext.declarative import declared_attr
from flask_babel import lazy_gettext
import time
import hashlib
from typing import List
import requests
from flask import current_app,redirect, g, flash, request, session, abort, make_response
from flask_appbuilder.security.sqla import models as ab_models
from flask_appbuilder.security.sqla.manager import SecurityManager
from werkzeug.security import generate_password_hash
from flask_babel import lazy_gettext as _
from flask_appbuilder.security.views import (
PermissionModelView,
PermissionViewModelView,
RoleModelView,
UserModelView,
RoleListWidget,
RoleShowWidget,
)
from werkzeug.security import check_password_hash
from flask_appbuilder.security.sqla.models import (
assoc_permissionview_role,
assoc_user_role,
)
from flask import g
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Sequence,
String,
Table,
UniqueConstraint,
)
from sqlalchemy.orm import backref, relationship
from flask_appbuilder.security.decorators import has_access, has_access_api, permission_name
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.widgets import ListWidget
from flask_appbuilder.const import LOGMSG_WAR_SEC_LOGIN_FAILED
from sqlalchemy import or_
import xml.etree.ElementTree as ET
from myapp.exceptions import MyappSecurityException
from flask_appbuilder.security.views import AuthDBView, AuthRemoteUserView
from flask_appbuilder.security.views import expose
from flask_appbuilder import base
from flask_login import login_user, logout_user,login_manager
from flask_appbuilder.security.views import AuthDBView, AuthRemoteUserView
from flask_appbuilder.security.registerviews import RegisterUserDBView
from flask_appbuilder.security.forms import RegisterUserDBForm
from flask_appbuilder.security.views import expose
import xml.etree.ElementTree as ET
from functools import update_wrapper
from flask import redirect, g, flash, request, session, abort
from celery.schedules import crontab
from flask_appbuilder.security.sqla.models import assoc_permissionview_role
from sqlalchemy import select, Table
from flask_appbuilder.const import (
AUTH_DB,
AUTH_LDAP,
AUTH_OAUTH,
AUTH_OID,
AUTH_REMOTE_USER,
LOGMSG_ERR_SEC_AUTH_LDAP,
LOGMSG_ERR_SEC_AUTH_LDAP_TLS,
LOGMSG_WAR_SEC_LOGIN_FAILED,
LOGMSG_WAR_SEC_NO_USER,
LOGMSG_WAR_SEC_NOLDAP_OBJ,
PERMISSION_PREFIX
)
from flask_appbuilder.models.sqla import Model
from flask_appbuilder.actions import action
import pysnooper
import json
# user list page template
class MyappSecurityListWidget(ListWidget):
"""
Redeclaring to avoid circular imports
"""
template = "myapp/fab_overrides/list.html"
# role list page template
class MyappRoleListWidget(ListWidget):
"""
Role model view from FAB already uses a custom list widget override
So we override the override
"""
template = "myapp/fab_overrides/list_role.html"
def __init__(self, **kwargs):
kwargs["appbuilder"] = current_app.appbuilder
super().__init__(**kwargs)
# customize the list, add and edit page content
UserModelView.list_columns= ["username", "active", "roles"]
UserModelView.edit_columns= ["first_name", "last_name", "username", "active", "email"]
UserModelView.add_columns= ["first_name", "last_name", "username", "email", "active", "roles"]
UserModelView.list_widget = MyappSecurityListWidget
RoleModelView.list_widget = MyappRoleListWidget
PermissionViewModelView.list_widget = MyappSecurityListWidget
PermissionModelView.list_widget = MyappSecurityListWidget
# custom extension of the built-in user model
from flask_appbuilder.security.sqla.models import User,Role
from sqlalchemy import Column, Integer, ForeignKey, String, Sequence, Table
# override the model binding
class MyUser(User):
__tablename__ = 'ab_user'
org = Column(String(200)) # new attribute: organization
def get_full_name(self):
return self.username
# use the username as the display name
def __repr__(self):
return self.username
def is_admin(self):
user_roles = [role.name.lower() for role in list(self.roles)]
if "admin" in user_roles:
return True
return False
@property
def secret(self):
if self.changed_on:
pass
# help(self.changed_on)
# timestamp = int(func.date_format(self.changed_on))
timestamp = int(self.changed_on.timestamp())
payload = {
"iss": self.username # 用户名作为身份
# "iat": timestamp, # 签发期
# "nbf": timestamp, # 生效期
# "exp": timestamp + 60 * 60 * 24 * 30 * 12, # 有效期12个月
}
global_password = '<PASSWORD>'
encoded_jwt = jwt.encode(payload, global_password, algorithm='HS256')
encoded_jwt = encoded_jwt.decode('utf-8')
return encoded_jwt
return ''
# custom role view
class MyRoleModelView(RoleModelView):
datamodel = SQLAInterface(Role)
order_columns = ["id"]
route_base = "/roles"
list_columns = ["name", "permissions"]
# custom user display view
class MyUserRemoteUserModelView(UserModelView):
list_columns = ["username", "active", "roles", ]
edit_columns = ["first_name", "last_name", "username", "active", "email", "roles",'org' ]
add_columns = ["first_name", "last_name", "username", "email", "active", "roles",'org' ]
show_columns = ["username", "active", "roles", "login_count"]
list_widget = MyappSecurityListWidget
label_columns = {
"get_full_name": lazy_gettext("Full Name"),
"first_name": lazy_gettext("First Name"),
"last_name": lazy_gettext("Last Name"),
"username": lazy_gettext("User Name"),
"password": lazy_gettext("Password"),
"active": lazy_gettext("Is Active?"),
"email": lazy_gettext("Email"),
"roles": lazy_gettext("Role"),
"last_login": lazy_gettext("Last login"),
"login_count": lazy_gettext("Login count"),
"fail_login_count": lazy_gettext("Failed login count"),
"created_on": lazy_gettext("Created on"),
"created_by": lazy_gettext("Created by"),
"changed_on": lazy_gettext("Changed on"),
"changed_by": lazy_gettext("Changed by"),
"secret": lazy_gettext("Authorization"),
}
show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count",'secret']},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email",'org'], "expanded": True},
),
(
lazy_gettext("Audit Info"),
{
"fields": [
"last_login",
"fail_login_count",
"created_on",
"created_by",
"changed_on",
"changed_by",
],
"expanded": False,
},
),
]
user_show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count",'secret']},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email"], "expanded": True},
),
]
@expose("/userinfo/")
@has_access
def userinfo(self):
item = self.datamodel.get(g.user.id, self._base_filters)
widgets = self._get_show_widget(
g.user.id, item, show_fieldsets=self.user_show_fieldsets
)
self.update_redirect()
return self.render_template(
self.show_template,
title=self.user_info_title,
widgets=widgets,
appbuilder=self.appbuilder,
)
from myapp.project import MyCustomRemoteUserView
from myapp.project import Myauthdbview
# myapp's built-in roles and role permissions, with various custom permissions
# in the base FAB SecurityManager, def load_user(self, pk): is what authenticates the user
# before_request assigns the current user to g.user
# @pysnooper.snoop()
class MyappSecurityManager(SecurityManager):
user_model = MyUser # 用户使用自定义的用户
rolemodelview = MyRoleModelView #
# remote authentication
userremoteusermodelview = MyUserRemoteUserModelView
authremoteuserview = MyCustomRemoteUserView
# username/password authentication
userdbmodelview = MyUserRemoteUserModelView
authdbview = Myauthdbview
# work performed before each request: authentication
@staticmethod
def before_request():
g.user = current_user
# if len(request.path)>7 and request.path[:7]!='/static' and g.user and hasattr(g.user, 'username'):
# logging.info('------------%s(%s):%s'%(request.method,g.user.username,request.path))
def __init__(self, appbuilder):
super(MyappSecurityManager, self).__init__(appbuilder)
# add authentication based on the request header
self.lm.header_loader(self.load_user_from_header)
# header-based authentication: resolve the user by rtx username
# @pysnooper.snoop(depth=1)
def load_user_from_header(self, authorization_value):
# token=None
# if 'token' in request.headers:
# token = request.headers['token']
if authorization_value:
# plain rtx username: accepted without further verification
if len(authorization_value) < 20:
username = authorization_value
if username:
user = self.find_user(username)
g.user = user
return user
else: # token authentication
encoded_jwt = authorization_value.encode('utf-8')
payload = jwt.decode(encoded_jwt, 'myapp', algorithms=['HS256'])
# if payload['iat'] > time.time():
# return
# elif payload['exp'] < time.time():
# return
# else:
user = self.find_user(payload['iss'])
g.user = user
return user
# custom login-user loader
def load_user(self, pk):
user = self.get_user_by_id(int(pk))
# set cookie
return user
# register the sub-menus and links under the Security menu
# @pysnooper.snoop()
def register_views(self):
if not self.appbuilder.app.config.get('FAB_ADD_SECURITY_VIEWS', True):
return
# Security APIs
self.appbuilder.add_api(self.security_api)
if self.auth_user_registration:
if self.auth_type == AUTH_DB:
self.registeruser_view = self.registeruserdbview()
elif self.auth_type == AUTH_OID:
self.registeruser_view = self.registeruseroidview()
elif self.auth_type == AUTH_OAUTH:
self.registeruser_view = self.registeruseroauthview()
if self.registeruser_view:
self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.resetpasswordview())
self.appbuilder.add_view_no_menu(self.resetmypasswordview())
self.appbuilder.add_view_no_menu(self.userinfoeditview())
if self.auth_type == AUTH_DB:
self.user_view = self.userdbmodelview
self.auth_view = self.authdbview()
elif self.auth_type == AUTH_LDAP:
self.user_view = self.userldapmodelview
self.auth_view = self.authldapview()
elif self.auth_type == AUTH_OAUTH:
self.user_view = self.useroauthmodelview
self.auth_view = self.authoauthview()
elif self.auth_type == AUTH_REMOTE_USER:
self.user_view = self.userremoteusermodelview
self.auth_view = self.authremoteuserview()
else:
self.user_view = self.useroidmodelview
self.auth_view = self.authoidview()
if self.auth_user_registration:
pass
self.registeruser_view = self.registeruseroidview()
self.appbuilder.add_view_no_menu(self.registeruser_view)
self.appbuilder.add_view_no_menu(self.auth_view)
self.user_view = self.appbuilder.add_view(
self.user_view,
"List Users",
icon="fa-user",
href="/users/list/?_flt_2_username=",
label=_("List Users"),
category="Security",
category_icon="fa-cogs",
category_label=_("Security"),
)
role_view = self.appbuilder.add_view(
self.rolemodelview,
"List Roles",
icon="fa-group",
href="/roles/list/?_flt_2_name=",
label=_("List Roles"),
category="Security",
category_icon="fa-cogs",
)
role_view.related_views = [self.user_view.__class__]
if self.userstatschartview:
self.appbuilder.add_view(
self.userstatschartview,
"User's Statistics",
icon="fa-bar-chart-o",
label=_("User's Statistics"),
category="Security",
)
if self.auth_user_registration:
self.appbuilder.add_view(
self.registerusermodelview,
"User's Statistics",
icon="fa-user-plus",
label=_("User Registrations"),
category="Security",
)
self.appbuilder.menu.add_separator("Security")
self.appbuilder.add_view(
self.permissionmodelview,
"Base Permissions",
icon="fa-lock",
label=_("Base Permissions"),
category="Security",
)
self.appbuilder.add_view(
self.viewmenumodelview,
"Views/Menus",
icon="fa-list-alt",
label=_("Views/Menus"),
category="Security",
)
self.appbuilder.add_view(
self.permissionviewmodelview,
"Permission on Views/Menus",
icon="fa-link",
label=_("Permission on Views/Menus"),
category="Security",
)
# @pysnooper.snoop()
def add_org_user(self,username,first_name,last_name,org,email,roles,password="",hashed_password=""):
"""
Generic function to create user
"""
try:
user = self.user_model()
user.first_name = first_name
user.org = org
user.last_name = last_name
user.username = username
user.email = email
user.active = True
user.roles+=roles # attach the default registration roles
user.password=password
# if hashed_password:
# user.password = <PASSWORD>
# else:
# user.password = <PASSWORD>password_<PASSWORD>(password)
self.get_session.add(user)
self.get_session.commit()
try:
from myapp.models.model_team import Project_User, Project
public_project = self.get_session.query(Project).filter(Project.name == "public").filter(Project.type == "org").first()
if public_project:
project_user = Project_User()
project_user.project = public_project
project_user.role = 'dev'
project_user.user_id = user.id
self.get_session.add(project_user)
self.get_session.commit()
except Exception as e1:
self.get_session.rollback()
return user
except Exception as e:
self.get_session.rollback()
return False
# add the user to the public project group
# register a remote user
# @pysnooper.snoop()
def auth_user_remote_org_user(self, username,org_name='',password=''):
if not username:
return None
# look up the user
user = self.find_user(username=username)
# add a role named after the organization, plus the parent roles
# register a role named after the rtx username
rtx_role = self.add_role(username)
# register the user if it does not exist yet
if user is None:
user = self.add_org_user(
username=username,
first_name=username,
last_name=username,
password=password,
org=org_name, # organization
email=username + "@<EMAIL>",
roles=[self.find_role(self.auth_user_registration_role),rtx_role] if self.find_role(self.auth_user_registration_role) else [rtx_role,] # add the default gamma registration role; the org role is not added automatically for now
)
elif not user.is_active: # inactive users are not allowed to log in
print(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
if user:
gamma_role = self.find_role(self.auth_user_registration_role)
if gamma_role and gamma_role not in user.roles:
user.roles.append(gamma_role)
if rtx_role and rtx_role not in user.roles:
user.roles.append(rtx_role)
# update user info
if org_name:
user.org = org_name # refresh the organization field
org_role = self.add_role(org_name)
if org_role not in user.roles:
user.roles.append(org_role)
self.update_user_auth_stat(user)
return user
READ_ONLY_MODEL_VIEWS = {
'link','Minio','Kubernetes Dashboard','Granfana','Wiki'
}
USER_MODEL_VIEWS = {
"UserDBModelView",
"UserLDAPModelView",
"UserOAuthModelView",
"UserOIDModelView",
"UserRemoteUserModelView",
}
# menus visible only to admin
ADMIN_ONLY_VIEW_MENUS = {
"ResetPasswordView",
"RoleModelView",
"List Users",
"List Roles",
"UserStatsChartView",
"Base Permissions",
"Permission on Views/Menus",
"Action Log",
"Views/Menus",
"ViewMenuModelView",
"User's Statistics",
"Security",
} | USER_MODEL_VIEWS
ALPHA_ONLY_VIEW_MENUS = {}
# permissions only admin has
ADMIN_ONLY_PERMISSIONS = {
"can_override_role_permissions",
"can_override_role_permissions",
# "can_approve", # db owner需要授权approve 权限后才能授权
"can_update_role",
}
READ_ONLY_PERMISSION = {"can_show", "can_list",'can_add'}
ALPHA_ONLY_PERMISSIONS = {
"muldelete"
}
# permissions that only exist on user-created menus
OBJECT_SPEC_PERMISSIONS = {
"can_only_access_owned_queries",
}
# basic permissions everyone may have
ACCESSIBLE_PERMS = {"can_userinfo","can_request_access","can_approve"}
# check whether the user holds the named permission on the given view
# @pysnooper.snoop()
def can_access(self, permission_name, view_name):
"""Protecting from has_access failing from missing perms/view"""
user = g.user
if user.is_anonymous:
return self.is_item_public(permission_name, view_name)
return self._has_view_access(user, permission_name, view_name)
# get the views on which the user holds the given permission
def user_view_menu_names(self, permission_name: str):
from myapp import db
base_query = (
db.session.query(self.viewmenu_model.name)
.join(self.permissionview_model)
.join(self.permission_model)
.join(assoc_permissionview_role)
.join(self.role_model)
)
# non-anonymous user
if not g.user.is_anonymous:
# filter by user id
view_menu_names = (
base_query.join(assoc_user_role)
.join(self.user_model)
.filter(self.user_model.id == g.user.id)
.filter(self.permission_model.name == permission_name)
).all()
return set([s.name for s in view_menu_names])
# Properly treat the anonymous user
public_role = self.get_public_role()
if public_role:
# filter by public role
view_menu_names = (
base_query.filter(self.role_model.id == public_role.id).filter(
self.permission_model.name == permission_name
)
).all()
return set([s.name for s in view_menu_names])
return set()
# add a permission on a view
def merge_perm(self, permission_name, view_menu_name):
logging.warning(
"This method 'merge_perm' is deprecated use add_permission_view_menu"
)
self.add_permission_view_menu(permission_name, view_menu_name)
# whether the permission is a user-defined permission
def is_user_defined_permission(self, perm):
return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS
# initialize the custom roles and attach the corresponding permissions to each role
# @pysnooper.snoop()
def sync_role_definitions(self):
"""Inits the Myapp application with security roles and such"""
from myapp import conf
logging.info("Syncing role definition")
# Creating default roles
self.set_role("Admin", self.is_admin_pvm)
self.set_role("Gamma", self.is_gamma_pvm)
self.set_role("granter", self.is_granter_pvm)
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
# clean up permissions
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
logging.info("Cleaning faulty perms")
sesh = self.get_session
pvms = sesh.query(ab_models.PermissionView).filter(
or_(
ab_models.PermissionView.permission == None, # NOQA
ab_models.PermissionView.view_menu == None, # NOQA
)
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logging.info("Deleted {} faulty permissions".format(deleted_count))
# attach permissions to a role; pvm_check is a custom predicate that decides, while iterating permissions, whether each permission should be added to the role
def set_role(self, role_name, pvm_check):
logging.info("Syncing {} perms".format(role_name))
sesh = self.get_session
# fetch all permission-view records
pvms = sesh.query(ab_models.PermissionView).all()
# keep only pvs whose permission and view are both set
pvms = [p for p in pvms if p.permission and p.view_menu]
# add or fetch the role
role = self.add_role(role_name)
# select the pvs that belong to this role
role_pvms = [p for p in pvms if pvm_check(p)]
role.permissions = role_pvms
# persist the pv-role bindings
sesh.merge(role)
sesh.commit()
# whether a permission should belong to the admin role only
def is_admin_only(self, pvm):
# not readonly operations on read only model views allowed only for admins
if (
pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
and pvm.permission.name not in self.READ_ONLY_PERMISSION
):
return True
return (
pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
)
# whether the permission is accessible to everyone by default
def is_accessible_to_all(self, pvm):
return pvm.permission.name in self.ACCESSIBLE_PERMS
# whether a permission should belong to the admin role
def is_admin_pvm(self, pvm):
return not self.is_user_defined_permission(pvm)
# whether a permission should belong to the gamma role
def is_gamma_pvm(self, pvm):
return not (
self.is_user_defined_permission(pvm)
or self.is_admin_only(pvm)
) or self.is_accessible_to_all(pvm)
def is_granter_pvm(self, pvm):
return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
# create the view, the permission, and the view-permission binding record.
def set_perm(self, mapper, connection, target,permission_name): # noqa
#
# connection is sql
# target is tables/db model
if target.perm != target.get_perm():
link_table = target.__table__
connection.execute(
link_table.update()
.where(link_table.c.id == target.id)
.values(perm=target.get_perm())
)
# add to view menu if not already exists
permission_name = permission_name
view_menu_name = target.get_perm()
permission = self.find_permission(permission_name)
view_menu = self.find_view_menu(view_menu_name)
pv = None
# create the permission if it does not exist
if not permission:
permission_table = (
self.permission_model.__table__ # pylint: disable=no-member
)
connection.execute(permission_table.insert().values(name=permission_name))
permission = self.find_permission(permission_name)
# create the view menu if it does not exist
if not view_menu:
view_menu_table = self.viewmenu_model.__table__ # pylint: disable=no-member
connection.execute(view_menu_table.insert().values(name=view_menu_name))
view_menu = self.find_view_menu(view_menu_name)
# check whether a view-permission binding record already exists
if permission and view_menu:
pv = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
# create the view-permission binding record if it is missing
if not pv and permission and view_menu:
permission_view_table = (
self.permissionview_model.__table__ # pylint: disable=no-member
)
connection.execute(
permission_view_table.insert().values(
permission_id=permission.id, view_menu_id=view_menu.id
)
)
# re-fetch the permission-view binding record
pv = (
self.get_session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
return pv
# given a permission and a view, attach the pv to the relevant roles
@classmethod
def add_pv_role(self,permission_name,view_menu_name,session):
permission = session.query(self.permission_model).filter_by(name=permission_name).first()
view_menu = session.query(self.viewmenu_model).filter_by(name=view_menu_name).first()
# check whether a view-permission binding record already exists
if permission and view_menu:
pv = (
session.query(self.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
try:
# attach this pv to every role that matches the user's organization
if pv and g.user and g.user.org:
roles = session.query(self.role_model).all() # fetch all roles and attach the pv under the matching ones
if roles:
for role in roles:
if role.name in g.user.org:
# insert a record into the pvm-role association table
pv_role = session.execute(select([assoc_permissionview_role.c.id]).where(assoc_permissionview_role.c.permission_view_id==pv.id)
.where(assoc_permissionview_role.c.role_id==role.id)
.limit(1)
).fetchall()
if not pv_role:
session.execute(assoc_permissionview_role.insert().values(
permission_view_id=pv.id, role_id=role.id
)
)
except Exception as e:
logging.error(e)
@classmethod
def get_join_projects_id(self,session):
from myapp.models.model_team import Project_User
if g.user:
projects_id = session.query(Project_User.project_id).filter(Project_User.user_id == User.get_user_id()).all()
projects_id = [project_id[0] for project_id in projects_id]
return projects_id
else:
return []
@classmethod
def get_create_pipeline_ids(self,session):
from myapp.models.model_job import Pipeline
if g.user:
pipeline_ids = session.query(Pipeline.id).filter(Pipeline.created_by_fk == User.get_user_id()).all()
pipeline_ids = [pipeline_id[0] for pipeline_id in pipeline_ids]
return pipeline_ids
else:
return []
```
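`MyUser.secret` and `MyappSecurityManager.load_user_from_header` above round-trip the username through an HS256 JWT in the `iss` claim. A minimal sketch of that flow, assuming PyJWT is installed; `SECRET`, `issue_token` and `username_from_token` are illustrative names, and the secret value is only a placeholder (the encode-side secret is redacted in the source, while the decode side uses the literal 'myapp'):
```python
import jwt

SECRET = "myapp"  # placeholder: a real deployment must use one shared secret on both sides

def issue_token(username):
    token = jwt.encode({"iss": username}, SECRET, algorithm="HS256")
    # PyJWT < 2.0 returns bytes, >= 2.0 returns str
    return token.decode("utf-8") if isinstance(token, bytes) else token

def username_from_token(token):
    # no exp/nbf claims are set above, so only the signature is verified here
    return jwt.decode(token, SECRET, algorithms=["HS256"])["iss"]

# username_from_token(issue_token("alice")) == "alice"
```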
#### File: myapp/tasks/async_task.py
```python
import sys,os
import numpy as np
from bs4 import BeautifulSoup
import requests,base64,hashlib
from collections import namedtuple
import datetime
from email.utils import make_msgid, parseaddr
import logging
import time,json
from urllib.error import URLError
import urllib.request
import pysnooper
import re
import croniter
from dateutil.tz import tzlocal
import shutil
import os,sys,io,json,datetime,time
import subprocess
from datetime import datetime, timedelta
import os
import sys
import time
import datetime
from myapp.utils.py.py_k8s import K8s
from myapp.utils.celery import session_scope
from myapp.project import push_message,push_admin
from myapp.tasks.celery_app import celery_app
# Myapp framework imports
from myapp import app, db, security_manager
from myapp.models.model_job import (
Pipeline,
RunHistory,
Workflow,
Tfjob,
Pytorchjob,
Xgbjob,
Task
)
from myapp.models.model_notebook import Notebook
from myapp.security import (
MyUser
)
from myapp.views.view_pipeline import run_pipeline,dag_to_pipeline
from sqlalchemy.exc import InvalidRequestError,OperationalError
from sqlalchemy import or_
from myapp.models.model_docker import Docker
conf = app.config
@celery_app.task(name="task.check_docker_commit", bind=True) # , soft_time_limit=15
def check_docker_commit(task,docker_id): # when triggered from the page for testing, the receiver and id are supplied automatically
with session_scope(nullpool=True) as dbsession:
try:
docker = dbsession.query(Docker).filter_by(id=int(docker_id)).first()
pod_name = "docker-commit-%s-%s" % (docker.created_by.username, str(docker.id))
namespace = conf.get('NOTEBOOK_NAMESPACE')
k8s_client = K8s(conf.get('CLUSTERS').get(conf.get('ENVIRONMENT')).get('KUBECONFIG'))
begin_time=datetime.datetime.now()
now_time=datetime.datetime.now()
while((now_time-begin_time).seconds<1800): # poll for at most 30 minutes for the commit/push to finish
time.sleep(10) # poll interval (assumed); keep it well under the 30-minute deadline
now_time = datetime.datetime.now() # refresh so the 30-minute deadline can actually expire
commit_pods = k8s_client.get_pods(namespace=namespace,pod_name=pod_name)
if commit_pods:
commit_pod=commit_pods[0]
if commit_pod['status']=='Succeeded':
docker.last_image=docker.target_image
dbsession.commit()
break
# any other abnormal status raises an alert immediately
if commit_pod['status']!='Running':
push_message(conf.get('ADMIN_USER').split(','),'commit pod %s not running'%commit_pod['name'])
break
else:
break
except Exception as e:
print(e)
```
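`check_docker_commit` above is a poll-until-deadline loop around a Kubernetes pod status. A minimal sketch of that pattern (not from the source; `poll_until` and its arguments are illustrative, and the 10-second interval is an assumption):
```python
import datetime
import time

def poll_until(condition, timeout_seconds=1800, interval_seconds=10):
    """Call condition() repeatedly until it returns True or the deadline passes."""
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while datetime.datetime.now() < deadline:
        if condition():
            return True
        time.sleep(interval_seconds)
    return False

# e.g. poll_until(lambda: commit_pod_succeeded(), timeout_seconds=1800)
# where commit_pod_succeeded is a hypothetical check against the pod list
```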
#### File: myapp/tools/watch_tfjob.py
```python
import time,datetime,logging,os,sys
import asyncio
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubernetes import watch
from os import path
import json
import requests
from myapp.utils.py.py_k8s import check_status_time
from sqlalchemy.exc import InvalidRequestError,OperationalError
import pysnooper
import myapp
import math
from myapp import app, db, security_manager
from myapp.models.model_job import (
Tfjob,
Task
)
from myapp.utils.celery import session_scope
from myapp.project import push_admin,push_message
from myapp.models.model_job import Pipeline,Workflow
from myapp.models.model_katib import Hyperparameter_Tuning,Experiments
import pymysql
conf=app.config
from myapp.utils.py.py_prometheus import Prometheus
prometheus = Prometheus(conf.get('PROMETHEUS',''))
cluster=os.getenv('ENVIRONMENT','').lower()
if not cluster:
print('no cluster %s'%cluster)
exit(1)
else:
clusters = conf.get('CLUSTERS',{})
if clusters and cluster in clusters:
kubeconfig = clusters[cluster]['KUBECONFIG']
k8s_config.kube_config.load_kube_config(config_file=kubeconfig)
else:
print('no kubeconfig in cluster %s' % cluster)
exit(1)
# push a WeChat notification
# @pysnooper.snoop()
def deliver_message(tfjob):
if not tfjob:
return
receivers = tfjob.username.split(',')
receivers = [receiver.strip() for receiver in receivers]
if not receivers:
print('no receivers')
return
info_json = json.loads(tfjob.info_json)
# print(info_json,experiments.status)
if tfjob.status in info_json['alert_status'] and tfjob.status not in info_json['has_push']:
receivers=list(set(receivers))
# data = {
# "Sender": sender,
# "Rcptto":receivers,
# }
workflow_name = info_json.get('workflow_name','')
hp_name = info_json.get('hp_name', '')
if workflow_name:
message = "tfjob: %s \nworkflow: %s \nnamespace: %s\nstatus: %s \ntime: %s" % (tfjob.name,workflow_name,tfjob.namespace,tfjob.status,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
elif hp_name:
message = "tfjob: %s \nhp: %s(%s) \nnamespace: %s\nstatus: %s \ntime: %s" % (tfjob.name,info_json.get('hp_name',''),info_json.get('describe',''),tfjob.namespace,tfjob.status,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
else:
message = "tfjob: %s \nnamespace: %s\nstatus: %s \ntime: %s" % (tfjob.name,tfjob.namespace,tfjob.status,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
if message:
push_message(receivers,message)
# @pysnooper.snoop()
def check_has_push(crd,dbsession):
# may have been launched by a workflow or by a hyperparameter-tuning (hp) run
workflow_name = crd['labels'].get('workflow-name','')
hp_name = crd['labels'].get('hp-name', '')
username = crd['username']
alert_status = ''
# if it was created from a workflow
if workflow_name:
pipeline = dbsession.query(Pipeline).filter_by(name=workflow_name).first()
if pipeline and pipeline.alert_status:
alert_status = pipeline.alert_status
print("tf %s from workflow_name %s,user %s,status %s" % (crd['name'],workflow_name,crd['username'],crd['status']))
if hp_name:
hp = dbsession.query(Hyperparameter_Tuning).filter_by(name=hp_name).first()
if hp and hp.alert_status:
alert_status = hp.alert_status
print("tf %s from hp %s,user %s,status %s" % (crd['name'], workflow_name, crd['username'], crd['status']))
# print("%s status %s"%(crd['name'], crd['status']))
alert_status='Pending' # hard-coded: alert on Pending, and only on Pending
info_json={
"workflow_name":workflow_name,
"hp_name":hp_name,
"alert_status": alert_status,
"has_push":''
}
# print(crd['name'],crd['namespace'])
tfjob = dbsession.query(Tfjob).filter(Tfjob.name==crd['name']).filter(Tfjob.namespace==crd['namespace']).first()
if tfjob:
print('exist tfjob')
if tfjob.info_json:
exist_info_json = json.loads(tfjob.info_json)
info_json['has_push']=exist_info_json.get('has_push','')
tfjob.create_time = crd['create_time']
tfjob.status = crd['status']
tfjob.annotations = json.dumps(crd['annotations'],indent=4,ensure_ascii=False)
tfjob.labels = json.dumps(crd['labels'],indent=4,ensure_ascii=False)
tfjob.spec = json.dumps(crd['spec'],indent=4,ensure_ascii=False),
tfjob.status_more = json.dumps(crd['status_more'],indent=4,ensure_ascii=False)
tfjob.username = crd['username']
tfjob.info_json = json.dumps(info_json,indent=4,ensure_ascii=False)
dbsession.commit()
if crd['status'] in info_json['alert_status'] and crd['status'] not in info_json['has_push']:
return False,tfjob
else:
return True,tfjob
else:
print('new tfjob')
# crd['status_more']={}
# crd['spec']={}
tfjob = Tfjob(name=crd['name'],namespace=crd['namespace'],create_time=crd['create_time'],
status=crd['status'],
annotations=json.dumps(crd['annotations'],indent=4,ensure_ascii=False),
labels=json.dumps(crd['labels'],indent=4,ensure_ascii=False),
spec=json.dumps(crd['spec'],indent=4,ensure_ascii=False),
status_more=json.dumps(crd['status_more'],indent=4,ensure_ascii=False),
username=username,
info_json=json.dumps(info_json,indent=4,ensure_ascii=False))
dbsession.add(tfjob)
dbsession.commit()
return False,tfjob
#
# # push a resource-recommendation notification
# @pysnooper.snoop()
# def push_resource_rec(task,dbsession):
# task_monitorings = json.loads(task.monitoring).get('tfjob',[])
# if len(task_monitorings)>9:
# max_cpu = 0
# max_memory=0
# init_message = 'pipeline(%s)中分布式训练%s,推荐资源如下,自行修改:\n' % (task.pipeline.describe,task.label)
# message = init_message
# # tfjob_src_mem=re.match(task.args.match("memory": "32G",))
# for task_monitoring in task_monitorings:
# if float(task_monitoring.get('cpu',0))>max_cpu:
# max_cpu = float(task_monitoring.get('cpu',0))
# if float(task_monitoring.get('memory', 0)) > max_memory:
# max_memory = float(task_monitoring.get('memory', 0))
# if max_cpu:
# rec_cpu = math.ceil(max_cpu*1.4)
# if rec_cpu>150:
# rec_cpu=150
# if rec_cpu!=int(task.resource_cpu):
# message += "task(%s),原申请cpu:%s,近10次最大使用cpu:%s,建议申请值:%s\n" % (task.label,task.resource_cpu, max_cpu, rec_cpu)
# task.resource_cpu = str(rec_cpu)
# if max_memory:
# rec_memory = math.ceil(max_memory*1.4)
# if rec_memory>350:
# rec_memory=350
# if rec_memory!=int(task.resource_memory.replace('G','').replace('M','')):
# message += "task(%s),原申请mem:%s,近10次最大使用mem:%s(G),建议申请值:%s\n" % (task.label,task.resource_memory, max_memory, str(rec_memory)+"G")
# task.resource_memory = str(rec_memory)+"G"
#
# dbsession.commit()
# if message!=init_message:
# push_message([task.pipeline.created_by.username],message)
# @pysnooper.snoop()
def save_monitoring(tfjob,dbsession):
try:
if tfjob.status=='Succeeded':
task_id = json.loads(tfjob.labels).get('task-id','')
if task_id:
task = dbsession.query(Task).filter_by(id=int(task_id)).first()
metrics = prometheus.get_resource_metric(tfjob.name, namespace='pipeline')
monitoring = json.loads(task.monitoring) if task and task.monitoring else {}
tfjob_monitoring = monitoring.get('tfjob', [])
if metrics:
tfjob_monitoring.append({
"cpu": metrics.get('cpu', ''),
"memory": metrics.get('memory', ''),
"name": tfjob.name,
"update_time": datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
})
# prune the monitoring records
tfjob_monitoring_new = []
for metric in tfjob_monitoring:
# drop samples whose values look wrong and samples that are too old
if float(metric.get('cpu', 0)) > 0.1 and float(metric.get('memory', 0)) > 0.1 and metric['update_time'] > (datetime.datetime.now() - datetime.timedelta(days=30)).strftime('%Y-%m-%d %H:%M:%S'):
tfjob_monitoring_new.append(metric)
if len(tfjob_monitoring_new) > 10:
del tfjob_monitoring_new[0]
monitoring_new = {}
monitoring_new['task'] = monitoring.get('task', [])
monitoring_new['tfjob'] = tfjob_monitoring_new
print(monitoring_new)
if task:
task.monitoring = json.dumps(monitoring_new,ensure_ascii=False,indent=4)
dbsession.commit()
# print(pods)
# push_resource_rec(task, dbsession)
except Exception as e:
print(e)
# @pysnooper.snoop()
def save_history(tfjob,dbsession):
info_json = json.loads(tfjob.info_json)
if info_json['has_push']:
if not tfjob.status in info_json['has_push']:
info_json['has_push'] += ',' + tfjob.status
else:
info_json['has_push'] = tfjob.status
tfjob.info_json = json.dumps(info_json, indent=4, ensure_ascii=False)
dbsession.commit()
# @pysnooper.snoop()
def check_crd_exist(group,version,namespace,plural,name):
client = k8s_client.CustomObjectsApi()
exist_crd = client.get_namespaced_custom_object(group,version,namespace,plural,name)
return exist_crd
@pysnooper.snoop()
def deal_event(event,crd_info,namespace):
with session_scope(nullpool=True) as dbsession:
try:
crd_object = event['object']
exist_crd = check_crd_exist(group=crd_info['group'], version=crd_info["version"], namespace=namespace,
plural=crd_info["plural"], name=crd_object['metadata']['name'])
if not exist_crd:
print('not exist')
return
status = ''
if 'status' in crd_object:
if 'conditions' in crd_object['status']:
if len(crd_object['status']['conditions']) > 0:
if 'type' in crd_object['status']['conditions'][-1]:
status = crd_object['status']['conditions'][-1]['type']
creat_time = crd_object['metadata']['creationTimestamp'].replace('T', ' ').replace('Z', '')
creat_time = (datetime.datetime.strptime(creat_time, '%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=8)).strftime('%Y-%m-%d %H:%M:%S')
back_object = {
'username': '',
"name": crd_object['metadata']['name'],
"namespace": crd_object['metadata']['namespace'] if 'namespace' in crd_object['metadata'] else '',
"annotations": crd_object['metadata'].get('annotations', {}),
"labels": crd_object['metadata'].get('labels', {}),
"spec": crd_object['spec'],
"create_time": creat_time,
"status": status,
"status_more": check_status_time(crd_object['status']) if 'status' in crd_object else {}
}
if 'run-rtx' in back_object['labels']:
back_object['username'] = back_object['labels']['run-rtx']
elif 'upload-rtx' in back_object['labels']:
back_object['username'] = back_object['labels']['upload-rtx']
has_push, crd_model = check_has_push(back_object,dbsession)
if not has_push:
try:
deliver_message(crd_model)
except Exception as e1:
print('push fail:', e1)
push_admin(str(e1))
save_history(crd_model,dbsession)
save_monitoring(crd_model,dbsession)
except Exception as e:
print(e)
@pysnooper.snoop()
def listen_crd():
crd_info = conf.get('CRD_INFO')['tfjob']
namespace = conf.get('PIPELINE_NAMESPACE')
w = watch.Watch()
print('begin listen')
while(True):
try:
for event in w.stream(k8s_client.CustomObjectsApi().list_namespaced_custom_object, group=crd_info['group'],
version=crd_info['version'],
namespace=namespace, plural=crd_info['plural'], pretty='true'):
if event['type']=='ADDED' or event['type']=='MODIFIED': # ADDED MODIFIED DELETED
deal_event(event,crd_info,namespace)
elif event['type']=='ERROR':
w = watch.Watch()
time.sleep(60)
except Exception as ee:
print(ee)
# async io cannot be used here because stream() blocks
if __name__=='__main__':
listen_crd()
```
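`deal_event` above reads the CRD status as the `type` of the most recent entry in `status.conditions`. A minimal, pure-Python sketch of that extraction (the helper name is illustrative):
```python
def crd_status(crd_object):
    # the newest condition is last; fall back to '' when no conditions are present
    conditions = (crd_object.get('status') or {}).get('conditions') or []
    if conditions and 'type' in conditions[-1]:
        return conditions[-1]['type']
    return ''

# crd_status({'status': {'conditions': [{'type': 'Created'}, {'type': 'Succeeded'}]}}) -> 'Succeeded'
```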
#### File: myapp/views/route.py
```python
from myapp import (
app,
appbuilder,
conf,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
results_backend,
security_manager,
)
config = app.config
@app.route("/health")
def health():
return "OK"
@app.route("/healthcheck")
def healthcheck():
return "OK"
@app.route("/ping")
def ping():
return "OK"
```
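These probe routes only return the string "OK". A minimal sketch of exercising them with Flask's test client, assuming the same `app` object is importable in a test context:
```python
from myapp import app

with app.test_client() as client:
    for path in ("/health", "/healthcheck", "/ping"):
        assert client.get(path).data == b"OK"
```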
#### File: myapp/views/view_katib.py
```python
from flask import render_template,redirect
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder import ModelView, ModelRestApi
from flask_appbuilder import ModelView,AppBuilder,expose,BaseView,has_access
from importlib import reload
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
# expose the model as a view and control how it is shown in the frontend
import uuid
from myapp.models.model_katib import Hyperparameter_Tuning
from myapp.models.model_job import Repository
from flask_appbuilder.actions import action
from flask_appbuilder.models.sqla.filters import FilterEqualFunction, FilterStartsWith,FilterEqual,FilterNotEqual
from wtforms.validators import EqualTo,Length
from flask_babel import lazy_gettext,gettext
from flask_appbuilder.security.decorators import has_access
from flask_appbuilder.forms import GeneralModelConverter
from myapp.utils import core
from myapp import app, appbuilder,db,event_logger
from wtforms.ext.sqlalchemy.fields import QuerySelectField
import os,sys
from wtforms.validators import DataRequired, Length, NumberRange, Optional,Regexp
from sqlalchemy import and_, or_, select
from myapp.exceptions import MyappException
from wtforms import BooleanField, IntegerField, SelectField, StringField,FloatField,DateField,DateTimeField,SelectMultipleField,FormField,FieldList
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget,BS3PasswordFieldWidget,DatePickerWidget,DateTimePickerWidget,Select2ManyWidget,Select2Widget
from myapp.forms import MyBS3TextAreaFieldWidget,MySelect2Widget,MyCodeArea,MyLineSeparatedListField,MyJSONField,MyBS3TextFieldWidget,MyCommaSeparatedListField,MySelectMultipleField
from myapp.views.view_team import Project_Filter
from myapp.utils.py import py_k8s
from flask_wtf.file import FileField
import shlex
import re,copy
from flask import (
current_app,
abort,
flash,
g,
Markup,
make_response,
redirect,
render_template,
request,
send_from_directory,
Response,
url_for,
)
from .baseApi import (
MyappModelRestApi
)
from myapp import security_manager
from werkzeug.datastructures import FileStorage
from .base import (
api,
BaseMyappView,
check_ownership,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
MyappFilter,
MyappModelView,
)
from flask_appbuilder import CompactCRUDMixin, expose
import pysnooper,datetime,time,json
from kubernetes.client import V1ObjectMeta
import kubeflow.katib as kc
from kubeflow.katib import constants
from kubeflow.katib import utils
from kubeflow.katib import V1alpha3AlgorithmSetting
from kubeflow.katib import V1alpha3AlgorithmSetting
from kubeflow.katib import V1alpha3AlgorithmSpec
from kubeflow.katib import V1alpha3CollectorSpec
from kubeflow.katib import V1alpha3EarlyStoppingSetting
from kubeflow.katib import V1alpha3EarlyStoppingSpec
from kubeflow.katib import V1alpha3Experiment
from kubeflow.katib import V1alpha3ExperimentCondition
from kubeflow.katib import V1alpha3ExperimentList
from kubeflow.katib import V1alpha3ExperimentSpec
from kubeflow.katib import V1alpha3ExperimentStatus
from kubeflow.katib import V1alpha3FeasibleSpace
from kubeflow.katib import V1alpha3FileSystemPath
from kubeflow.katib import V1alpha3FilterSpec
from kubeflow.katib import V1alpha3GoTemplate
from kubeflow.katib import V1alpha3GraphConfig
from kubeflow.katib import V1alpha3Metric
from kubeflow.katib import V1alpha3MetricsCollectorSpec
from kubeflow.katib import V1alpha3NasConfig
from kubeflow.katib import V1alpha3ObjectiveSpec
from kubeflow.katib import V1alpha3Observation
from kubeflow.katib import V1alpha3Operation
from kubeflow.katib import V1alpha3OptimalTrial
from kubeflow.katib import V1alpha3ParameterAssignment
from kubeflow.katib import V1alpha3ParameterSpec
from kubeflow.katib import V1alpha3SourceSpec
from kubeflow.katib import V1alpha3Suggestion
from kubeflow.katib import V1alpha3SuggestionCondition
from kubeflow.katib import V1alpha3SuggestionList
from kubeflow.katib import V1alpha3SuggestionSpec
from kubeflow.katib import V1alpha3SuggestionStatus
from kubeflow.katib import V1alpha3TemplateSpec
from kubeflow.katib import V1alpha3Trial
from kubeflow.katib import V1alpha3TrialAssignment
from kubeflow.katib import V1alpha3TrialCondition
from kubeflow.katib import V1alpha3TrialList
from kubeflow.katib import V1alpha3TrialSpec
from kubeflow.katib import V1alpha3TrialStatus
from kubeflow.katib import V1alpha3TrialTemplate
conf = app.config
class HP_Filter(MyappFilter):
# @pysnooper.snoop()
def apply(self, query, func):
user_roles = [role.name.lower() for role in list(self.get_user_roles())]
if "admin" in user_roles:
return query.order_by(self.model.id.desc())
join_projects_id = security_manager.get_join_projects_id(db.session)
# public_project_id =
# logging.info(join_projects_id)
return query.filter(
or_(
self.model.project_id.in_(join_projects_id),
# self.model.project.name.in_(['public'])
)
).order_by(self.model.id.desc())
# database model view definition
class Hyperparameter_Tuning_ModelView_Base():
datamodel = SQLAInterface(Hyperparameter_Tuning)
conv = GeneralModelConverter(datamodel)
label_title='超参搜索'
check_redirect_list_url = '/hyperparameter_tuning_modelview/list/'
help_url = conf.get('HELP_URL', {}).get(datamodel.obj.__tablename__, '') if datamodel else ''
base_permissions = ['can_add', 'can_edit', 'can_delete', 'can_list', 'can_show'] # the defaults
base_order = ('id', 'desc')
base_filters = [["id", HP_Filter, lambda: []]] # 设置权限过滤器
order_columns = ['id']
list_columns = ['project','name_url','describe','job_type','creator','run_url','modified']
show_columns = ['created_by','changed_by','created_on','changed_on','job_type','name','namespace','describe',
'parallel_trial_count','max_trial_count','max_failed_trial_count','objective_type',
'objective_goal','objective_metric_name','objective_additional_metric_names','algorithm_name',
'algorithm_setting','parameters_html','trial_spec_html','experiment_html']
add_form_query_rel_fields = {
"project": [["name", Project_Filter, 'org']]
}
edit_form_query_rel_fields = add_form_query_rel_fields
edit_form_extra_fields={}
edit_form_extra_fields["alert_status"] = MySelectMultipleField(
label=_(datamodel.obj.lab('alert_status')),
widget=Select2ManyWidget(),
choices=[[x, x] for x in
['Pending', 'Running', 'Succeeded', 'Failed', 'Unknown', 'Waiting', 'Terminated']],
description="选择通知状态",
)
edit_form_extra_fields['name'] = StringField(
_(datamodel.obj.lab('name')),
description='英文名(字母、数字、- 组成),最长50个字符',
widget=BS3TextFieldWidget(),
validators=[DataRequired(), Regexp("^[a-z][a-z0-9\-]*[a-z0-9]$"), Length(1, 54)]
)
edit_form_extra_fields['describe'] = StringField(
_(datamodel.obj.lab('describe')),
description='中文描述',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['namespace'] = StringField(
_(datamodel.obj.lab('namespace')),
description='运行命名空间',
widget=BS3TextFieldWidget(),
default=datamodel.obj.namespace.default.arg,
validators=[DataRequired()]
)
edit_form_extra_fields['parallel_trial_count'] = IntegerField(
_(datamodel.obj.lab('parallel_trial_count')),
default=datamodel.obj.parallel_trial_count.default.arg,
description='可并行的计算实例数目',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['max_trial_count'] = IntegerField(
_(datamodel.obj.lab('max_trial_count')),
default=datamodel.obj.max_trial_count.default.arg,
description='最大并行的计算实例数目',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['max_failed_trial_count'] = IntegerField(
_(datamodel.obj.lab('max_failed_trial_count')),
default=datamodel.obj.max_failed_trial_count.default.arg,
description='最大失败的计算实例数目',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['objective_type'] = SelectField(
_(datamodel.obj.lab('objective_type')),
default=datamodel.obj.objective_type.default.arg,
description='目标函数类型(和自己代码中对应)',
widget=Select2Widget(),
choices=[['maximize', 'maximize'], ['minimize', 'minimize']],
validators=[DataRequired()]
)
edit_form_extra_fields['objective_goal'] = FloatField(
_(datamodel.obj.lab('objective_goal')),
default=datamodel.obj.objective_goal.default.arg,
description='目标门限',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['objective_metric_name'] = StringField(
_(datamodel.obj.lab('objective_metric_name')),
default=datamodel.obj.objective_metric_name.default.arg,
description='目标函数(和自己代码中对应)',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['objective_additional_metric_names'] = StringField(
_(datamodel.obj.lab('objective_additional_metric_names')),
default=datamodel.obj.objective_additional_metric_names.default.arg,
description='其他目标函数(和自己代码中对应)',
widget=BS3TextFieldWidget()
)
algorithm_name_choices = ['grid', 'random', 'hyperband', 'bayesianoptimization']
algorithm_name_choices = [[algorithm_name_choice, algorithm_name_choice] for algorithm_name_choice in
algorithm_name_choices]
edit_form_extra_fields['algorithm_name'] = SelectField(
_(datamodel.obj.lab('algorithm_name')),
default=datamodel.obj.algorithm_name.default.arg,
description='搜索算法',
widget=Select2Widget(),
choices=algorithm_name_choices,
validators=[DataRequired()]
)
edit_form_extra_fields['algorithm_setting'] = StringField(
_(datamodel.obj.lab('algorithm_setting')),
default=datamodel.obj.algorithm_setting.default.arg,
widget=BS3TextFieldWidget(),
description='搜索算法配置'
)
edit_form_extra_fields['parameters_demo'] = StringField(
_(datamodel.obj.lab('parameters_demo')),
description='搜索参数示例,标准json格式,注意:所有整型、浮点型都写成字符串型',
widget=MyCodeArea(code=core.hp_parameters_demo()),
)
edit_form_extra_fields['parameters'] = StringField(
_(datamodel.obj.lab('parameters')),
default=datamodel.obj.parameters.default.arg,
description='搜索参数,注意:所有整型、浮点型都写成字符串型',
widget=MyBS3TextAreaFieldWidget(rows=10),
validators=[DataRequired()]
)
edit_form_extra_fields['node_selector'] = StringField(
_(datamodel.obj.lab('node_selector')),
description="部署task所在的机器(目前无需填写)",
widget=BS3TextFieldWidget()
)
edit_form_extra_fields['working_dir'] = StringField(
_(datamodel.obj.lab('working_dir')),
description="工作目录,如果为空,则使用Dockerfile中定义的workingdir",
widget=BS3TextFieldWidget()
)
edit_form_extra_fields['image_pull_policy'] = SelectField(
_(datamodel.obj.lab('image_pull_policy')),
description="镜像拉取策略(always为总是拉取远程镜像,IfNotPresent为若本地存在则使用本地镜像)",
widget=Select2Widget(),
choices=[['Always', 'Always'], ['IfNotPresent', 'IfNotPresent']]
)
edit_form_extra_fields['volume_mount'] = StringField(
_(datamodel.obj.lab('volume_mount')),
description='外部挂载,格式:$pvc_name1(pvc):/$container_path1,$pvc_name2(pvc):/$container_path2',
widget=BS3TextFieldWidget()
)
edit_form_extra_fields['resource_memory'] = StringField(
_(datamodel.obj.lab('resource_memory')),
default=datamodel.obj.resource_memory.default.arg,
description='内存的资源使用限制(每个测试实例),示例:1G,20G',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
edit_form_extra_fields['resource_cpu'] = StringField(
_(datamodel.obj.lab('resource_cpu')),
default=datamodel.obj.resource_cpu.default.arg,
description='cpu的资源使用限制(每个测试实例)(单位:核),示例:2', widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
# @pysnooper.snoop()
def set_column(self, hp=None):
# prepare the edit form
request_data = request.args.to_dict()
job_type = request_data.get('job_type', '')
if hp:
job_type = hp.job_type
job_type_choices = ['','TFJob','XGBoostJob','PyTorchJob','Job']
job_type_choices = [[job_type_choice,job_type_choice] for job_type_choice in job_type_choices]
if hp:
self.edit_form_extra_fields['job_type'] = SelectField(
_(self.datamodel.obj.lab('job_type')),
description="超参搜索的任务类型",
choices=job_type_choices,
widget=MySelect2Widget(extra_classes="readonly",value=job_type),
validators=[DataRequired()]
)
else:
self.edit_form_extra_fields['job_type'] = SelectField(
_(self.datamodel.obj.lab('job_type')),
description="超参搜索的任务类型",
widget=MySelect2Widget(new_web=True,value=job_type),
choices=job_type_choices,
validators=[DataRequired()]
)
self.edit_form_extra_fields['tf_worker_num'] = IntegerField(
_(self.datamodel.obj.lab('tf_worker_num')),
default=json.loads(hp.job_json).get('tf_worker_num',3) if hp and hp.job_json else 3,
description='工作节点数目',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['tf_worker_image'] = StringField(
_(self.datamodel.obj.lab('tf_worker_image')),
default=json.loads(hp.job_json).get('tf_worker_image',conf.get('KATIB_TFJOB_DEFAULT_IMAGE','')) if hp and hp.job_json else conf.get('KATIB_TFJOB_DEFAULT_IMAGE',''),
description='工作节点镜像',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['tf_worker_command'] = StringField(
_(self.datamodel.obj.lab('tf_worker_command')),
default=json.loads(hp.job_json).get('tf_worker_command','python xx.py') if hp and hp.job_json else 'python xx.py',
description='工作节点启动命令',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['job_worker_image'] = StringField(
_(self.datamodel.obj.lab('job_worker_image')),
default=json.loads(hp.job_json).get('job_worker_image',conf.get('KATIB_JOB_DEFAULT_IMAGE','')) if hp and hp.job_json else conf.get('KATIB_JOB_DEFAULT_IMAGE',''),
description='工作节点镜像',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['job_worker_command'] = StringField(
_(self.datamodel.obj.lab('job_worker_command')),
default=json.loads(hp.job_json).get('job_worker_command','python xx.py') if hp and hp.job_json else 'python xx.py',
description='工作节点启动命令',
widget=MyBS3TextAreaFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['pytorch_worker_num'] = IntegerField(
_(self.datamodel.obj.lab('pytorch_worker_num')),
default=json.loads(hp.job_json).get('pytorch_worker_num', 3) if hp and hp.job_json else 3,
description='工作节点数目',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['pytorch_worker_image'] = StringField(
_(self.datamodel.obj.lab('pytorch_worker_image')),
default=json.loads(hp.job_json).get('pytorch_worker_image',conf.get('KATIB_PYTORCHJOB_DEFAULT_IMAGE','')) if hp and hp.job_json else conf.get('KATIB_PYTORCHJOB_DEFAULT_IMAGE',''),
description='工作节点镜像',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['pytorch_master_command'] = StringField(
_(self.datamodel.obj.lab('pytorch_master_command')),
default=json.loads(hp.job_json).get('pytorch_master_command',
'python xx.py') if hp and hp.job_json else 'python xx.py',
description='master节点启动命令',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_form_extra_fields['pytorch_worker_command'] = StringField(
_(self.datamodel.obj.lab('pytorch_worker_command')),
default=json.loads(hp.job_json).get('pytorch_worker_command',
'python xx.py') if hp and hp.job_json else 'python xx.py',
description='工作节点启动命令',
widget=BS3TextFieldWidget(),
validators=[DataRequired()]
)
self.edit_columns = ['job_type','project','name','namespace','describe','parallel_trial_count','max_trial_count','max_failed_trial_count',
'objective_type','objective_goal','objective_metric_name','objective_additional_metric_names',
'algorithm_name','algorithm_setting','parameters_demo',
'parameters']
self.edit_fieldsets=[(
lazy_gettext('common'),
{"fields": copy.deepcopy(self.edit_columns), "expanded": True},
)]
if job_type=='TFJob':
group_columns = ['tf_worker_num','tf_worker_image','tf_worker_command']
self.edit_fieldsets.append((
lazy_gettext(job_type),
{"fields":group_columns, "expanded": True},
)
)
for column in group_columns:
self.edit_columns.append(column)
if job_type=='Job':
group_columns = ['job_worker_image','job_worker_command']
self.edit_fieldsets.append((
lazy_gettext(job_type),
{"fields":group_columns, "expanded": True},
)
)
for column in group_columns:
self.edit_columns.append(column)
if job_type=='PyTorchJob':
group_columns = ['pytorch_worker_num','pytorch_worker_image','pytorch_master_command','pytorch_worker_command']
self.edit_fieldsets.append((
lazy_gettext(job_type),
{"fields":group_columns, "expanded": True},
)
)
for column in group_columns:
self.edit_columns.append(column)
if job_type=='XGBoostJob':
group_columns = ['pytorchjob_worker_image','pytorchjob_worker_command']
self.edit_fieldsets.append((
lazy_gettext(job_type),
{"fields":group_columns, "expanded": True},
)
)
for column in group_columns:
self.edit_columns.append(column)
task_column=['working_dir','volume_mount','node_selector','image_pull_policy','resource_memory','resource_cpu']
self.edit_fieldsets.append((
lazy_gettext('task args'),
{"fields": task_column, "expanded": True},
))
for column in task_column:
self.edit_columns.append(column)
self.edit_fieldsets.append((
lazy_gettext('run experiment'),
{"fields": ['alert_status'], "expanded": True},
))
self.edit_columns.append('alert_status')
self.add_form_extra_fields = self.edit_form_extra_fields
self.add_fieldsets = self.edit_fieldsets
self.add_columns=self.edit_columns
# process the form request
def process_form(self, form, is_created):
# from flask_appbuilder.forms import DynamicForm
if 'parameters_demo' in form._fields:
del form._fields['parameters_demo'] # do not process this field
# build the experiment
# @pysnooper.snoop()
def make_experiment(self,item):
# search algorithm settings
algorithmsettings = []
for setting in item.algorithm_setting.strip().split(','):
setting = setting.strip()
if setting:
key,value = setting.split('=')[0].strip(),setting.split('=')[1].strip()
algorithmsettings.append(V1alpha3AlgorithmSetting(name=key,value=value))
algorithm = V1alpha3AlgorithmSpec(
algorithm_name=item.algorithm_name,
algorithm_settings=algorithmsettings if algorithmsettings else None
)
# metrics collection for the experiment results; there are many collector kinds, so this should not be hard-coded here.
metrics_collector_spec=None
if item.job_type=='TFJob':
collector = V1alpha3CollectorSpec(kind="TensorFlowEvent")
source = V1alpha3SourceSpec(V1alpha3FileSystemPath(kind="Directory", path="/train"))
metrics_collector_spec = V1alpha3MetricsCollectorSpec(
collector=collector,
source=source)
elif item.job_type=='Job':
pass
# objective
objective = V1alpha3ObjectiveSpec(
goal=item.objective_goal,
objective_metric_name=item.objective_metric_name,
type=item.objective_type)
# search parameters
parameters=[]
hp_parameters = json.loads(item.parameters)
for parameter in hp_parameters:
if hp_parameters[parameter]['type']=='int' or hp_parameters[parameter]['type']=='double':
feasible_space = V1alpha3FeasibleSpace(
min=str(hp_parameters[parameter]['min']),
max=str(hp_parameters[parameter]['max']),
step = str(hp_parameters[parameter].get('step','')) if hp_parameters[parameter].get('step','') else None)
parameters.append(V1alpha3ParameterSpec(
feasible_space=feasible_space,
name=parameter,
parameter_type=hp_parameters[parameter]['type']
))
elif hp_parameters[parameter]['type']=='categorical':
feasible_space = V1alpha3FeasibleSpace(list=hp_parameters[parameter]['list'])
parameters.append(V1alpha3ParameterSpec(
feasible_space=feasible_space,
name=parameter,
parameter_type=hp_parameters[parameter]['type']
))
# trial template
go_template = V1alpha3GoTemplate(
raw_template=item.trial_spec
)
trial_template = V1alpha3TrialTemplate(go_template=go_template)
labels = {
"run-rtx":g.user.username,
"hp-name":item.name,
# "hp-describe": item.describe
}
# the Experiment object that runs the trials
experiment = V1alpha3Experiment(
api_version= conf.get('CRD_INFO')['experiment']['group']+"/"+ conf.get('CRD_INFO')['experiment']['version'] ,#"kubeflow.org/v1alpha3",
kind="Experiment",
metadata=V1ObjectMeta(name=item.name+"-"+uuid.uuid4().hex[:4], namespace=conf.get('KATIB_NAMESPACE'),labels=labels),
spec=V1alpha3ExperimentSpec(
algorithm=algorithm,
max_failed_trial_count=item.max_failed_trial_count,
max_trial_count=item.max_trial_count,
metrics_collector_spec=metrics_collector_spec,
objective=objective,
parallel_trial_count=item.parallel_trial_count,
parameters=parameters,
trial_template=trial_template
)
)
item.experiment = json.dumps(experiment.to_dict(),indent=4,ensure_ascii=False)
@expose('/create_experiment/<id>',methods=['GET'])
# @pysnooper.snoop(watch_explode=('hp',))
def create_experiment(self,id):
hp = db.session.query(Hyperparameter_Tuning).filter(Hyperparameter_Tuning.id == int(id)).first()
if hp:
from myapp.utils.py.py_k8s import K8s
k8s_client = K8s(hp.project.cluster['KUBECONFIG'])
namespace = conf.get('KATIB_NAMESPACE')
crd_info =conf.get('CRD_INFO')['experiment']
print(hp.experiment)
k8s_client.create_crd(group=crd_info['group'],version=crd_info['version'],plural=crd_info['plural'],namespace=namespace,body=hp.experiment)
flash('部署完成','success')
# kclient = kc.KatibClient()
# kclient.create_experiment(hp, namespace=conf.get('KATIB_NAMESPACE'))
self.update_redirect()
return redirect(self.get_redirect())
# @pysnooper.snoop(watch_explode=())
def merge_trial_spec(self,item):
image_secrets = conf.get('HUBSECRET',[])
user_hubsecrets = db.session.query(Repository.hubsecret).filter(Repository.created_by_fk == g.user.id).all()
if user_hubsecrets:
for hubsecret in user_hubsecrets:
if hubsecret[0] not in image_secrets:
image_secrets.append(hubsecret[0])
image_secrets = [
{
"name": hubsecret
} for hubsecret in image_secrets
]
item.job_json={}
if item.job_type=='TFJob':
item.trial_spec=core.merge_tfjob_experiment_template(
worker_num=item.tf_worker_num,
node_selector=item.get_node_selector(),
volume_mount=item.volume_mount,
image=item.tf_worker_image,
image_secrets = image_secrets,
hostAliases=conf.get('HOSTALIASES',''),
workingDir=item.working_dir,
image_pull_policy=item.image_pull_policy,
resource_memory=item.resource_memory,
resource_cpu=item.resource_cpu,
command=item.tf_worker_command
)
item.job_json={
"tf_worker_num":item.tf_worker_num,
"tf_worker_image": item.tf_worker_image,
"tf_worker_command": item.tf_worker_command,
}
if item.job_type == 'Job':
item.trial_spec=core.merge_job_experiment_template(
node_selector=item.get_node_selector(),
volume_mount=item.volume_mount,
image=item.job_worker_image,
image_secrets=image_secrets,
hostAliases=conf.get('HOSTALIASES',''),
workingDir=item.working_dir,
image_pull_policy=item.image_pull_policy,
resource_memory=item.resource_memory,
resource_cpu=item.resource_cpu,
command=item.job_worker_command
)
item.job_json = {
"job_worker_image": item.job_worker_image,
"job_worker_command": item.job_worker_command,
}
if item.job_type == 'PyTorchJob':
item.trial_spec=core.merge_pytorchjob_experiment_template(
worker_num=item.pytorch_worker_num,
node_selector=item.get_node_selector(),
volume_mount=item.volume_mount,
image=item.pytorch_worker_image,
image_secrets=image_secrets,
hostAliases=conf.get('HOSTALIASES', ''),
workingDir=item.working_dir,
image_pull_policy=item.image_pull_policy,
resource_memory=item.resource_memory,
resource_cpu=item.resource_cpu,
master_command=item.pytorch_master_command,
worker_command=item.pytorch_worker_command
)
item.job_json = {
"pytorch_worker_num":item.pytorch_worker_num,
"pytorch_worker_image": item.pytorch_worker_image,
"pytorch_master_command": item.pytorch_master_command,
"pytorch_worker_command": item.pytorch_worker_command,
}
item.job_json = json.dumps(item.job_json,indent=4,ensure_ascii=False)
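        # Illustrative shape (not from the original source) of the job_json built
        # above for a TFJob item; values are made-up placeholders:
        #   {"tf_worker_num": 3,
        #    "tf_worker_image": "tensorflow/tensorflow:2.3.0-gpu",
        #    "tf_worker_command": "python train.py"}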
    # Validate whether the parameters are valid
# @pysnooper.snoop()
def validate_parameters(self,parameters,algorithm):
try:
parameters = json.loads(parameters)
for parameter_name in parameters:
parameter = parameters[parameter_name]
if parameter['type'] == 'int' and 'min' in parameter and 'max' in parameter:
parameter['min'] = int(parameter['min'])
parameter['max'] = int(parameter['max'])
if not parameter['max']>parameter['min']:
                        raise Exception('min must be lower than max')
continue
if parameter['type'] == 'double' and 'min' in parameter and 'max' in parameter:
parameter['min'] = float(parameter['min'])
parameter['max'] = float(parameter['max'])
if not parameter['max']>parameter['min']:
                        raise Exception('min must be lower than max')
if algorithm=='grid':
parameter['step'] = float(parameter['step'])
continue
if parameter['type']=='categorical' and 'list' in parameter and type(parameter['list'])==list:
continue
                raise MyappException('parameter type must be one of [int, double, categorical]; the required min/max/step/list fields must exist, and min must be lower than max')
return json.dumps(parameters,indent=4,ensure_ascii=False)
except Exception as e:
print(e)
raise MyappException('parameters not valid:'+str(e))
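    # Illustrative `parameters` payload (not from the original source) that the
    # validate_parameters() method above accepts; field names follow the checks
    # in the method and the values are made-up:
    #   {"learning_rate": {"type": "double", "min": "0.001", "max": "0.1", "step": "0.01"},
    #    "batch_size":    {"type": "int", "min": "16", "max": "128"},
    #    "optimizer":     {"type": "categorical", "list": ["adam", "sgd"]}}
    # For the 'grid' algorithm a double parameter must also carry a 'step'.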
# @pysnooper.snoop()
def pre_add(self, item):
if item.job_type is None:
raise MyappException("Job type is mandatory")
core.validate_json(item.parameters)
item.parameters = self.validate_parameters(item.parameters,item.algorithm_name)
item.resource_memory=core.check_resource_memory(item.resource_memory,self.src_item_json.get('resource_memory',None) if self.src_item_json else None)
item.resource_cpu = core.check_resource_cpu(item.resource_cpu,self.src_item_json.get('resource_cpu',None) if self.src_item_json else None)
self.merge_trial_spec(item)
self.make_experiment(item)
def pre_update(self, item):
self.pre_add(item)
pre_add_get=set_column
pre_update_get=set_column
@action(
"copy", __("Copy Hyperparameter Experiment"), confirmation=__('Copy Hyperparameter Experiment'), icon="fa-copy",multiple=True, single=False
)
def copy(self, hps):
if not isinstance(hps, list):
hps = [hps]
for hp in hps:
new_hp = hp.clone()
new_hp.name = new_hp.name+"-copy"
new_hp.describe = new_hp.describe + "-copy"
new_hp.created_on = datetime.datetime.now()
new_hp.changed_on = datetime.datetime.now()
db.session.add(new_hp)
db.session.commit()
return redirect(request.referrer)
class Hyperparameter_Tuning_ModelView(Hyperparameter_Tuning_ModelView_Base,MyappModelView):
datamodel = SQLAInterface(Hyperparameter_Tuning)
conv = GeneralModelConverter(datamodel)
# Register the view and menu entry
appbuilder.add_view(Hyperparameter_Tuning_ModelView,"katib超参搜索",icon = 'fa-shopping-basket',category = '超参搜索',category_icon = 'fa-glass')
# Register the API
class Hyperparameter_Tuning_ModelView_Api(Hyperparameter_Tuning_ModelView_Base,MyappModelRestApi):
datamodel = SQLAInterface(Hyperparameter_Tuning)
conv = GeneralModelConverter(datamodel)
route_base = '/hyperparameter_tuning_modelview/api'
list_columns = ['created_by','changed_by','created_on','changed_on','job_type','name','namespace','describe',
'parallel_trial_count','max_trial_count','max_failed_trial_count','objective_type',
'objective_goal','objective_metric_name','objective_additional_metric_names','algorithm_name',
'algorithm_setting','parameters','job_json','trial_spec','working_dir','node_selector',
'image_pull_policy','resource_memory','resource_cpu','experiment','alert_status']
add_columns = ['job_type','name','namespace','describe',
'parallel_trial_count','max_trial_count','max_failed_trial_count','objective_type',
'objective_goal','objective_metric_name','objective_additional_metric_names','algorithm_name',
'algorithm_setting','parameters','job_json','working_dir','node_selector','image_pull_policy',
'resource_memory','resource_cpu']
edit_columns = add_columns
appbuilder.add_api(Hyperparameter_Tuning_ModelView_Api)
# List the currently running Experiments
from myapp.views.view_workflow import Crd_ModelView_Base
from myapp.models.model_katib import Experiments
class Experiments_ModelView(Crd_ModelView_Base,MyappModelView,DeleteMixin):
label_title='超参调度'
datamodel = SQLAInterface(Experiments)
list_columns = ['url','namespace_url','create_time','status','username']
crd_name = 'experiment'
appbuilder.add_view(Experiments_ModelView,"katib超参调度",icon = 'fa-tasks',category = '超参搜索')
# Register the API
class Experiments_ModelView_Api(Crd_ModelView_Base,MyappModelRestApi):
datamodel = SQLAInterface(Experiments)
route_base = '/experiments_modelview/api'
list_columns = ['url', 'namespace_url', 'create_time', 'status', 'username']
crd_name = 'experiment'
appbuilder.add_api(Experiments_ModelView_Api)
```
#### File: myapp/views/view_kfserving.py
```python
from flask import render_template,redirect
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask import Blueprint, current_app, jsonify, make_response, request
# Expose the model as a view and control how it is displayed on the frontend
from myapp.models.model_serving import Service,KfService
from myapp.models.model_team import Project,Project_User
from myapp.utils import core
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from flask_appbuilder.actions import action
from myapp import app, appbuilder,db,event_logger
import logging
import re
import uuid
import requests
from myapp.exceptions import MyappException
from flask_appbuilder.security.decorators import has_access
from myapp.models.model_job import Repository
from flask_wtf.file import FileAllowed, FileField, FileRequired
from werkzeug.datastructures import FileStorage
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from myapp import security_manager
import os,sys
from wtforms.validators import DataRequired, Length, NumberRange, Optional,Regexp
from wtforms import BooleanField, IntegerField, SelectField, StringField,FloatField,DateField,DateTimeField,SelectMultipleField,FormField,FieldList
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget,BS3PasswordFieldWidget,DatePickerWidget,DateTimePickerWidget,Select2ManyWidget,Select2Widget
from myapp.forms import MyBS3TextAreaFieldWidget,MySelect2Widget,MyCodeArea,MyLineSeparatedListField,MyJSONField,MyBS3TextFieldWidget,MySelectMultipleField
from myapp.utils.py import py_k8s
import os, zipfile
import shutil
from flask import (
current_app,
abort,
flash,
g,
Markup,
make_response,
redirect,
render_template,
request,
send_from_directory,
Response,
url_for,
)
from .base import (
DeleteMixin,
api,
BaseMyappView,
check_ownership,
CsvResponse,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
MyappFilter,
MyappModelView,
)
from sqlalchemy import and_, or_, select
from .baseApi import (
MyappModelRestApi
)
import kubernetes
from kfserving import KFServingClient
from kfserving import V1alpha2EndpointSpec
from kfserving import V1alpha2CustomSpec
from kfserving import V1alpha2PredictorSpec
from kfserving import V1alpha2TensorflowSpec
from kfserving import V1alpha2InferenceServiceSpec
from kfserving import V1alpha2InferenceService
from flask_appbuilder import CompactCRUDMixin, expose
import pysnooper,datetime,time,json
conf = app.config
class KfService_ModelView(MyappModelView):
datamodel = SQLAInterface(KfService)
crd_name = 'inferenceservice'
help_url = conf.get('HELP_URL', {}).get(datamodel.obj.__tablename__, '') if datamodel else ''
show_columns = ['name', 'label','service_type','default_service','canary_service','canary_traffic_percent','k8s_yaml']
add_columns = ['name', 'label', 'service_type','default_service','canary_service','canary_traffic_percent']
list_columns = ['label_url','host','service','deploy','status','roll']
edit_columns = add_columns
base_order = ('id','desc')
order_columns = ['id']
@expose('/deploy1/<kfservice_id>',methods=['POST',"GET"])
def deploy1(self,kfservice_id):
mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()
from myapp.utils.py.py_k8s import K8s
k8s = K8s(mykfservice.project.cluster['KUBECONFIG'])
namespace = conf.get('KFSERVING_NAMESPACE')
crd_info = conf.get('CRD_INFO')['inferenceservice']
crd_list = k8s.get_crd(group=crd_info['group'], version=crd_info['version'], plural=crd_info['plural'],
namespace=namespace)
for crd_obj in crd_list:
if crd_obj['name'] == mykfservice.name:
k8s.delete_crd(group=crd_info['group'], version=crd_info['version'], plural=crd_info['plural'],
namespace=namespace, name=mykfservice.name)
def get_env(env_str):
if not env_str:
return []
envs = re.split('\r|\n', env_str)
envs = [env.split('=') for env in envs if env and len(env.split('=')) == 2]
return envs
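        # Illustrative behaviour (not from the original source):
        #   get_env("FOO=bar\nDEBUG=1") -> [['FOO', 'bar'], ['DEBUG', '1']]
        # Blank lines and entries without exactly one '=' are dropped.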
def get_kfjson(service,mykfservice):
if not service:
return None
image_secrets = conf.get('HUBSECRET', [])
user_hubsecrets = db.session.query(Repository.hubsecret).filter(Repository.created_by_fk == g.user.id).all()
if user_hubsecrets:
for hubsecret in user_hubsecrets:
if hubsecret[0] not in image_secrets:
image_secrets.append(hubsecret[0])
kfjson={
"minReplicas": service.min_replicas,
"maxReplicas": service.max_replicas,
"custom": {
"affinity": {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "gpu" if core.get_gpu(service.resource_gpu)[0] else "cpu",
"operator": "In",
"values": [
"true"
]
},
]
}
]
}
},
},
"imagePullSecrets": [{"name":hubsecret} for hubsecret in image_secrets],
"container": {
"image": service.images,
"imagePullPolicy": 'Always',
"name": mykfservice.name+"-"+service.name,
"workingDir": service.working_dir if service.working_dir else None,
"command": ["sh", "-c",service.command] if service.command else None,
"resources": {
"requests": {
"cpu": service.resource_cpu,
"memory": service.resource_memory
}
},
"env":[{"name":env[0],"value":env[1]} for env in get_env(service.env)],
# "volumeMounts": [
# {
# "mountPath": "/mnt/%s" % service.created_by.username,
# "name": "workspace",
# "subPath": service.created_by.username
# }
# ],
# "volumeDevices":[
# {
# "devicePath": "/data/home/",
# "name": "workspace"
# }
# ]
}
# "volumes": [
# {
# "name": "workspace",
# "persistentVolumeClaim": {
# "claimName": "kubeflow-user-workspace"
# }
# }
# ]
}
}
return kfjson
crd_json={
"apiVersion": "serving.kubeflow.org/v1alpha2",
"kind": "InferenceService",
"metadata": {
"labels": {
"app": mykfservice.name
},
"name": mykfservice.name,
"namespace": namespace
},
"spec": {
"canaryTrafficPercent": mykfservice.canary_traffic_percent,
"default": {
mykfservice.service_type: get_kfjson(mykfservice.default_service,mykfservice)
},
"canary": {
mykfservice.service_type: get_kfjson(mykfservice.canary_service,mykfservice),
} if mykfservice.canary_service else None,
}
}
import yaml
        ya = yaml.safe_load(json.dumps(crd_json))
ya_str = yaml.safe_dump(ya, default_flow_style=False)
logging.info(ya_str)
crd_objects = k8s.create_crd(group=crd_info['group'],version=crd_info['version'],plural=crd_info['plural'],namespace=namespace,body=crd_json)
flash(category='warning',message='部署启动,一分钟后部署完成')
return redirect('/kfservice_modelview/list/')
    # Create the kfserving InferenceService
@expose('/deploy/<kfservice_id>', methods=['POST', "GET"])
def deploy(self, kfservice_id):
mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()
namespace = conf.get('KFSERVING_NAMESPACE')
crd_info = conf.get('CRD_INFO')['inferenceservice']
        # Build a container spec from the service
def make_container(service,mykfservice):
from myapp.utils.py.py_k8s import K8s
            k8s = K8s()  # not deploying here, so no cluster configuration is needed
container = k8s.make_container(name=mykfservice.name + "-" + service.name,
command=["sh", "-c",service.command] if service.command else None,
args=None,
volume_mount=None,
image_pull_policy='Always',
image=service.images,
working_dir=service.working_dir if service.working_dir else None,
env=service.env,
resource_memory=service.resource_memory,
resource_cpu = service.resource_cpu,
resource_gpu= service.resource_gpu,
username = service.created_by.username
)
return container
api_version = crd_info['group'] + '/' + crd_info['version']
default_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas= mykfservice.default_service.min_replicas,
max_replicas=mykfservice.default_service.max_replicas,
custom=V1alpha2CustomSpec(
container=make_container(mykfservice.default_service,mykfservice)
)
)
) if mykfservice.default_service else None
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor= V1alpha2PredictorSpec(
min_replicas=mykfservice.canary_service.min_replicas,
max_replicas=mykfservice.canary_service.max_replicas,
custom=V1alpha2CustomSpec(
container=make_container(mykfservice.canary_service,mykfservice)
)
)
) if mykfservice.canary_service else None
metadata = kubernetes.client.V1ObjectMeta(
name=mykfservice.name,
labels={
"app":mykfservice.name,
"rtx-user":mykfservice.created_by.username
},
namespace=namespace
)
isvc = V1alpha2InferenceService(
api_version=api_version,
kind=crd_info['kind'],
metadata=metadata,
spec=V1alpha2InferenceServiceSpec(
default=default_endpoint_spec,
canary=canary_endpoint_spec,
canary_traffic_percent=mykfservice.canary_traffic_percent
)
)
KFServing = KFServingClient()
try:
KFServing.delete(mykfservice.name, namespace=namespace,version=crd_info['version'])
except Exception as e:
print(e)
KFServing.create(isvc,namespace=namespace,version=crd_info['version'])
flash(category='warning', message='部署启动,一分钟后部署完成')
return redirect('/kfservice_modelview/list/')
    # Canary rollout
@expose('/roll/<kfservice_id>', methods=['POST', "GET"])
def roll(self, kfservice_id):
mykfservice = db.session.query(KfService).filter_by(id=kfservice_id).first()
namespace = conf.get('KFSERVING_NAMESPACE')
crd_info = conf.get('CRD_INFO')['inferenceservice']
        # Build a container spec from the service
def make_container(service, mykfservice):
from myapp.utils.py.py_k8s import K8s
            k8s = K8s()  # not deploying here, so no cluster configuration is needed
container = k8s.make_container(name=mykfservice.name + "-" + service.name,
command=["sh", "-c", service.command] if service.command else None,
args=None,
volume_mount=None,
image_pull_policy='Always',
image=service.images,
working_dir=service.working_dir if service.working_dir else None,
env=service.env,
resource_memory=service.resource_memory,
resource_cpu=service.resource_cpu,
resource_gpu=service.resource_gpu,
username=service.created_by.username,
ports = service.ports
)
return container
canary_endpoint_spec = V1alpha2EndpointSpec(
predictor=V1alpha2PredictorSpec(
min_replicas=mykfservice.canary_service.min_replicas,
max_replicas=mykfservice.canary_service.max_replicas,
custom=V1alpha2CustomSpec(
container=make_container(mykfservice.canary_service, mykfservice)
)
)
) if mykfservice.canary_service else None
KFServing = KFServingClient()
KFServing.rollout_canary(mykfservice.name, canary=canary_endpoint_spec, percent=mykfservice.canary_traffic_percent,
namespace=namespace, timeout_seconds=120,version=crd_info['version'])
flash(category='warning', message='滚动升级已配置,刷新查看当前流量比例')
return redirect('/kfservice_modelview/list/')
    # Basic batch delete
# @pysnooper.snoop()
def base_muldelete(self,items):
if not items:
abort(404)
for item in items:
try:
k8s_client = py_k8s.K8s(item.project.cluster['KUBECONFIG'])
crd_info = conf.get("CRD_INFO", {}).get(self.crd_name, {})
if crd_info:
k8s_client.delete_crd(group=crd_info['group'],version=crd_info['version'],plural=crd_info['plural'],namespace=conf.get('KFSERVING_NAMESPACE'),name=item.name)
except Exception as e:
flash(str(e), "danger")
def pre_delete(self,item):
self.base_muldelete([item])
# @event_logger.log_this
# @expose("/delete/<pk>")
# @has_access
# def delete(self, pk):
# pk = self._deserialize_pk_if_composite(pk)
# self.base_delete(pk)
# url = url_for(f"{self.endpoint}.list")
# return redirect(url)
appbuilder.add_view(KfService_ModelView,"kfserving",icon = 'fa-tasks',category = '服务化')
```
#### File: myapp/views/view_runhistory.py
```python
from flask import render_template,redirect
from flask_appbuilder.models.sqla.interface import SQLAInterface
# Expose the model as a view and control how it is displayed on the frontend
from myapp.models.model_job import Repository,Images,Job_Template,Task,Pipeline,Workflow,Tfjob,Xgbjob,RunHistory,Pytorchjob
from myapp import app, appbuilder,db,event_logger
from wtforms import BooleanField, IntegerField,StringField, SelectField,FloatField,DateField,DateTimeField,SelectMultipleField,FormField,FieldList
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget,BS3PasswordFieldWidget,DatePickerWidget,DateTimePickerWidget,Select2ManyWidget,Select2Widget,BS3TextAreaFieldWidget
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from sqlalchemy import and_, or_, select
from .baseApi import (
MyappModelRestApi
)
from myapp import security_manager
import kfp  # if using the bundled custom version, the pip-installed one must be removed
from werkzeug.datastructures import FileStorage
from .base import (
api,
BaseMyappView,
check_ownership,
data_payload_response,
DeleteMixin,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_success,
MyappFilter,
MyappModelView,
)
from flask_appbuilder import CompactCRUDMixin, expose
import pysnooper,datetime,time,json
conf = app.config
logging = app.logger
class RunHistory_Filter(MyappFilter):
# @pysnooper.snoop()
def apply(self, query, func):
user_roles = [role.name.lower() for role in list(self.get_user_roles())]
if "admin" in user_roles:
return query
pipeline_ids = security_manager.get_create_pipeline_ids(db.session)
return query.filter(
or_(
self.model.pipeline_id.in_(pipeline_ids),
# self.model.project.name.in_(['public'])
)
)
class RunHistory_ModelView_Base():
label_title='定时调度历史'
datamodel = SQLAInterface(RunHistory)
base_order = ('id', 'desc')
order_columns = ['id']
list_columns = ['pipeline_url','creator','created_on','execution_date','status_url','log','history']
edit_columns = ['status']
    base_filters = [["id", RunHistory_Filter, lambda: []]]  # permission filter
add_form_extra_fields = {
"status": SelectField(
_(datamodel.obj.lab('status')),
description="状态comed为已识别未提交,created为已提交",
widget=Select2Widget(),
choices=[['comed', 'comed'], ['created', 'created']]
),
}
edit_form_extra_fields = add_form_extra_fields
class RunHistory_ModelView(RunHistory_ModelView_Base,MyappModelView,DeleteMixin):
datamodel = SQLAInterface(RunHistory)
appbuilder.add_view(RunHistory_ModelView,"定时调度记录",icon = 'fa-clock-o',category = '训练')
# Register the API
class RunHistory_ModelView_Api(RunHistory_ModelView_Base,MyappModelRestApi):
datamodel = SQLAInterface(RunHistory)
route_base = '/runhistory_modelview/api'
appbuilder.add_api(RunHistory_ModelView_Api)
``` |
{
"source": "jolnanis/motus",
"score": 3
} |
#### File: motus/motus/dictools.py
```python
from abc import ABC, abstractmethod
import gc
import mimetypes
import pkgutil
import yaml
class FileHandlingException(Exception):
"""Exception raised during opening a dic file for Read/Write operations"""
class ParsingException(FileHandlingException):
"""Exception raised durin parsing"""
class NonAplhaWordException(ParsingException):
"""Raised when a word is not composed of A-Z letters"""
class ConfigFileException(Exception):
"""Exception raised when Substitution file is poorly formed"""
class DicStateException(Exception):
"""Exception raised when encountering a"""
def new_dic(*args, **kwargs):
if 'dic_path' in list(kwargs) or args != ():
rd = Reader()
rd.config(*args, **kwargs)
return rd
else:
return Dic()
class Dic:
"""Toolbox class used to manipulate, clean and save word sets. \n
Words are cleaned automatically upon addition to the set.
"""
def __init__(self):
self.content = dict()
self._state_flag = True
self._words_flag = True
@property
def state(self):
if self._state_flag or self._state is None:
if self.content == {} or self.content == []:
self._state = 'empty'
elif isinstance(self.content, list):
self._state = 'list of words'
elif isinstance(list(self.content)[0], str):
self._state = 'various-initials dict'
elif isinstance(self.content, dict):
self._state = 'various-lengths dict'
else:
                raise Exception('Internal dictionary corrupted')
self._state_flag = False
return self._state
@property
def words(self):
if self._words_flag or self._words is None:
content = self.content
if self.state == 'various-lengths dict':
self._words = [
word for length in list(content)
for initial in list(content[length])
for word in content[length][initial]
]
elif self.state == 'various-initials dict':
self._words = [
word for initial in list(content)
for word in content[initial]
]
elif self.state == 'list of words':
self._words = content
elif self.state == 'empty':
self._words = []
self._words_flag = False
return self._words
def _insert(self, word, container, state):
self._state_flag = True
if state == 'empty':
return [word]
elif state == 'list of words':
if len(word) == len(container[0]):
if container == [] or word[0] == container[0][0]:
container.append(word)
return container
else:
return {word[0]: [word], container[0][0]: container}
else:
return {
len(word): {word[0]: [word]},
len(container[0]): {container[0][0]: container},
}
elif state == 'various-initials dict':
one_word = container[list(container)[0]][0]
if len(word) == len(one_word):
if word[0] in list(container):
container[word[0]].append(word)
return container
else:
container[word[0]] = [word]
return container
else:
return {
len(word): {word[0]: [word]},
len(one_word): container
}
elif state == 'various-lengths dict':
if len(word) in list(container):
container[len(word)] = self._insert(
word,
container[len(word)],
'various-initials dict',
)
return container
else:
container[len(word)] = {word[0]: [word]}
return container
def insert(self, word):
if word is None:
return
try:
self.content = self._insert(word, self.content, self.state)
except KeyError:
err_str = ('Problem when adding an entry to the dic. '
'Internal state corrupted')
raise DicStateException(err_str)
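    # Illustrative evolution of `content` (not from the original source) as
    # words are inserted into an initially empty Dic:
    #   insert('ART')   -> ['ART']                                  state: 'list of words'
    #   insert('AIR')   -> ['ART', 'AIR']
    #   insert('BUS')   -> {'B': ['BUS'], 'A': ['ART', 'AIR']}      state: 'various-initials dict'
    #   insert('ASPIC') -> {5: {'A': ['ASPIC']},
    #                       3: {'B': ['BUS'], 'A': ['ART', 'AIR']}} state: 'various-lengths dict'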
class FileHandler(ABC):
""" Parent abstract class for dictools.Reader and dictools.Writer
"""
if mimetypes.guess_type('example.yml')[0] is None:
mimetypes.add_type('application/x-yaml', '.yml')
mimetypes.add_type('application/x-yaml', '.yaml')
@abstractmethod
def __init__(self):
self.dic_path = None
self.filetype = None
self._infer_flag = True
def config(self, dic_path, filetype):
if dic_path is not None:
self.dic_path = dic_path
self._infer_flag = True
if filetype is not None:
self.filetype = filetype
@property
def inferred_filetype(self):
        if self._infer_flag or self._inferred_filetype is None:
if self.dic_path is not None:
self._inferred_filetype = mimetypes.guess_type(self.dic_path)[0]
self._infer_flag = False
return self._inferred_filetype
class Reader(FileHandler):
def __init__(self, *args, **kwargs):
super().__init__()
self.pckg = None
self._substitutions = {}
self.config(*args, **kwargs)
def config(self, dic_path=None, filetype=None, config=None, pckg=None):
"""Takes up to 4 positional keyword arguments :\n
`dic_path`: str, path to the dictionary\n
`filetype`: str, accepts `text/plain` and `application/x-yaml`\n
`config`: str, path to a config file\n
`pckg`: bool, `True` if the dic should be retrieved from the package
"""
if pckg is not None:
self.pckg = pckg
if config is not None:
self._config_substitutions(config)
super().config(dic_path, filetype)
def add_substitution(self, old: str, new: str):
print(self._substitutions)
self._substitutions[old.upper()] = new.upper()
def _config_substitutions(self, config_file):
self._substitutions = dict()
with open(config_file, 'r') as file:
i = 0
for line in file:
i = i+1
line = line.strip()
if line == '' or line[0] == '#':
continue
try:
old, new = line.split(' ')
self.add_substitution(old, new)
except ValueError as e:
raise ConfigFileException(
                        f'Could not parse line {i} of file {config_file}:\n'
f'{line}'
) from e
def parse(self, dic_path=None, filetype=None, pckg=None):
if (dic_path or filetype or pckg) is not None:
self.config(dic_path=dic_path, filetype=filetype, pckg=pckg)
if 'dic_path' not in dir(self) or self.dic_path is None:
raise FileHandlingException('No Target file')
if (('filetype' not in dir(self) or self.filetype is None)
and self.inferred_filetype is None):
            raise FileHandlingException(f'Could not infer a file type for '
                                        f'{self.dic_path}')
if ((self.filetype and self.inferred_filetype) is not None
and self.filetype != self.inferred_filetype):
err_str = (f'Incompatible filetypes: {self.filetype}, '
f'{self.inferred_filetype}')
raise FileHandlingException(err_str)
return self._get_parser(self.filetype or self.inferred_filetype)
def _get_parser(self, filetype):
if filetype == 'text/plain':
return self._txt_parser(self.dic_path)
elif filetype == 'application/x-yaml':
return self._yaml_parser(self.dic_path)
else:
            raise FileHandlingException(f'Unknown filetype: {filetype}')
def _txt_parser(self, path):
d = new_dic()
for line in open(path, 'r'):
d.insert(self.clean(line))
return d
def _yaml_parser(self, path):
d = new_dic()
if self.pckg:
data = pkgutil.get_data('motus.dic', self.dic_path)
d.content = yaml.safe_load(data.decode())
del data
gc.collect()
else:
with open(path, 'r') as file:
d.content = yaml.safe_load(file)
if d.content is None:
d.content = {}
return d
def clean(self, line):
word = line.strip().upper()
for old, new in self._substitutions.items():
word = word.replace(old, new)
if word == '':
return None
        if (ord(min(word)) > 64 and ord(max(word)) <= 90):
return word
else:
            unauth = [char for char in word if ord(char) < 65 or ord(char) > 90]
if len(unauth) == 1:
error_str = f'Unauthorized character {unauth[0]} in word {word}'
else:
error_str = f'Unauthorized characters {unauth} in word {word}'
raise NonAplhaWordException(error_str)
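    # Illustrative behaviour of clean() (not from the original source): with
    # _substitutions == {'É': 'E'}, clean('  été \n') returns 'ETE', while a word
    # containing an unmapped non A-Z character raises NonAplhaWordException.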
class Writer(FileHandler):
def __init__(self, *args, **kwargs):
super().__init__()
self.config(*args, **kwargs)
def config(self, dic_path=None, filetype=None):
""" Takes up to 2 positionnal keywoard arguments :\n
`dic_path`: str, path to the dictionary being written\n
`filetype`: str, accepts `text/plain` and `application/x-yaml`
"""
super().config(dic_path, filetype)
def write(self, dic, dic_path=None, filetype=None):
        if (dic_path or filetype) is not None:
self.config(dic_path, filetype)
if 'dic_path' not in dir(self) or self.dic_path is None:
raise FileHandlingException('No Destination file')
if (('filetype' not in dir(self) or self.filetype is None)
and self.inferred_filetype is None):
            raise FileHandlingException(f'Could not infer a file type for '
f'{self.dic_path}')
if ((self.filetype and self.inferred_filetype) is not None
and self.filetype != self.inferred_filetype):
err_str = (f'Incompatible filetypes: {self.filetype}, '
f'{self.inferred_filetype}')
raise FileHandlingException(err_str)
return self._get_writer(self.filetype or self.inferred_filetype)
@classmethod
def _get_writer(cls, filetype):
if filetype == 'text/plain':
return cls._txt_writer
elif filetype == 'application/x-yaml':
return cls._yaml_writer
else:
            raise FileHandlingException(f'Unknown filetype: {filetype}')
@classmethod
def _txt_writer(cls, dic, path):
        with open(path, 'w') as file:
            for word in dic.words:
                file.write(word + '\n')
@classmethod
def _yaml_writer(cls, dic, path):
        with open(path, 'w') as file:
            yaml.dump(dic.content, file, default_flow_style=False)
```
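A minimal end-to-end usage sketch of the module above (not part of the original repository): it assumes a local `words.txt` with one word per line and converts it to YAML. Note that `Writer.write()` validates the target and returns the serialiser callable rather than writing directly, so the sketch invokes the returned callable explicitly.
```python
from motus.dictools import Reader, Writer

# Parse a plain-text word list into a Dic (words are cleaned on insertion).
rd = Reader('words.txt', 'text/plain')
dic = rd.parse()

# Resolve the YAML writer for the target file and serialise the Dic with it.
wt = Writer('words.yml', 'application/x-yaml')
yaml_writer = wt.write(dic)
yaml_writer(dic, wt.dic_path)
```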
#### File: motus/tests/test_dictools_file_handler.py
```python
import io
import os
import unittest
from unittest import mock
import tempfile
from motus import dictools
from motus.dictools import FileHandler, Reader, Writer
DIC = 'motus.dictools.Dic'
FILE_HANDLER = 'motus.dictools.FileHandler'
READER = 'motus.dictools.Reader'
WRITER = 'motus.dictools.Writer'
class DictoolsTestReaderInstanciation(unittest.TestCase):
def _test_reader_instanciation(self, *args, **kwargs):
"""`expected`: expected mock.call for Reader.config()"""
if 'expected' in list(kwargs):
expected = kwargs.pop('expected')
else:
expected = None
with mock.patch(f'{FILE_HANDLER}.__init__') as fd_mock:
with mock.patch(f'{READER}.config') as cfg_mock:
rd = Reader(*args, **kwargs)
self.assertIsInstance(rd, FileHandler)
self.assertIsInstance(rd, Reader)
self.assertEqual(rd._substitutions, dict())
fd_mock.assert_called_once_with()
cfg_mock.assert_called_once()
if expected is not None:
self.assertEqual(cfg_mock.call_args, expected)
def test_reader_instanciation_1(self):
self._test_reader_instanciation()
def test_reader_instanciation_2(self):
self._test_reader_instanciation(
'path_to_dic',
expected=mock.call('path_to_dic'),
)
def test_reader_instanciation_3(self):
self._test_reader_instanciation(
'path_to_dic',
'filetype',
expected=mock.call('path_to_dic', 'filetype'),
)
def test_reader_instanciation_4(self):
self._test_reader_instanciation(
'path/to/dic.yml',
'application/x-yaml',
'config.txt',
False,
expected=(
mock.call('path/to/dic.yml', 'application/x-yaml',
'config.txt', False,)
)
)
def test_reader_instanciation_5(self):
self._test_reader_instanciation(
config='cfg.txt',
dic_path='path/to/dic.txt',
expected=(
mock.call(config='cfg.txt',
dic_path='path/to/dic.txt')
)
)
EMPTY_LIST = []
EMPTY_LIST_TXT = ['\n']
EMPTY_LIST_YML = ['\n']
LIST_OF_WORDS = ['ASPIC', 'APRES', 'ARRET', 'ACTIF', 'ANNEE']
LIST_OF_WORDS_TXT = [
'ASPIC\n',
'APRES\n',
'ARRET\n',
'ACTIF\n',
'ANNEE\n',
]
LIST_OF_WORDS_YML = [
'- ASPIC\n',
'- APRES\n',
'- ARRET\n',
'- ACTIF\n',
'- ANNEE\n',
]
DICT_OF_SAME_LENGTH = {
'A': ['ASPIC', 'ANNEE'],
'B': ['BELLE', 'BIERE', 'BUTTE'],
'C': ['COUPS'],
}
DICT_OF_SAME_LENGTH_TXT = [
'ASPIC\n',
'ANNEE\n',
'BELLE\n',
'BIERE\n',
'BUTTE\n',
'COUPS\n',
]
DICT_OF_SAME_LENGTH_YML = [
'A:\n',
'- ASPIC\n',
'- ANNEE\n',
'B:\n',
'- BELLE\n',
'- BIERE\n',
'- BUTTE\n',
'C:\n',
'- COUPS\n',
]
FULL_DIC = {
5: {
'A': ['ASPIC', 'ANNEE'],
'B': ['BELLE', 'BIERE', 'BUTTE'],
'C': ['COUPS'],
},
6: {
'M': ['MOUCHE'],
'S': ['SAUCES'],
},
}
FULL_DIC_TXT = [
'ASPIC\n',
'ANNEE\n',
'BELLE\n',
'BIERE\n',
'BUTTE\n',
'COUPS\n',
'MOUCHE\n',
'SAUCES\n',
]
FULL_DIC_YML = [
'5:\n',
' A:\n',
' - ASPIC\n',
' - ANNEE\n',
' B:\n',
' - BELLE\n',
' - BIERE\n',
' - BUTTE\n',
' C:\n',
' - COUPS\n',
'6:\n',
' M:\n',
' - MOUCHE\n',
' S:\n',
' - SAUCES\n',
]
FAKE_FULL_DIC = {
3: ['ART', 'ANS', 'AIR'],
5: ['ASPIC', 'APRES', 'ARRET'],
}
FAKE_FULL_DIC_TXT = [
'ART\n',
'ANS\n',
'AIR\n',
'ASPIC\n',
'APRES\n',
'ARRET\n',
]
FAKE_FULL_DIC_YML = [
'3:\n',
'- ART\n',
'- ANS\n',
'- AIR\n',
'5:\n',
'- ASPIC\n',
'- APRES\n',
'- ARRET\n',
]
class DictoolsTestReaderBasic(unittest.TestCase):
def setUp(self):
self.rd = Reader()
class DictoolsTestReaderConfig(DictoolsTestReaderBasic):
def _test_reader_config(self, *args, **kwargs):
"""`expected`: expected mock.call for Reader.config()"""
if 'expected_sub' in list(kwargs):
expected_sub = kwargs.pop('expected_sub')
else:
expected_sub = None
if 'expected_super' in list(kwargs):
expected_super = kwargs.pop('expected_super')
else:
expected_super = None
if 'pckg' in list(kwargs):
pckg = kwargs.pop('pckg')
elif len(args) >= 4:
pckg = args[3]
else:
pckg = None
with mock.patch(f'{READER}._config_substitutions') as sub_mock:
with mock.patch(f'{FILE_HANDLER}.config') as super_mock:
self.rd.config(*args, **kwargs)
super_mock.assert_called_once()
if pckg is not None:
self.assertEqual(self.rd.pckg, pckg)
if expected_sub is not None:
sub_mock.assert_called_once()
self.assertEqual(sub_mock.call_args, expected_sub)
if expected_super is not None:
self.assertEqual(super_mock.call_args, expected_super)
def test_reader_config_1(self):
self._test_reader_config()
def test_reader_config_2(self):
self._test_reader_config(
'path_to_dic',
expected_super=mock.call('path_to_dic', None),
)
def test_reader_config_3(self):
self._test_reader_config(
'path_to_dic',
'filetype',
expected_super=mock.call('path_to_dic', 'filetype'),
)
def test_reader_config_4(self):
self._test_reader_config(
'path/to/dic.yml',
'application/x-yaml',
'config.txt',
False,
expected_super=(
mock.call('path/to/dic.yml', 'application/x-yaml')
),
expected_sub=(
mock.call('config.txt')
),
)
def test_reader_config_5(self):
self._test_reader_config(
config='cfg.txt',
dic_path='path/to/dic.txt',
expected_super=(
mock.call('path/to/dic.txt', None)
),
expected_sub=(
mock.call('cfg.txt')
),
)
class DictoolsTestReaderAddSubstitution(DictoolsTestReaderBasic):
def setUp(self):
super().setUp()
self.assertEqual(self.rd._substitutions, dict())
def test_add_substitions_to_empty(self):
self.rd.add_substitution('é', 'e')
self.assertEqual(self.rd._substitutions, {'É': 'E'})
def test_add_substitions_to_existing(self):
self.rd._substitutions = {'É': 'E'}
self.rd.add_substitution('Ê', 'E')
self.assertEqual(self.rd._substitutions,
{'É': 'E', 'Ê': 'E'})
def test_add_substitions_modify(self):
self.rd._substitutions = {'$': 'Z'}
self.rd.add_substitution('$', 'S')
self.assertEqual(self.rd._substitutions,
{'$': 'S'})
CONFIG_SAMPLE = io.StringIO(
"# Comment"
"\n"
"é e\n"
"À a\n"
"$ S \n"
" ï I\n"
"W vv\n"
)
BROKEN_CONFIG_SAMPLE = io.StringIO(
"é e\n"
"À a\n"
"é\n"
)
class DictoolsTestReader_ConfigSubstitution(unittest.TestCase):
def setUp(self):
self.rd = Reader()
self.rd.dic_path = 'example.yml'
self.assertEqual(self.rd._substitutions, dict())
@mock.patch(f'{READER}.add_substitution')
@mock.patch('builtins.open')
def test__config_substitions(self, open_mock, add_mock):
open_mock.return_value = CONFIG_SAMPLE
self.rd._config_substitutions('cfg.txt')
calls = [
mock.call('é', 'e'),
mock.call('À', 'a'),
mock.call('$', 'S'),
mock.call('ï', 'I'),
mock.call('W', 'vv'),
]
        add_mock.assert_called()
self.assertEqual(add_mock.call_args_list, calls)
@mock.patch(f'{READER}.add_substitution')
@mock.patch('builtins.open')
def test_add_substitions_broken(self, open_mock, add_mock):
open_mock.return_value = BROKEN_CONFIG_SAMPLE
with self.assertRaises(dictools.ConfigFileException):
self.rd._config_substitutions('cfg.txt')
calls = [
mock.call('é', 'e'),
mock.call('À', 'a'),
]
self.assertEqual(add_mock.call_args_list, calls)
class DictoolsTestReaderParse(DictoolsTestReaderBasic):
def test_parse_no_path(self):
with self.assertRaises(dictools.FileHandlingException):
self.rd.parse()
def test_parse_no_filetype(self):
self.rd.dic_path = 'example.json'
with self.assertRaises(dictools.FileHandlingException):
self.rd.parse()
def test_parse_filetype_different_than_inferred(self):
self.rd.dic_path = 'example.txt'
self.rd.filetype = 'application/x-yaml'
with self.assertRaises(dictools.FileHandlingException):
self.rd.parse()
def test_parse_working(self):
self.rd.dic_path = 'example.txt'
self.rd.filetype = 'text/plain'
with mock.patch('motus.dictools.Reader._get_parser') as mock_get:
self.rd.parse()
mock_get.assert_called_once_with('text/plain')
def test_parse_working_2(self):
self.rd.dic_path = 'example.yml'
with mock.patch('motus.dictools.Reader._get_parser') as mock_get:
self.rd.parse()
mock_get.assert_called_once_with('application/x-yaml')
class DictoolsTestReader_GetParser(DictoolsTestReaderBasic):
def setUp(self):
super().setUp()
self.rd.dic_path = "example"
def test__get_parser_txt(self):
with mock.patch('motus.dictools.Reader._txt_parser') as mock_txt:
self.rd._get_parser('text/plain'),
mock_txt.assert_called_once_with(self.rd.dic_path)
def test__get_parser_yaml(self):
with mock.patch('motus.dictools.Reader._yaml_parser') as mock_yml:
self.rd._get_parser('application/x-yaml'),
mock_yml.assert_called_once_with(self.rd.dic_path)
def test__get_parser_error(self):
with self.assertRaises(dictools.FileHandlingException):
self.rd._get_parser('application/json')
# Not really unit tests but the parser involves too many method calls
# for unit testing to be convenient.
class DictoolsTestReader_TxtParser(DictoolsTestReaderBasic):
def _test__txt_parser(self, file, expected):
with mock.patch('builtins.open') as mock_open:
mock_open.return_value = file
d = self.rd._txt_parser('example.path')
self.assertEqual(d.content, expected)
def _test__yaml_parser(self, file, expected):
with mock.patch('builtins.open') as mock_open:
mock_open.return_value = file
d = self.rd._yaml_parser('example.path')
self.assertEqual(d.content, expected)
# TXT
def test__txt_parser_empty(self):
self._test__txt_parser(
io.StringIO(''.join(EMPTY_LIST_TXT)),
{}
)
def test__txt_parser_list(self):
self._test__txt_parser(
io.StringIO(''.join(LIST_OF_WORDS_TXT)),
LIST_OF_WORDS
)
def test__txt_parser_same_length(self):
self._test__txt_parser(
io.StringIO(''.join(DICT_OF_SAME_LENGTH_TXT)),
DICT_OF_SAME_LENGTH
)
def test__txt_parser_full_dic(self):
self._test__txt_parser(
io.StringIO(''.join(FULL_DIC_TXT)),
FULL_DIC
)
def test__txt_parser_fake_full_dic(self):
# The fake Dic is not valid, txt_parser returns only a valid Dic
# so the values won't be equal
with self.assertRaises(AssertionError):
self._test__txt_parser(
io.StringIO(''.join(FAKE_FULL_DIC_TXT)),
FAKE_FULL_DIC
)
# YAML
def test__yaml_parser_empty(self):
self._test__yaml_parser(
io.StringIO(''.join(EMPTY_LIST_YML)),
{}
)
def test__yaml_parser_list(self):
self._test__yaml_parser(
io.StringIO(''.join(LIST_OF_WORDS_YML)),
LIST_OF_WORDS
)
def test__yaml_parser_same_length(self):
self._test__yaml_parser(
io.StringIO(''.join(DICT_OF_SAME_LENGTH_YML)),
DICT_OF_SAME_LENGTH
)
def test__yaml_parser_full_dic(self):
self._test__yaml_parser(
io.StringIO(''.join(FULL_DIC_YML)),
FULL_DIC
)
def test__yaml_parser_fake_full_dic(self):
        # The yaml parser doesn't check the validity of the Dic, maybe an
# option should be provided
self._test__yaml_parser(
io.StringIO(''.join(FAKE_FULL_DIC_YML)),
FAKE_FULL_DIC
)
class DictoolsTestReaderClean(DictoolsTestReaderBasic):
def _test_clean(self, substitutions, word, expected):
self.rd._substitutions = substitutions
self.assertEqual(self.rd.clean(word), expected)
def test_clean_no_subs(self):
self._test_clean({}, 'APRICOT \n', 'APRICOT')
self._test_clean({}, ' pear\n', 'PEAR')
self._test_clean({}, ' BanAna ', 'BANANA')
def test_clean_subs(self):
self._test_clean({'À': 'A'}, 'àpricot \n', 'APRICOT')
self._test_clean({'Ö': 'O'}, ' pear\n', 'PEAR')
self._test_clean({'A': 'E'}, ' BanAna ', 'BENENE')
def test_clean_fails(self):
with self.assertRaises(dictools.NonAplhaWordException):
self._test_clean({'Ö': 'O'}, 'àpricot \n', 'APRICOT')
with self.assertRaises(dictools.NonAplhaWordException):
self._test_clean({'Ö': 'O'}, 'àpricoté \n', 'APRICOT')
class DictoolsTestWriterInstanciation(unittest.TestCase):
def _test_writer_instanciation(self, *args, **kwargs):
"""`expected`: expected mock.call for Writer.config()"""
if 'expected' in list(kwargs):
expected = kwargs.pop('expected')
else:
expected = None
with mock.patch(f'{FILE_HANDLER}.__init__') as fd_mock:
with mock.patch(f'{WRITER}.config') as cfg_mock:
rd = Writer(*args, **kwargs)
self.assertIsInstance(rd, FileHandler)
self.assertIsInstance(rd, Writer)
fd_mock.assert_called_once_with()
cfg_mock.assert_called_once()
if expected is not None:
self.assertEqual(cfg_mock.call_args, expected)
def test_writer_instanciation_1(self):
self._test_writer_instanciation(
expected=mock.call()
)
def test_writer_instanciation_2(self):
self._test_writer_instanciation(
'path_to_dic',
expected=mock.call('path_to_dic'),
)
def test_writer_instanciation_3(self):
self._test_writer_instanciation(
'path_to_dic',
'filetype',
expected=mock.call('path_to_dic', 'filetype'),
)
def test_writer_instanciation_4(self):
self._test_writer_instanciation(
'path/to/dic.txt',
filetype='text/plain',
expected=(
mock.call('path/to/dic.txt',
filetype='text/plain',)
)
)
class DictoolsTestWriterBasic(unittest.TestCase):
def setUp(self):
self.wt = Writer()
class DictoolsTestWriterConfig(DictoolsTestWriterBasic):
def _test_writer_config(self, *args, **kwargs):
        """`expected`: expected mock.call for Writer.config()"""
        expected = kwargs.pop('expected')
with mock.patch(f'{FILE_HANDLER}.config') as super_mock:
self.wt.config(*args, **kwargs)
super_mock.assert_called_once()
self.assertEqual(super_mock.call_args, expected)
def test_writer_config_1(self):
self._test_writer_config(
expected=mock.call(None, None)
)
def test_writer_config_2(self):
self._test_writer_config(
'path_to_dic',
expected=mock.call('path_to_dic', None),
)
def test_writer_config_3(self):
self._test_writer_config(
'path_to_dic',
'filetype',
expected=mock.call('path_to_dic', 'filetype'),
)
def test_writer_config_4(self):
self._test_writer_config(
'path/to/dic.txt',
filetype='text/plain',
expected=(
mock.call('path/to/dic.txt',
'text/plain',)
)
)
class DictoolsTestWriterParse(DictoolsTestWriterBasic):
def setUp(self):
super().setUp()
self.dic = dictools.Dic()
def test_write_no_path(self):
with self.assertRaises(dictools.FileHandlingException):
self.wt.write(self.dic)
def test_write_no_filetype(self):
self.wt.dic_path = 'example'
with self.assertRaises(dictools.FileHandlingException):
self.wt.write(self.dic)
def test_write_filetype_different_than_inferred(self):
self.wt.dic_path = 'example.txt'
self.wt.filetype = 'application/x-yaml'
with self.assertRaises(dictools.FileHandlingException):
self.wt.write(self.dic)
def test_write_working(self):
self.wt.dic_path = 'example.txt'
self.wt.filetype = 'text/plain'
with mock.patch('motus.dictools.Writer._get_writer') as mock_get:
self.wt.write(self.dic)
mock_get.assert_called_once_with('text/plain')
def test_write_working_2(self):
self.wt.dic_path = 'example.yml'
with mock.patch('motus.dictools.Writer._get_writer') as mock_get:
self.wt.write(self.dic)
mock_get.assert_called_once_with('application/x-yaml')
class DictoolsTestWriter_GetParser(DictoolsTestWriterBasic):
def test__get_writer_txt(self):
self.assertEqual(
self.wt._get_writer('text/plain'),
self.wt._txt_writer
)
def test__get_writer_yaml(self):
self.assertEqual(
self.wt._get_writer('application/x-yaml'),
self.wt._yaml_writer
)
def test__get_writer_error(self):
with self.assertRaises(dictools.FileHandlingException):
self.wt._get_writer('application/json')
class TestDictoolsWriterWrite(DictoolsTestWriterBasic):
def setUp(self):
super().setUp()
self.dic = dictools.Dic()
tmp_handler, tmp_path = tempfile.mkstemp()
self.tmp_handler = tmp_handler
self.tmp_path = tmp_path
self.tmp_file = open(tmp_path, 'r')
def tearDown(self):
self.tmp_file.close()
os.close(self.tmp_handler)
class TestDictoolsWriterWriteYaml(TestDictoolsWriterWrite):
def _test_write_yaml(self, filetype, content, expected_file):
self.dic.content = content
self.wt.config(self.tmp_path, filetype)
self.wt._yaml_writer(self.dic, self.tmp_path)
self.assertEqual(
self.tmp_file.readlines(),
expected_file,
)
def test_write_yaml_empty(self):
self._test_write_yaml(
'application/x-yaml',
EMPTY_LIST,
['[]\n']
)
def test_write_yaml_list(self):
self._test_write_yaml(
'application/x-yaml',
LIST_OF_WORDS,
LIST_OF_WORDS_YML
)
def test_write_yaml_same_length(self):
self._test_write_yaml(
'application/x-yaml',
DICT_OF_SAME_LENGTH,
DICT_OF_SAME_LENGTH_YML
)
def test_write_yaml_full_dic(self):
self._test_write_yaml(
'application/x-yaml',
FULL_DIC,
FULL_DIC_YML
)
class TestDictoolsWriterWriteTxt(TestDictoolsWriterWrite):
def _test_write_txt(self, filetype, content, expected_file):
self.dic.content = content
self.wt._txt_writer(self.dic, self.tmp_path)
self.assertEqual(
self.tmp_file.readlines(),
expected_file,
)
def test_write_txt_empty(self):
self._test_write_txt(
'text/plain',
EMPTY_LIST,
[]
)
def test_write_txt_list(self):
self._test_write_txt(
'text/plain',
LIST_OF_WORDS,
LIST_OF_WORDS_TXT
)
def test_write_txt_same_length(self):
self._test_write_txt(
'text/plain',
DICT_OF_SAME_LENGTH,
DICT_OF_SAME_LENGTH_TXT
)
def test_write_txt_full_dic(self):
self._test_write_txt(
'text/plain',
FULL_DIC,
FULL_DIC_TXT
)
if __name__ == '__main__':
unittest.main()
```
#### File: motus/tests/test_ui.py
```python
from collections import Counter
import re
import unittest
from unittest import mock
from colorama import Fore, Style
from motus.ui import UI
class UITestSelectWordlength(unittest.TestCase):
def _test_select_wordlength(self, bounds, mocked_input, expected_res):
with mock.patch('builtins.input', side_effect=mocked_input):
res = UI.select_wordlength(bounds[0], bounds[1])
self.assertEqual(res, expected_res)
def test_select_wordlength_basic(self):
self._test_select_wordlength([7, 12], ['8'], 8)
def test_select_wordlength_change_bounds(self):
self._test_select_wordlength([1, 6], ['5'], 5)
def test_select_wordlength_extended(self):
self._test_select_wordlength([7, 12], ['99', 'w', '!', ' ', '9'], 9)
def test_select_wordlength_long_input(self):
self._test_select_wordlength([7, 12],
['99', 'w', '!', ' ', '10', '11'], 10)
class UITestPrompt(unittest.TestCase):
def _test_prompt_basic(self, fun, mocked_input, expected_res):
with mock.patch('builtins.input', side_effect=mocked_input):
res = fun()
self.assertEqual(res, expected_res)
class UITestPromptGuess(UITestPrompt):
def test_prompt_guess(self):
self._test_prompt_basic(UI.prompt_guess, ['bateau'], 'BATEAU')
self._test_prompt_basic(UI.prompt_guess, [' Avion'], 'AVION')
self._test_prompt_basic(UI.prompt_guess, ['MANGER '], 'MANGER')
self._test_prompt_basic(UI.prompt_guess, ['BANG BANG'], 'BANG BANG')
self._test_prompt_basic(UI.prompt_guess, ['!'], '!')
self._test_prompt_basic(UI.prompt_guess, [''], '')
class UITestAskReplay(UITestPrompt):
def test_ask_replay_yes(self):
self._test_prompt_basic(UI.ask_replay, ['yes'], True)
self._test_prompt_basic(UI.ask_replay, ['Yes'], True)
self._test_prompt_basic(UI.ask_replay, ['yeS'], True)
self._test_prompt_basic(UI.ask_replay, ['YES'], True)
self._test_prompt_basic(UI.ask_replay, ['y'], True)
self._test_prompt_basic(UI.ask_replay, ['Y'], True)
def test_ask_replay_no(self):
self._test_prompt_basic(UI.ask_replay, ['n'], False)
self._test_prompt_basic(UI.ask_replay, ['N'], False)
self._test_prompt_basic(UI.ask_replay, ['no'], False)
self._test_prompt_basic(UI.ask_replay, ['NO'], False)
self._test_prompt_basic(UI.ask_replay, ['No'], False)
def test_ask_replay_ambiguous(self):
self._test_prompt_basic(UI.ask_replay, [''], False)
self._test_prompt_basic(UI.ask_replay, ['\n'], False)
self._test_prompt_basic(UI.ask_replay, ['$'], False)
self._test_prompt_basic(UI.ask_replay, ['maybe'], False)
class UITestInitRound(unittest.TestCase):
def _test_init_round(self, wordlength):
UI.init_round(wordlength)
self.assertIsNotNone(UI.wordlength)
self.assertEqual(UI.wordlength, wordlength)
def test_init_round(self):
self._test_init_round(12)
self._test_init_round(8)
self._test_init_round(5)
self._test_init_round(1)
class UITestDisplay(unittest.TestCase):
ALL_FORES = [Fore.__getattribute__(attr) for attr in dir(Fore)
if attr[0].isalpha()]
ALL_STYLES = [Style.__getattribute__(attr) for attr in dir(Style)
if attr[0].isalpha()]
ALL_MODS = ALL_FORES + ALL_STYLES
def _to_regex(self, lst):
return str.join('|', [re.escape(elem) for elem in lst])
def _extract_message(self, mock_print):
mock_print.assert_called_once()
name, args, kwargs = mock_print.mock_calls[0]
message = args[0]
return message
def _no_mods(self, message):
r_mods = self._to_regex(self.ALL_MODS)
return re.sub(r_mods, '', message)
def _style_only(self, message):
r_styles = self._to_regex(self.ALL_STYLES)
return str.join('', re.findall(r_styles, message))
def _fore_only(self, message):
r_fores = self._to_regex(self.ALL_FORES)
return str.join('', re.findall(r_fores, message))
def _test_display_basic(self, fun, *args, **kwargs):
if 'expected_res' in list(kwargs):
expected_res = kwargs.pop('expected_res')
else:
raise Exception("_test_display_basic needs an 'expected_res' kwarg")
with mock.patch('builtins.print') as mock_print:
fun(*args, **kwargs)
message = self._extract_message(mock_print)
self.assertEqual(message, expected_res)
class UITestDisplayFirstWord(UITestDisplay):
def _test_display_first_word(self, wordlength, first_letter):
UI.wordlength = wordlength
with mock.patch('builtins.print') as mock_print:
UI.display_first_word(first_letter)
message = self._extract_message(mock_print)
self.assertEqual(len(message), wordlength * 3)
letters = Counter(message)
self.assertEqual(message[1], first_letter)
self.assertIn(first_letter, list(letters))
self.assertEqual(letters[first_letter], 1)
self.assertIn('-', list(letters))
self.assertEqual(letters['-'], wordlength - 1)
def test_display_first_word(self):
self._test_display_first_word(3, 'A')
self._test_display_first_word(12, 'W')
self._test_display_first_word(7, 'X')
class UITest_DisplayLetter(UITestDisplay):
# /!\ Method namespace broken for legibility
def _test_printed_no_mods(self, letter, hint, expected_naked_message):
with mock.patch('builtins.print') as mock_print:
UI._display_letter(letter, hint)
message = self._extract_message(mock_print)
naked_message = self._no_mods(message)
self.assertEqual(naked_message, expected_naked_message)
# /!\ Method namespace broken for legibility
def _test_printed_style(self, letter, hint, expected_style):
with mock.patch('builtins.print') as mock_print:
UI._display_letter(letter, hint)
message = self._extract_message(mock_print)
mod_section = message[:-3]
style = self._style_only(mod_section)
self.assertEqual(len(style), len(expected_style))
self.assertEqual(style, expected_style)
# /!\ Method namespace broken for legibility
def _test_printed_fore(self, letter, hint, expected_fore):
with mock.patch('builtins.print') as mock_print:
UI._display_letter(letter, hint)
message = self._extract_message(mock_print)
mod_section = message[:-3]
fore = self._fore_only(mod_section)
self.assertEqual(len(fore), len(expected_fore))
self.assertEqual(fore, expected_fore)
# Double underscore to respect namespace
def test__display_letter_no_mods(self):
self._test_printed_no_mods('A', 'R', '[A]')
self._test_printed_no_mods('X', 'W', ' X ')
self._test_printed_no_mods('B', 'M', '(B)')
self._test_printed_no_mods('W', 'W', ' W ')
self._test_printed_no_mods('N', 'R', '[N]')
# Double underscore to respect namespace
def test__display_letter_style(self):
self._test_printed_style('A', 'R', Style.BRIGHT)
self._test_printed_style('X', 'W', '')
self._test_printed_style('B', 'M', Style.BRIGHT)
self._test_printed_style('W', 'W', '')
self._test_printed_style('N', 'R', Style.BRIGHT)
# Double underscore to respect namespace
def test__display_letter_fore(self):
self._test_printed_fore('A', 'R', Fore.RED)
self._test_printed_fore('X', 'W', '')
self._test_printed_fore('B', 'M', Fore.YELLOW)
self._test_printed_fore('W', 'W', '')
self._test_printed_fore('N', 'R', Fore.RED)
def test__display_letter_error(self):
with self.assertRaises(TypeError):
UI._display_letter('B', '')
with self.assertRaises(TypeError):
UI._display_letter('G', 'A')
class UITestDisplaySolution(UITestDisplay):
def test_display_solution_short(self):
self._test_display_basic(
UI.display_solution, 'OUI',
expected_res="Sorry, the right answer was: OUI"
)
def test_display_solution_long(self):
self._test_display_basic(
UI.display_solution, 'MOUCHERON',
expected_res="Sorry, the right answer was: MOUCHERON"
)
def test_display_solution_empty(self):
self._test_display_basic(
UI.display_solution, '',
expected_res="Sorry, the right answer was: "
)
class UITestRightGuess(UITestDisplay):
def test_right_guess_short(self):
self._test_display_basic(
UI.right_guess, 'OUI',
expected_res="Congratulations, OUI was the right answer !"
)
def test_right_guess_long(self):
self._test_display_basic(
UI.right_guess, 'MOUCHERON',
expected_res="Congratulations, MOUCHERON was the right answer !"
)
def test_right_guess_empty(self):
self._test_display_basic(
UI.right_guess, '',
expected_res="Congratulations, was the right answer !"
)
class UITestDisplayScoreSolo(UITestDisplay):
def test_display_score_solo(self):
self._test_display_basic(
UI.display_score_solo, 0, 10,
expected_res="You have 0 wins over 10 rounds."
)
self._test_display_basic(
UI.display_score_solo, 8, 8,
expected_res="You have 8 wins over 8 rounds."
)
class UITestDisplayCorrection(unittest.TestCase):
def _extract_args(self, mock_call):
args, kwargs = mock_call
return args
def _extract_args_list(self, mock_disp):
return [self._extract_args(mock_call) for mock_call
in mock_disp.call_args_list]
def _test_display_correction(self, guess, correction, expected_res):
with mock.patch('motus.ui.UI._display_letter') as mock_disp:
UI.display_correction(guess, correction)
res = self._extract_args_list(mock_disp)
self.assertEqual(res, expected_res)
def test_display_correction_correct(self):
UI.wordlength = 7
self._test_display_correction(
'CORRECT', 'RRRRRRR',
[
('C', 'R'),
('O', 'R'),
('R', 'R'),
('R', 'R'),
('E', 'R'),
('C', 'R'),
('T', 'R'),
]
)
def test_display_correction_wrong(self):
UI.wordlength = 5
self._test_display_correction(
'WRONG', 'WWWWW',
[
('W', 'W'),
('R', 'W'),
('O', 'W'),
('N', 'W'),
('G', 'W'),
]
)
def test_display_correction_incorrect(self):
UI.wordlength = 9
self._test_display_correction(
'INCORRECT', 'RRWWMMWWR',
[
('I', 'R'),
('N', 'R'),
('C', 'W'),
('O', 'W'),
('R', 'M'),
('R', 'M'),
('E', 'W'),
('C', 'W'),
('T', 'R'),
]
)
def test_display_correction_incomplete(self):
UI.wordlength = 10
self._test_display_correction(
'INCOMPL', 'WWWWWWWWWW',
[
('I', 'W'),
('N', 'W'),
('C', 'W'),
('O', 'W'),
('M', 'W'),
('P', 'W'),
('L', 'W'),
('-', 'W'),
('-', 'W'),
('-', 'W'),
]
)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jolo-dev/show-solidarity",
"score": 3
} |
#### File: show-solidarity/function/put_object.py
```python
from src.image import SolidarityImage
import json
import numpy as np
image = SolidarityImage()
def handler(event, _context):
"""
Uses Rekognition APIs to detect faces for objects uploaded to S3.
"""
# Get the object from the event.
bucket = event["bucketName"]
key = event["key"]
image_base64 = event["body"]
try:
source_image = image.decode_img(image_base64)
        # Write the decoded image back to the target S3 object.
image.write_image_to_s3(np.array(source_image), bucket, key)
return {
"key": key,
"bucketName": bucket,
}
except Exception as e:
print(
"Error putting object {} to bucket {}. Event {}".format(
key, bucket, json.dumps(event, indent=2)
)
)
raise e
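# Illustrative invocation (not part of the original source): the handler reads
# exactly these fields from the event; values below are made-up placeholders.
#
#   handler({
#       "bucketName": "my-upload-bucket",
#       "key": "uploads/selfie.png",
#       "body": "<base64-encoded image>",
#   }, None)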
```
#### File: show-solidarity/function/rekognition.py
```python
import json
from PIL import Image
from src.image import SolidarityImage
import base64
image = SolidarityImage()
# --------------- Main handler ------------------
def handler(event, _context):
"""
Uses Rekognition APIs to detect faces for objects uploaded to S3.
"""
# Get the object from the event.
bucket = event["source"]["Payload"]["bucketName"]
key = event["source"]["Payload"]["key"]
result_bucket = event["body"]["resultBucketName"]
print(bucket, key, result_bucket)
try:
        # Call the Rekognition DetectFaces API to detect faces in the S3 object.
response: Image = image.detect_faces(bucket, key)
img = image.add_background_frame(response)
print(response, base64.b64encode(img))
return {
"body": base64.b64encode(img),
"bucketName": result_bucket,
"key": key,
}
except Exception as e:
print(
"Error processing object {} from bucket {}. Event {}".format(
key, bucket, json.dumps(event, indent=2)
)
)
raise e
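# Illustrative invocation (not part of the original source): the handler reads
# exactly these fields from the event (e.g. as passed between Step Functions
# states); values below are made-up placeholders.
#
#   handler({
#       "source": {"Payload": {"bucketName": "my-upload-bucket", "key": "uploads/selfie.png"}},
#       "body": {"resultBucketName": "my-result-bucket"},
#   }, None)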
```
#### File: show-solidarity/infrastructure/solidarity_bucket.py
```python
from aws_cdk import CfnOutput
from aws_cdk.aws_s3 import (
Bucket,
BucketEncryption,
BlockPublicAccess,
CorsRule,
HttpMethods,
)
from constructs import Construct
class SolidarityBucket(Bucket):
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
super().__init__(
scope,
id,
cors=[
CorsRule(
allowed_methods=[
HttpMethods.PUT,
HttpMethods.POST,
HttpMethods.DELETE,
],
allowed_origins=["*"],
allowed_headers=["*"],
max_age=3000,
)
],
encryption=BucketEncryption.S3_MANAGED,
block_public_access=BlockPublicAccess.BLOCK_ALL,
**kwargs,
)
CfnOutput(self, "BucketName", value=self.bucket_name)
CfnOutput(self, "BucketArn", value=self.bucket_arn)
```
#### File: show-solidarity/tests/test_lambda.py
```python
# from function.put_object import handler
# import json
# from os import path
# import pytest
# @pytest.fixture
# def test_event():
# with open(path.join(path.dirname(__file__), "event.json"), "r") as f:
# event = json.load(f)
# return event
# def test_lambda_handler(test_event):
# response = handler(test_event, None)
# assert response is not None
``` |
{
"source": "jolohan/detectron2",
"score": 2
} |
#### File: jolohan/detectron2/write_submission_from_json.py
```python
import json
from pprint import pprint
import numpy as np
from detectron.datasets.nuclei.mask_encoding import rle_encode
from pycocotools import mask as mask_util
import unicodecsv as csv
from scipy import ndimage
import sys
import time
from datetime import datetime, timedelta
def join_results(seg_filename, annotation_filename, result_filename,
intersection_thresh=0.3, mask_area_threshold=15,
accuracy_threshold=0.5):
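    """Merge per-image instance masks and write an RLE submission CSV.
    Masks scoring below accuracy_threshold are skipped, heavily overlapping
    masks are suppressed or trimmed, and the surviving masks are run-length
    encoded into ['ImageId', 'EncodedPixels'] rows written to result_filename.
    """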
TOL = 0.00001
annotations = load_json_file(annotation_filename)
hex_to_id, id_to_hex = make_hex_to_id_dic(annotations['images'])
#print(annotations['images'][0])
segmentations = load_json_file(seg_filename)
#print(len(segmentations))
#print(segmentations[0])
csv_res = [['ImageId', 'EncodedPixels']]
t0 = time.time()
t1 = time.time()
old_im_id = None
all_masks = None
all_masks_no_refine = None
img_ids = []
segmentations_per_image = {}
for i, seg in enumerate(segmentations):
im_id = seg['image_id']
        if im_id not in segmentations_per_image:
segmentations_per_image[im_id] = []
segmentations_per_image[im_id].append(seg)
"""for i, id_key in enumerate(id_to_hex.keys()):
im_name = id_to_hex[id_key]
if id_key in segmentations_per_image:
pass
#csv_res.append([im_name, "1 1"])
else:
csv_res.append([im_name, "1 1"])"""
n = len(segmentations_per_image.keys())
for i, k in enumerate(segmentations_per_image.keys()):
all_segs_in_image = segmentations_per_image[k]
if i % 10 == 0:
t2 = time.time()
print(i/(n*1.0))
print("Processing file {} ({}%)".format(i, 100 * i // n)) #, end="")
print(" {}s (total: {}s)".format(t2 - t1, t2 - t0))
            time_left = ((t2 - t0)/(1.0*(i+TOL))) * (n-i)
#print("Time left:")
#print_time(time_left)
#print("\n")
t1 = t2
all_masks = None
all_masks_no_refine = None
for mask_number, mask in enumerate(all_segs_in_image):
#print(mask)
im_id = mask['image_id']
rle = mask['segmentation']
score = mask['score']
if score < accuracy_threshold:
continue
mask_int_orig = mask_util.decode(rle)
#u_rle = rle_encode(mask_int)
#print("u_rle:", u_rle)
mask_int = ndimage.morphology.binary_fill_holes(mask_int_orig.copy()).astype(np.uint8)
mask = mask_int > 0
mask_orig = mask_int_orig > 0
if all_masks is None:
all_masks = mask.copy()
all_masks[:] = False
all_masks_no_refine = mask_orig.copy()
all_masks_no_refine[:] = False
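            # Skip masks that mostly overlap masks already accepted for this
            # image; otherwise trim the overlapping pixels and drop tiny remnants.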
intersection = mask & all_masks
area_inter = intersection.sum()
if area_inter > 0:
# print("Area intersection > 0")
total_area = mask.sum()
if float(area_inter) / (float(total_area) + TOL) > intersection_thresh:
continue
mask = mask & ~all_masks
if mask.sum() < mask_area_threshold:
continue
mask_int[~mask] = 0
# add this to all_masks mask
all_masks = mask | all_masks
all_masks_no_refine = all_masks_no_refine | mask_orig
# rle = mask_util.encode(np.asarray(mask_int, order='F'))
# u_rle = mask_util.decode(rle)
u_rle = np.asarray(mask_int, order='F')
u_rle = rle_encode(u_rle)
# this
# u_rle = mask_util.decode(rle)
# u_rle = rle_encode(u_rle)
# this
#print(u_rle)
im_name = id_to_hex[im_id]
#print(u_rle)
#print(u_rle[0])
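            # Collapse the flat RLE list into space-separated "start length"
            # pairs for the EncodedPixels column.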
u_rle = [''.join(str(x)) for x in zip(u_rle[0::2], u_rle[1::2])]
u_rle = [x.replace(",", "") for x in u_rle]
u_rle = [x[1:-1] for x in u_rle]
u_rle = " ".join(x for x in u_rle)
#if (i % 1000 == 0):
# print(u_rle)
if u_rle.strip():
csv_res.append([im_name, u_rle])
write_csv(result_filename, csv_res)
def write_csv(filename, data):
# Write CSV file
with open(filename+'.csv', 'w') as fp:
writer = csv.writer(fp, encoding='utf-8')
# writer.writerow(["your", "header", "foo"]) # write header
writer.writerows(data)
print("Wrote file: "+ filename)
def print_time(seconds):
    sec = timedelta(seconds=seconds)
d = datetime(1, 1, 1) + sec
print("MIN:SEC")
print("%d:%d" % (d.minute, d.second))
def load_json_file(filename):
with open(filename) as f:
data = json.load(f)
return data
def make_hex_to_id_dic(annotations):
hex_to_id = {}
id_to_hex = {}
#print(len(annotations))
for i, image in enumerate(annotations):
#print(image)
#print(image['file_name'])
filename = image['file_name'].split('.')[0]
image_id = image['id']
hex_to_id[filename] = image_id
id_to_hex[image_id] = filename
#print(image_id, filename)
#print(hex_to_id["jjgj"]) # make it crash
return hex_to_id, id_to_hex
def write_results(seg_filename, annotation_filename, result_filename):
join_results(seg_filename=seg_filename,
annotation_filename=annotation_filename,
result_filename=result_filename)
if __name__ == '__main__':
#annotation_filename = 'detectron/datasets/data/dsb18/annotations/test'
# seg_filename = "/detectron/output/test/dsb18_test/generalized_rcnn/" \
# "segmentations_dsb18_test_results.json"
seg_filename = "segm_18000.json"
annotation_filename = 'detectron/datasets/data/dsb18/annotations/stage1_test.json'
    result_filename = 'test2'
if len(sys.argv) > 1:
seg_filename = sys.argv[1]
if len(sys.argv) > 2:
annotation_filename = sys.argv[2]
if len(sys.argv) > 3:
result_filename = sys.argv[3]
print("Writing results from seg file: ", seg_filename)
print("And annotations file: ", annotation_filename)
#seg_filename = "segmentations_dsb18_test_results"
write_results(seg_filename=seg_filename,
annotation_filename=annotation_filename,
result_filename=result_filename)
``` |
{
"source": "jolorenzo/SlipStreamConnectors",
"score": 2
} |
#### File: tar/slipstream_cloudstack/CloudStackClientCloud.py
```python
import re
import time
from .LibcloudCloudstackPatch import patch_libcloud
patch_libcloud()
import os
from urlparse import urlparse
from threading import RLock
from slipstream.UserInfo import UserInfo
from slipstream.ConfigHolder import ConfigHolder
from slipstream.cloudconnectors.BaseCloudConnector import BaseCloudConnector
from slipstream.utils.tasksrunner import TasksRunner
from slipstream.NodeDecorator import NodeDecorator
from slipstream.util import override
import slipstream.util as util
import slipstream.exceptions.Exceptions as Exceptions
from libcloud.compute.base import KeyPair
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
def getConnector(configHolder):
return getConnectorClass()(configHolder)
def getConnectorClass():
return CloudStackClientCloud
def instantiate_from_cimi(cimi_connector, cimi_cloud_credential):
user_info = UserInfo(cimi_connector['instanceName'])
cloud_params = {
UserInfo.CLOUD_USERNAME_KEY: cimi_cloud_credential['key'],
UserInfo.CLOUD_PASSWORD_KEY: cimi_cloud_credential['secret'],
'endpoint': cimi_connector.get('endpoint'),
'zone': cimi_connector.get('zone'),
}
user_info.set_cloud_params(cloud_params)
config_holder = ConfigHolder(options={'verboseLevel': 0, 'retry': False})
os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = cimi_connector['instanceName']
connector_instance = CloudStackClientCloud(config_holder)
connector_instance._initialization(user_info)
return connector_instance
STATE_MAP = {0: 'Running',
1: 'Rebooting',
2: 'Terminated',
3: 'Pending',
4: 'Unknown',
5: 'Stopped'}
class CloudStackClientCloud(BaseCloudConnector):
cloudName = 'cloudstack'
def __init__(self, configHolder):
libcloud.security.VERIFY_SSL_CERT = False
super(CloudStackClientCloud, self).__init__(configHolder)
self._set_capabilities(contextualization=True,
generate_password=True,
direct_ip_assignment=True,
orchestrator_can_kill_itself_or_its_vapp=True)
self._zone = None
self._zones = None
self._sizes = None
self._images = None
self._lock_zone = RLock()
self._lock_zones = RLock()
self._lock_sizes = RLock()
self._lock_images = RLock()
self._lock_libcloud_driver = RLock()
@property
def libcloud_driver(self):
with self._lock_libcloud_driver:
if not hasattr(self._thread_local, '_libcloud_driver'):
util.printDetail('Initializing libcloud driver')
self._thread_local._libcloud_driver = self._get_driver(self.user_info)
return self._thread_local._libcloud_driver
@property
def zones(self):
if self._zones is None:
with self._lock_zones:
if self._zones is None:
util.printDetail('Getting zones')
self._zones = self.libcloud_driver.list_locations()
return self._zones
@property
def zone(self):
if self._zone is None:
with self._lock_zone:
if self._zone is None:
util.printDetail('Getting zone')
self._zone = self._get_zone(self.user_info)
return self._zone
@property
def sizes(self):
if self._sizes is None:
with self._lock_sizes:
if self._sizes is None:
util.printDetail('Getting sizes')
self._sizes = self.libcloud_driver.list_sizes(location=self.zone)
return self._sizes
@property
def images(self):
if self._images is None:
with self._lock_images:
if self._images is None:
util.printDetail('Getting images')
self._images = self.libcloud_driver.list_images(location=self.zone)
return self._images
@override
def _initialization(self, user_info):
util.printStep('Initialize the CloudStack connector.')
self.user_info = user_info
if self.is_build_image():
raise NotImplementedError('The run category "%s" is not yet implemented' % self.run_category)
elif self.is_deployment():
try:
self._import_keypair(user_info)
except Exceptions.ExecutionException as e:
util.printError(e)
@override
def _finalization(self, user_info):
try:
kp_name = user_info.get_keypair_name()
if kp_name:
self._delete_keypair(kp_name)
except: # pylint: disable=bare-except
pass
@override
def _start_image(self, user_info, node_instance, vm_name):
return self._start_image_on_cloudstack(user_info, node_instance, vm_name)
def _start_image_on_cloudstack(self, user_info, node_instance, vm_name):
instance_name = self.format_instance_name(vm_name)
instance_type = node_instance.get_instance_type()
ip_type = node_instance.get_network_type()
keypair = None
contextualization_script = None
if not node_instance.is_windows():
keypair = user_info.get_keypair_name()
contextualization_script = self._get_bootstrap_script_if_not_build_image(node_instance)
security_groups = node_instance.get_security_groups()
security_groups = (len(security_groups) > 0) and security_groups or None
try:
size = [i for i in self.sizes if i.name == instance_type][0]
except IndexError:
raise Exceptions.ParameterNotFoundException("Couldn't find the specified instance type: %s" % instance_type)
image = self._get_image(node_instance)
if node_instance.is_windows():
instance = self.libcloud_driver.create_node(
name=instance_name,
size=size,
image=image,
location=self.zone,
ex_security_groups=security_groups)
else:
instance = self.libcloud_driver.create_node(
name=instance_name,
size=size,
image=image,
location=self.zone,
ex_keyname=keypair,
ex_userdata=contextualization_script,
ex_security_groups=security_groups)
ip = self._get_instance_ip_address(instance, ip_type)
if not ip:
raise Exceptions.ExecutionException("Couldn't find a '%s' IP" % ip_type)
vm = dict(instance=instance,
ip=ip,
id=instance.id)
return vm
def _get_zone(self, user_info):
zone_name = user_info.get_cloud('zone', '')
try:
return [i for i in self.zones if i.name.lower() == zone_name.lower()][0]
except IndexError:
raise Exceptions.ParameterNotFoundException("Couldn't find the specified zone: %s" % zone_name)
def _get_image(self, node_instance):
image_id = node_instance.get_image_id()
try:
return [i for i in self.images if i.id == image_id][0]
except IndexError:
raise Exceptions.ParameterNotFoundException("Couldn't find the specified image: %s" % image_id)
@override
def list_instances(self):
return self.libcloud_driver.list_nodes(location=self.zone)
@override
def _create_allow_all_security_group(self):
sg_name = NodeDecorator.SECURITY_GROUP_ALLOW_ALL_NAME
sg_desc = NodeDecorator.SECURITY_GROUP_ALLOW_ALL_DESCRIPTION
driver = self.libcloud_driver
if any([sg.get('name') == sg_name for sg in driver.ex_list_security_groups()]):
return
sg = driver.ex_create_security_group(sg_name, description=sg_desc)
driver.ex_authorize_security_group_ingress(sg_name, 'tcp', '0.0.0.0/0', 0, 65535)
driver.ex_authorize_security_group_ingress(sg_name, 'udp', '0.0.0.0/0', 0, 65535)
driver.ex_authorize_security_group_ingress(sg_name, 'icmp', '0.0.0.0/0')
def _stop_instances(self, instances):
tasksRunnner = TasksRunner(self._stop_instance,
max_workers=self.max_iaas_workers,
verbose=self.verboseLevel)
for instance in instances:
tasksRunnner.put_task(instance)
tasksRunnner.run_tasks()
tasksRunnner.wait_tasks_processed()
def _stop_instance(self, instance):
self.libcloud_driver.destroy_node(instance)
@override
def _stop_deployment(self):
instances = [vm['instance'] for vm in self.get_vms().itervalues()]
self._stop_instances(instances)
@override
def _stop_vms_by_ids(self, ids):
instances = [i for i in self.list_instances() if i.id in ids]
self._stop_instances(instances)
@staticmethod
def _get_driver(user_info):
CloudStack = get_driver(Provider.CLOUDSTACK)
url = urlparse(user_info.get_cloud_endpoint())
secure = (url.scheme == 'https')
return CloudStack(user_info.get_cloud_username(),
user_info.get_cloud_password(),
secure=secure,
host=url.hostname,
port=url.port,
path=url.path)
@override
def _vm_get_password(self, vm):
password = vm['instance'].extra.get('password', None)
print 'VM Password: ', password
return password
@override
def _vm_get_ip(self, vm):
return vm['ip']
@override
def _vm_get_id(self, vm):
return vm['id']
@override
def _vm_get_state(self, vm_instance):
return STATE_MAP[vm_instance.state]
def _get_vm_size(self, vm_instance):
try:
size = [i for i in self.sizes if i.id == vm_instance.extra.get('size_id')][0]
except IndexError:
return None
else:
return size
@override
def _vm_get_ip_from_list_instances(self, vm_instance):
return self._get_instance_ip_address(vm_instance)
@override
def _vm_get_id_from_list_instances(self, vm):
return vm.id
@override
def _vm_get_cpu(self, vm_instance):
size = self._get_vm_size(vm_instance)
if size and 'cpu' in size.extra:
return size.extra.get('cpu')
@override
def _vm_get_ram(self, vm_instance):
size = self._get_vm_size(vm_instance)
if size:
return size.ram
@override
def _vm_get_root_disk(self, vm_instance):
size = self._get_vm_size(vm_instance)
if size:
return size.disk
@override
def _vm_get_instance_type(self, vm_instance):
return vm_instance.extra.get('size_name')
@override
def _list_vm_sizes(self):
return self.sizes
@override
def _size_get_cpu(self, vm_size):
return vm_size.extra.get('cpu')
@override
def _size_get_ram(self, vm_size):
return vm_size.ram
@override
def _size_get_instance_type(self, vm_size):
return vm_size.name
def _get_instance_ip_address(self, instance, ipType='public'):
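        # Return an address of the requested type, falling back to the other
        # type (or an empty string) when the instance has no such IP.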
if ipType.lower() == 'private':
return (len(instance.private_ips) != 0) and instance.private_ips[0] or (len(instance.public_ips) != 0) and instance.public_ips[0] or ''
else:
return (len(instance.public_ips) != 0) and instance.public_ips[0] or (len(instance.private_ips) != 0) and instance.private_ips[0] or ''
def _import_keypair(self, user_info):
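        # Use a microsecond timestamp so each run imports a uniquely named keypair.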
kp_name = 'ss-key-%i' % int(time.time() * 1000000)
public_key = user_info.get_public_keys()
try:
kp = self.libcloud_driver.import_key_pair_from_string(kp_name, public_key)
except Exception as e:
user_info.set_keypair_name(None)
raise Exceptions.ExecutionException('Cannot import the public key. Reason: %s' % e)
kp_name = kp.name
user_info.set_keypair_name(kp_name)
return kp_name
def _create_keypair_and_set_on_user_info(self, user_info):
kp_name = 'ss-build-image-%i' % int(time.time())
kp = self.libcloud_driver.create_key_pair(kp_name)
user_info.set_private_key(kp.private_key)
kp_name = kp.name
user_info.set_keypair_name(kp_name)
return kp_name
def _delete_keypair(self, kp_name):
kp = KeyPair(name=kp_name, public_key=None, fingerprint=None,
driver=self.libcloud_driver)
return self.libcloud_driver.delete_key_pair(kp)
def format_instance_name(self, name):
name = self.remove_bad_char_in_instance_name(name)
return self.truncate_instance_name(name)
def truncate_instance_name(self, name):
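        # Keep instance names within 63 characters by joining the first and last 31 characters.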
if len(name) <= 63:
return name
else:
return name[:31] + '-' + name[-31:]
def remove_bad_char_in_instance_name(self, name):
try:
newname = re.sub(r'[^a-zA-Z0-9-]', '', name)
m = re.search('[a-zA-Z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?', newname)
return m.string[m.start():m.end()]
except:
raise Exceptions.ExecutionException(
'Cannot handle the instance name "%s". Instance name can '
'contain ASCII letters "a" through "z", the digits "0" '
'through "9", and the hyphen ("-"), must be between 1 and 63 '
'characters long, and can\'t start or end with "-" '
'and can\'t start with digit' % name)
def _build_image(self, user_info, node_instance):
raise Exceptions.ExecutionException("%s doesn't implement build image feature." %
self.__class__.__name__)
```
#### File: tar/test/TestCloudStackClientCloudLive.py
```python
import os
import time
import unittest
from mock import Mock
from slipstream.cloudconnectors.BaseCloudConnector import BaseCloudConnector
from slipstream_cloudstack.CloudStackClientCloud import CloudStackClientCloud
from slipstream.ConfigHolder import ConfigHolder
from slipstream.UserInfo import UserInfo
from slipstream.NodeInstance import NodeInstance
from slipstream.NodeDecorator import RUN_CATEGORY_DEPLOYMENT, KEY_RUN_CATEGORY, NodeDecorator
from slipstream import util
CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'pyunit.credentials.properties')
# Example configuration file.
"""
[Test]
General.ssh.public.key = ssh-rsa ....
cloudstack.endpoint = https://api.exoscale.ch/compute
cloudstack.key = xxx
cloudstack.secret = yyy
cloudstack.zone = CH-GV2
cloudstack.template = 8c7e60ae-3a30-4031-a3e6-29832d85d7cb
cloudstack.instance.type = Micro
cloudstack.security.groups = default
cloudstack.max.iaas.workers = 2
""" # pylint: disable=pointless-string-statement
# pylint: disable=protected-access
class TestCloudStackClientCloud(unittest.TestCase):
connector_instance_name = 'cloudstack'
def constructKey(self, name):
return self.connector_instance_name + '.' + name
def setUp(self):
BaseCloudConnector._publish_vm_info = Mock() # pylint: disable=protected-access
os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-%s' % time.time()
if not os.path.exists(CONFIG_FILE):
raise Exception('Configuration file %s not found.' % CONFIG_FILE)
self.ch = ConfigHolder(configFile=CONFIG_FILE, context={'foo': 'bar'})
self.ch.set(KEY_RUN_CATEGORY, '')
self.ch.set('verboseLevel', self.ch.config['General.verbosity'])
self.client = CloudStackClientCloud(self.ch)
self.user_info = UserInfo(self.connector_instance_name)
self.user_info[self.constructKey('endpoint')] = self.ch.config['cloudstack.endpoint']
self.user_info[self.constructKey('zone')] = self.ch.config['cloudstack.zone']
self.user_info[self.constructKey('username')] = self.ch.config['cloudstack.key']
self.user_info[self.constructKey('password')] = self.ch.config['cloudstack.secret']
security_groups = self.ch.config['cloudstack.security.groups']
instance_type = self.ch.config['cloudstack.instance.type']
self.user_info['General.' + UserInfo.SSH_PUBKEY_KEY] = self.ch.config['General.ssh.public.key']
image_id = self.ch.config[self.constructKey('template')]
self.multiplicity = 2
self.max_iaas_workers = self.ch.config.get('cloudstack.max.iaas.workers',
str(self.multiplicity))
self.node_name = 'test_node'
self.node_instances = {}
for i in range(1, self.multiplicity + 1):
node_instance_name = self.node_name + '.' + str(i)
self.node_instances[node_instance_name] = NodeInstance({
NodeDecorator.NODE_NAME_KEY: self.node_name,
NodeDecorator.NODE_INSTANCE_NAME_KEY: node_instance_name,
'cloudservice': self.connector_instance_name,
# 'index': i,s
'image.platform': 'linux',
'image.imageId': image_id,
'image.id': image_id,
self.constructKey('instance.type'): instance_type,
self.constructKey('security.groups'): security_groups,
'network': 'private'
})
def tearDown(self):
os.environ.pop('SLIPSTREAM_CONNECTOR_INSTANCE')
os.environ.pop('SLIPSTREAM_BOOTSTRAP_BIN')
self.client = None
self.ch = None
def xtest_1_startStopImages(self):
self.client.run_category = RUN_CATEGORY_DEPLOYMENT
try:
self.client.start_nodes_and_clients(self.user_info, self.node_instances)
util.printAndFlush('Instances started')
vms = self.client.get_vms()
assert len(vms) == self.multiplicity
finally:
self.client.stop_deployment()
def xtest_2_buildImage(self):
raise NotImplementedError()
if __name__ == '__main__':
unittest.main()
```
#### File: tar/test/TestCloudStackClientCloud.py
```python
import os
import unittest
from slipstream.ConfigHolder import ConfigHolder
from slipstream_cloudstack.CloudStackClientCloud import CloudStackClientCloud
class TestCloudStackClientCloud(unittest.TestCase):
connector_instance_name = 'cloudstack'
def setUp(self):
os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.connector_instance_name
def tearDown(self):
pass
def test_init(self):
CloudStackClientCloud(ConfigHolder(context={'foo': 'bar'}))
if __name__ == '__main__':
unittest.main()
```
#### File: tar/slipstream_docker/DockerClientCluster.py
```python
import time
import slipstream.util as util
import slipstream.exceptions.Exceptions as Exceptions
from slipstream.util import override
from slipstream.cloudconnectors.BaseCloudConnector import BaseCloudConnector
from slipstream.utils.ssh import generate_keypair
import ssl
import urllib
import re
import base64
import requests
import json
import sys
def getConnector(config_holder):
return getConnectorClass()(config_holder)
def getConnectorClass():
return DockerClientCluster
class DockerClientCluster(BaseCloudConnector):
def _resize(self, node_instance):
raise Exceptions.ExecutionException('{0} doesn\'t implement resize feature.'.format(self.__class__.__name__))
def _detach_disk(self, node_instance):
raise Exceptions.ExecutionException('{0} doesn\'t implement detach disk feature.'.format(self.__class__.__name__))
def _attach_disk(self, node_instance):
raise Exceptions.ExecutionException('{0} doesn\'t implement attach disk feature.'.format(self.__class__.__name__))
cloudName = 'docker'
def __init__(self, config_holder):
super(DockerClientCluster, self).__init__(config_holder)
self._set_capabilities(contextualization=True)
self.user_info = None
@override
def _initialization(self, user_info, **kwargs):
util.printStep('Initialize the Docker connector.')
self.user_info = user_info
def format_instance_name(self, name):
new_name = self.remove_bad_char_in_instance_name(name)
return self.truncate_instance_name(new_name)
@staticmethod
def truncate_instance_name(name):
if len(name) <= 128:
return name
else:
return name[:63] + '-' + name[-63:]
@staticmethod
def remove_bad_char_in_instance_name(name):
return re.sub('[^a-zA-Z0-9-]', '', name)
def _set_instance_name(self, vm_name):
return self.format_instance_name(vm_name)
@override
def _start_image(self, user_info, node_instance, vm_name):
# Adapt naming convention from IaaS model
try:
service = json.loads(node_instance.get_cloud_parameter("service"))
except ValueError as ve:
raise ValueError("Requested service is not in JSON format - %s" % ve), None, sys.exc_info()[2]
except:
raise
service_name = service["Name"] if service.has_key("Name") else vm_name
util.printStep('Deploy service %s to %s' % (service_name, user_info.get_cloud_endpoint()))
return self._start_container_in_docker(user_info, node_instance, service_name)
def _start_container_in_docker(self, user_info, node_instance, service_name):
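        # POST the JSON service definition to the Docker API's /services/create endpoint.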
request_url = "%s/services/create" % (user_info.get_cloud_endpoint())
service = node_instance.get_cloud_parameter("service")
try:
create = requests.post(request_url, data=service,
headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
except requests.exceptions.ConnectionError as e:
raise requests.exceptions.ConnectionError("Remote Docker API is not running - %s" % e), None, sys.exc_info()[2]
except:
raise
response_json = json.loads(create.text)
self.validate_start_image(response_json)
return response_json
def validate_start_image(self, response):
"""Takes the raw response from _start_container_in_docker
and checks whether the service creation request was successful or not"""
if len(response.keys()) == 1 and response.has_key("message"):
raise Exceptions.ExecutionException(response["message"])
@override
def list_instances(self):
request_url = "%s/services" % (self.user_info.get_cloud_endpoint())
services_list = requests.get(request_url)
# return a list of objects instead of plain text
return json.loads(services_list.text)
@override
def _stop_vms_by_ids(self, ids):
for service_id in ids:
delete_url = "%s/services/%s" % (self.user_info.get_cloud_endpoint(), service_id)
delete = requests.delete(delete_url)
if delete.text:
self._print_detail(delete.text)
@override
def _build_image(self, user_info, node_instance):
return self._build_container_image(user_info, node_instance)
def _build_container_image(self, user_info, node_instance):
#TODO: build docker image and upload to registry
return None
def _vm_get_name(self, vm):
# Return the service name
return vm["Spec"]["Name"]
def _vm_get_image_name(self, vm):
# Return the container image name
return vm["Spec"]["TaskTemplate"]["ContainerSpec"]["Image"]
def _vm_get_port_mappings(self, vm):
        # Return the first hostPort:containerPort mapping as a string
return "%s:%s" % (vm["Endpoint"]["Ports"][0]["PublishedPort"],
vm["Endpoint"]["Ports"][0]["TargetPort"])
def _vm_get_restart_policy(self, vm):
# Return the container restart policy
return vm["Spec"]["TaskTemplate"]["RestartPolicy"]["Condition"]
def _vm_get_creation_time(self, vm):
# Return the container creation time
return vm['CreatedAt']
def _vm_get_start_time(self, vm):
# Return the container creation time
return vm['UpdatedAt']
@override
def _vm_get_ip(self, vm):
if vm.has_key("Endpoint"):
return vm["Endpoint"]["VirtualIPs"][0]["Addr"]
else:
return vm
def _vm_get_replicas(self, vm):
return str(vm["Spec"]["Mode"]["Replicated"]["Replicas"])
@override
def _vm_get_id(self, vm):
return vm["ID"]
@override
def _vm_get_ip_from_list_instances(self, vm_instance):
return self._vm_get_ip(vm_instance)
@override
def _vm_get_instance_type(self, vm_instance):
return "service"
```
#### File: tar/slipstream_docker/DockerRunInstancesCommand.py
```python
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from slipstream.command.CloudClientCommand import main
from slipstream.command.RunInstancesCommand import RunInstancesCommand
from slipstream_docker.DockerCommand import DockerCommand
from slipstream.NodeDecorator import NodeDecorator
from slipstream.NodeInstance import NodeInstance
class DockerRunInstances(RunInstancesCommand, DockerCommand):
SERVICE_REQUEST = "service"
def __init__(self):
super(DockerRunInstances, self).__init__()
def set_cloud_specific_options(self, parser):
DockerCommand.set_cloud_specific_options(self, parser)
self.parser.add_option('--' + self.SERVICE_REQUEST, dest=self.SERVICE_REQUEST,
help='Service request to be passed to Docker',
default='', metavar='SERVICE-REQUEST')
def _set_command_specific_options(self, parser):
pass
def _get_command_specific_user_cloud_params(self):
# NodeDecorator.NATIVE_CONTEXTUALIZATION_KEY doesn't apply here, and it doesn't even exist anymore
# so simply force return {} as it line 97 of RunInstancesCommand.py
return {}
def _get_node_instance(self):
# the runtime parameters are not the same as for VMs
runtime_parameters = {
NodeDecorator.NODE_INSTANCE_NAME_KEY: self.get_node_instance_name(),
'cloudservice': self._cloud_instance_name
}
return NodeInstance(runtime_parameters)
def get_cloud_specific_node_inst_cloud_params(self):
node_params = DockerCommand.get_cloud_specific_node_inst_cloud_params(self)
node_params[self.SERVICE_REQUEST] = self.get_option(self.SERVICE_REQUEST)
return node_params
def _get_command_specific_node_inst_cloud_params(self):
# LOGIN_PASS_KEY does not apply to the Docker connector
cloud_params = {}
return cloud_params
def get_cloud_specific_mandatory_options(self):
return DockerCommand.get_cloud_specific_mandatory_options(self)
def _get_command_mandatory_options(self):
# Remove USER PASS from mandatory parameters as we might be dealing
# with a no protected cluster
return [self.SERVICE_REQUEST]
if __name__ == "__main__":
main(DockerRunInstances)
```
#### File: tar/slipstream_openstack/OpenStackCommand.py
```python
from slipstream.command.CloudClientCommand import CloudClientCommand
from slipstream_openstack.OpenStackClientCloud import OpenStackClientCloud, \
STATE_MAP
class OpenStackCommand(CloudClientCommand):
STATE_MAP = STATE_MAP
DOMAIN_KEY = 'domain'
REGION_KEY = 'region'
PROJECT_KEY = 'project'
ENDPOINT_KEY = 'endpoint'
SERVICE_TYPE_KEY = 'service-type'
SERVICE_NAME_KEY = 'service-name'
IDENTITY_VERSION_KEY = 'identity-version'
def __init__(self):
super(OpenStackCommand, self).__init__()
def get_connector_class(self):
return OpenStackClientCloud
def set_cloud_specific_options(self, parser):
parser.add_option('--' + self.ENDPOINT_KEY, dest=self.ENDPOINT_KEY, help='Identity service (Keystone)',
default='', metavar='ENDPOINT')
parser.add_option('--' + self.REGION_KEY, dest=self.REGION_KEY, help='Region (default: regionOne)',
default='regionOne', metavar='REGION')
parser.add_option('--' + self.DOMAIN_KEY, dest=self.DOMAIN_KEY, help='Domain (Identity v3 only)',
default='', metavar='DOMAIN')
parser.add_option('--' + self.PROJECT_KEY, dest=self.PROJECT_KEY, help='Project (Tenant)',
default='', metavar='PROJECT')
parser.add_option('--' + self.IDENTITY_VERSION_KEY, dest=self.IDENTITY_VERSION_KEY,
help='Identity API version (v2|v3)', default='v2', metavar='VERSION')
parser.add_option('--' + self.SERVICE_TYPE_KEY, dest=self.SERVICE_TYPE_KEY,
help='Type-name of the service which provides the instances functionality (default: compute)',
default='compute', metavar='TYPE')
parser.add_option('--' + self.SERVICE_NAME_KEY, dest=self.SERVICE_NAME_KEY,
help='Name of the service which provides the instances functionality (optional)',
default=None, metavar='NAME')
def get_cloud_specific_user_cloud_params(self):
return {'tenant-name': self.get_option(self.PROJECT_KEY),
'serviceRegion': self.get_option(self.REGION_KEY),
'domain-name': self.get_option(self.DOMAIN_KEY),
self.ENDPOINT_KEY: self.get_option(self.ENDPOINT_KEY),
'serviceType': self.get_option(self.SERVICE_TYPE_KEY),
'serviceName': self.get_option(self.SERVICE_NAME_KEY),
'identityVersion': self.get_option(self.IDENTITY_VERSION_KEY)}
def get_cloud_specific_mandatory_options(self):
return [self.REGION_KEY,
self.PROJECT_KEY,
self.ENDPOINT_KEY,
self.SERVICE_TYPE_KEY]
```
#### File: tar/slipstream_openstack/TestBaseLive.py
```python
import os
import sys
import unittest
import traceback
from mock import Mock
from pprint import pprint as pp
from slipstream import util
from slipstream.ConfigHolder import ConfigHolder
from slipstream.NodeInstance import NodeInstance
from slipstream.SlipStreamHttpClient import UserInfo
from slipstream.NodeDecorator import KEY_RUN_CATEGORY
from slipstream.NodeDecorator import RUN_CATEGORY_DEPLOYMENT
def publish_vm_info(self, vm, node_instance):
# pylint: disable=unused-argument, protected-access
print '%s, %s' % (self._vm_get_id(vm), self._vm_get_ip(vm))
class TestBaseLive(unittest.TestCase):
cin = ''
node_instances = {} # of NodeInstance()
multiplicity = 0
max_iaas_workers = 1
def construct_key(self, name):
return self.cin + '.' + name
def _conf_val(self, key, default=None):
conf_key = self.construct_key(key)
if default:
return self.ch.config.get(conf_key, default)
return self.ch.config[conf_key]
def _build_user_info(self, keys):
self.user_info = UserInfo(self.cin)
self.user_info['General.' + UserInfo.SSH_PUBKEY_KEY] = self.ch.config[
'General.ssh.public.key']
for k in keys:
self.user_info[self.construct_key(k)] = self._conf_val(k)
def _load_config(self, conf_file):
if not os.path.exists(conf_file):
raise Exception('Configuration file %s not found.' % conf_file)
self.ch = ConfigHolder(configFile=conf_file, context={'foo': 'bar'})
self.ch.set(KEY_RUN_CATEGORY, '')
def _build_client(self, testedCls):
testedCls._publish_vm_info = publish_vm_info # pylint: disable=protected-access
self.client = testedCls(self.ch)
def _get_ex_msg(self, ex):
if hasattr(ex, 'message'):
return ex.message
        if hasattr(ex, 'args') and ex.args:
            return ex.args[0]
return ''
def _setUp(self, testedCls, conf_file, conf_keys):
"""(Re-)sets the following fields
self.ch - ConfigHolder
self.client - instance of BaseCloudConnector
self.user_info - UserInfo
self.multiplicity - int
self.max_iaas_workers - str
"""
os.environ['SLIPSTREAM_CONNECTOR_INSTANCE'] = self.cin
os.environ['SLIPSTREAM_BOOTSTRAP_BIN'] = 'http://example.com/bootstrap'
os.environ['SLIPSTREAM_DIID'] = '00000000-0000-0000-0000-000000000000'
self._load_config(conf_file)
self._build_client(testedCls)
self._build_user_info(conf_keys)
pp(self.user_info)
self.multiplicity = int(self._conf_val('multiplicity', 2))
self.max_iaas_workers = self._conf_val('max.iaas.workers',
str(self.multiplicity))
def _test_start_stop_images(self):
"Live test that starts and stops VMs on a cloud."
self.client.run_category = RUN_CATEGORY_DEPLOYMENT
success = True
error = ''
try:
self.client.start_nodes_and_clients(self.user_info,
self.node_instances)
vms = self.client.get_vms()
assert len(vms) == self.multiplicity
util.printAction('Instances started.')
pp(vms)
except Exception as ex:
success = False
error = self._get_ex_msg(ex)
util.printError("Exception caught while starting instances!")
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
finally:
util.printAction("Stopping deployment.")
self.client.stop_deployment()
self.assertEquals(success, True, error)
``` |
{
"source": "jolsfd/imagenamer",
"score": 3
} |
#### File: imagenamer/src/menu.py
```python
import os
from src.settings import Settings
from src.rename import Rename
from colorama import Fore, Back, Style
class Menu:
def __init__(self, path_to_settings):
self.settings_objects = Settings(path_to_settings)
self.settings = self.settings_objects.load_settings()
self.error = self.settings_objects.check_settings(self.settings)
self.rename_object = Rename(self.settings)
def rename(self):
excluded_folders = []
exclude_input = " "
path = input("Please input a path: ")
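        # Keep prompting for folders to exclude until the user submits an empty line.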
while len(exclude_input) > 0:
exclude_input = input(
f'Optional! Exclude folder (Press "ENTER" to skip!): '
)
if len(exclude_input) > 0:
excluded_folders.append(exclude_input)
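        # Normalize Windows-style backslashes in the path to forward slashes.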
if "\\" in path:
path = path.replace("\\", "/")
if os.path.exists(path):
number_of_images, number_of_raws = self.rename_object.collect_files(
path, excluded_folders
)
print(f"Rename {number_of_images} images and {number_of_raws} raws")
user_input = input("Yes/No [y/n]")
if user_input == "y":
self.rename_object.rename_images()
else:
print(Fore.RED + f"{path} does not exist!" + Fore.RESET)
self.rename_object.clear()
def new_settings(self):
print(f'Enter new settings. If you do not want to change press "Enter"\n')
# Get new attributes from user
print(f'Current format: {self.settings["format"]}')
print(
f'"$Y": Year ; "$M": Month ; "$D": Day ; "$h": Hour ; "$m": Minute ; "$s": Seconds ; "MODEL": Camera Model'
)
new_format = input(f"Enter new file format: ")
print(Fore.CYAN + f"\nAll Extensions with dot!" + Fore.RESET)
add_image_extension = input(f"Add new image extension: ")
add_raw_extension = input(f"Add new raw extension: ")
del_image_extension = input(f"Remove image extension:")
del_raw_extension = input(f"Remove raw extension: ")
if self.settings["raw_rename"] == False:
raw_rename_input = input(f"Do you want to enable Raw Rename ? [y/n]")
else:
raw_rename_input = input(f"Do you want to disable Raw Rename ? [y/n]")
if self.settings["safe_rename"] == False:
safe_rename_input = input(f"Do you want to enable Safe Rename ? [y/n]")
else:
safe_rename_input = input(f"Do you want to disable Safe Rename ? [y/n]")
# Make new settings
new_settings = self.settings
# Change attributes in new settings
if raw_rename_input == "y":
if self.settings["raw_rename"] == False:
new_settings["raw_rename"] = True
else:
new_settings["raw_rename"] = False
if safe_rename_input == "y":
if self.settings["safe_rename"] == False:
new_settings["safe_rename"] = True
else:
new_settings["safe_rename"] = False
if len(new_format) > 0:
new_settings["format"] = new_format
new_settings["safe_string"] = new_format.partition("$")[0]
if len(add_image_extension) > 0:
new_settings["image_ext"].append(add_image_extension)
if len(add_raw_extension) > 0:
new_settings["raw_ext"].append(add_raw_extension)
if len(del_image_extension) > 0:
try:
new_settings["image_ext"].remove(del_image_extension)
except ValueError:
pass
if len(del_raw_extension) > 0:
try:
new_settings["raw_ext"].remove(del_raw_extension)
except ValueError:
pass
# Print new settings
print(
"\n"
f"Safe String: {new_settings['safe_string']}\n"
f"File Format: {new_settings['format']}\n"
f"Image extensions: {new_settings['image_ext']}\n"
f"Raw extensions: {new_settings['raw_ext']}\n"
f"Raw Rename: {new_settings['raw_rename']}\n"
f"Safe Rename: {new_settings['safe_rename']}\n"
)
# Confirm new settings
confirm = input(f"Do you confirm settings ? [y/n]")
if confirm == "y":
# Save new settings into json file
self.settings_objects.save_settings(new_settings)
self.rename_object.update_settings(new_settings)
else:
print(Fore.RED + f"New settings were not saved.\n" + Fore.RESET)
def help(self):
print(
Fore.BLUE
+ f'"rename"'
+ Fore.RESET
+ f" - renames all images in a folder structure\n"
+ Fore.BLUE
+ f'"settings"'
+ Fore.RESET
+ f" - change settings\n"
+ Fore.BLUE
+ f'"quit"'
+ Fore.RESET
+ f" - quits the application\n"
+ Fore.LIGHTMAGENTA_EX
+ f"\nFor more Help please visit https://github.com/jolsfd/imagenamer/ \n"
+ Fore.RESET
)
``` |
{
"source": "jolsfd/rsa-algorithm",
"score": 3
} |
#### File: jolsfd/rsa-algorithm/bench_rsa.py
```python
import time
import platform
import cpuinfo
import examples
from rsa import RSA
class Benchmark:
def __init__(self, n: int, e: int, d: int, m: int, c: int, text: str) -> None:
self.n = n
self.e = e
self.d = d
self.m = m
self.c = c
self.text = text
self.rsa = RSA()
def bench_encrypt(self, n: int) -> float:
start = time.time()
for i in range(n):
self.rsa.encrypt(self.m, self.e, self.n)
return time.time() - start
def bench_decrypt(self, n: int) -> float:
start = time.time()
for i in range(n):
self.rsa.decrypt(self.c, self.d, self.n)
return time.time() - start
def bench_encrypt_text(self, n: int) -> float:
start = time.time()
for i in range(n):
self.text_c = self.rsa.encrypt_text(self.text, self.d, self.n)
return time.time() - start
def bench_decrypt_text(self, n: int) -> float:
start = time.time()
for i in range(n):
self.rsa.decrypt_text(self.text_c, self.d, self.n)
return time.time() - start
if __name__ == "__main__":
m = examples.m_4096
c = examples.c_4096
text = """Die Kryptographie ist eine Wissenschaft, die sich mit den Methoden beschäftigt, die durch Verschlüsselung und verwandte Verfahren Daten vor unbefugtem Zugriff schützen sollen. Das eine wichtige Hilfsmittel der Kryptographie ist die Matehmatik, denn nur durch mathematische Denkweise und mithilfe von mathematischen Kenntnissen ist es möglich, Verfahren zur sicheren Verschlüsselung zu entwickeln. Das andere wichtige Hilfsmittel ist der Computer. Dieser führt die Verschlüsselungsverfahren aus und leistet wichtige Dienste bei der Untersuchung von kryptografischen Methoden und Schwachstellen."""
    # System information
print("=" * 40, "System Information", "=" * 40)
computer = platform.uname()
print(f"System: {computer.system}")
print(f"Node Name: {computer.node}")
print(f"Release: {computer.release}")
print(f"Version: {computer.version}")
print(f"Processor: {computer.processor}")
print()
# CPU Information
print(f"=" * 40, "CPU Information", "=" * 40)
info = cpuinfo.get_cpu_info()
name = info["brand_raw"]
cores = info["count"]
print(f"Name: {name}")
print(f"CPU Cores: {cores}")
print()
# Python Information
print("=" * 40, "Python Information", "=" * 40)
print(f"Version: {platform.python_version()}")
print(f"Compiler: {platform.python_compiler()}")
print()
    # Driver code
count = 100
print(f"Running Benchmark {count} times...")
bench = Benchmark(examples.n_4096, examples.e_4096, examples.d_4096, m, c, text)
    time_encrypt = bench.bench_encrypt(count)
    print(f"{time_encrypt}s for encryption.")
    time_decrypt = bench.bench_decrypt(count)
    print(f"{time_decrypt}s for decryption.")
    time_encrypt_text = bench.bench_encrypt_text(count)
    print(f"{time_encrypt_text}s for text encryption.")
    time_decrypt_text = bench.bench_decrypt_text(count)
    print(f"{time_decrypt_text}s for text decryption.")
```
#### File: jolsfd/rsa-algorithm/cli_rsa.py
```python
import argparse
from typing import List, Tuple
from rsa import RSA
def read_key_file(name: str) -> List[int]:
"""
    Read the key from a file.
"""
file = open(name, "r")
try:
lines = file.readlines()
return [int(line) for line in lines]
finally:
file.close()
def write_key_file(name: str, key: List[int]):
"""
    Write the key to a file.
"""
file = open(name, "w")
try:
[file.write(str(el) + "\n") for el in key]
finally:
file.close()
def main():
# defaults
default_file = "key.txt"
rsa = RSA()
# Main parser
parser = argparse.ArgumentParser(
prog="RSA Cli",
description="Command Line Interface als Beispielhafte Anwendung für RSA",
)
# key files
parser.add_argument(
"--file",
type=str,
default=default_file,
help="Dateispeicherort für Schlüsseldatei",
)
subparsers = parser.add_subparsers(help="sub-command help", dest="command")
# generate command
generate_parser = subparsers.add_parser("generate", help="Generate rsa key files")
generate_parser.add_argument(
"--bits",
type=int,
default=2048,
help="Länge des RSA-Moduls, welches durch 8 teilbar sein muss",
)
# encrypt command
decrypt_parser = subparsers.add_parser("encrypt", help="Encrypt message")
decrypt_parser.add_argument("message", type=str, help="Naricht")
# decrypt command
encrypt_parser = subparsers.add_parser("decrypt", help="Decrypt message")
encrypt_parser.add_argument(
"cipher", type=int, nargs="+", help="Verschlüsselter Text"
)
# parse arguments
args = parser.parse_args()
if args.command == "generate":
print(f"Generating RSA key with {args.bits}bits...")
        if args.bits % 8 != 0:
            print("Bits müssen durch 8 teilbar sein!")
            return
public, private = rsa.generate_keys(args.bits)
write_key_file(args.file, [public[0], public[1], private[0]])
elif args.command == "encrypt":
print(f"Encrypting text message...")
key = read_key_file(args.file)
cipher = rsa.encrypt_text(args.message, int(key[0]), int(key[1]))
print("=" * 20, "Cipher", "=" * 20)
[print(c, end=" ") for c in cipher]
print("\n" + "=" * 48)
elif args.command == "decrypt":
print(f"Decrypting text message...")
key = read_key_file(args.file)
message = rsa.decrypt_text(args.cipher, int(key[2]), int(key[1]))
print("=" * 20, "Message", "=" * 20)
print(message)
print("=" * 49)
if __name__ == "__main__":
main()
``` |
{
"source": "jolsfd/unsafe",
"score": 4
} |
#### File: jolsfd/unsafe/HashPassphrase.py
```python
from hashlib import pbkdf2_hmac
import string
chars = string.digits + string.ascii_letters + string.punctuation
salt = "<PASSWORD>"
def convert_bytes_to_string(hashed_bytes, length):
number = int.from_bytes(hashed_bytes, byteorder="big")
string = ""
while number > 0 and len(string) < length:
string = string + chars[number % len(chars)]
number = number // len(chars)
return string
def convert_hash_string(hash_string):
hashed_bytes = pbkdf2_hmac(
"sha512", hash_string.encode("utf-8"), salt.encode("utf-8"), 4096
)
hashed_string = convert_bytes_to_string(hashed_bytes, 16)
return hashed_string
if __name__ == "__main__":
hash_string = input("Hash String >>>")
while len(hash_string) < 1:
hash_string = input("Hash String >>>")
hashed_string = convert_hash_string(hash_string)
print(f"Hashed Passphrase >>> {hashed_string}")
``` |
{
"source": "jolsten/float-interpreter",
"score": 3
} |
#### File: typeconvert/types/ibm64.py
```python
import numpy as np
from numba import njit, vectorize
signatures = [
'f8(u8)',
]
def func(value: np.uint64) -> np.float64:
r"""Convert uint to IBM64
Interprets an unsigned integer as a IBM 64-bit Float and
returns an IEEE 64-bit Float.
Parameters
----------
value : unsigned integer
Unsigned integer value of the data.
Returns
-------
np.float64
A float containing the interpretation of `value`.
Examples
--------
    >>> out = func(0x401999999999999A)
>>> type(out), out
(<class 'numpy.float64'>, 0.1)
"""
value = np.uint64(value)
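    # Bit layout: 1 sign bit, 7-bit excess-64 exponent (power of 16), 56-bit fraction.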
s = (value >> np.uint8(63)) * np.uint64(1)
e = (value >> np.uint8(56)) & np.uint64(0x7F)
m = (value & np.uint64(0x00FFFFFFFFFFFFFF))
S = np.int8(-1) ** s
E = np.int8(e) - np.int8(64)
M = np.float64(m) / np.float64(2**56)
return np.float64(S * M * np.float64(16)**E)
jfunc = njit(signatures)(func)
ufunc = vectorize(signatures)(func)
```
#### File: typeconvert/types/ti32.py
```python
import numpy as np
from numba import njit, vectorize
from .twoscomp import jfunc as uint_to_twoscomp
signatures = [
'f8(u4)',
]
def func(value: np.uint32) -> np.float64:
r"""Convert uint to TI32
Interprets an unsigned integer as a Texas Instruments 32-bit Float and
returns an IEEE 64-bit Float.
Parameters
----------
value : unsigned integer
Unsigned integer value of the data.
Returns
-------
np.float64
A float containing the interpretation of `value`.
Examples
--------
>>> out = func(0xFF800000)
>>> type(out), out
(<class 'numpy.float64'>, -1.0)
"""
# Reference:
# https://www.ti.com/lit/an/spra400/spra400.pdf
value = np.uint32(value)
e = uint_to_twoscomp(
(value & np.uint32(0xFF000000)) >> np.uint8(24), np.uint8(8)
)
s = (value & np.uint32(0x00800000)) >> np.uint8(23)
m = (value & np.uint32(0x007FFFFF))
if e == np.int64(-128):
return np.float64(0)
S = np.float64(-2) ** s
E = np.float64(e)
M = np.float64(m)
return (S + M/np.float64(2**23)) * np.float64(2) ** E
jfunc = njit(signatures)(func)
ufunc = vectorize(signatures)(func)
```
#### File: float-interpreter/tests/test_dec32.py
```python
import pytest
import numpy as np
from itertools import zip_longest
from typeconvert.types.dec32 import func, jfunc, ufunc
# https://pubs.usgs.gov/of/2005/1424/
TEST_ARRAY_SIZE = 100
TEST_CASES = [
# F4
(0x40800000, 1.000000),
(0xC0800000, -1.000000),
(0x41600000, 3.500000),
(0xC1600000, -3.500000),
(0x41490FD0, 3.141590),
(0xC1490FD0, -3.141590),
(0x7DF0BDC2, 9.9999999E+36),
(0xFDF0BDC2, -9.9999999E+36),
(0x03081CEA, 9.9999999E-38),
(0x83081CEA, -9.9999999E-38),
(0x409E0652, 1.234568),
(0xC09E0652, -1.234568),
(0x7FFFFFFF, 1.7014118e+38), # last two not from reference
(0xFFFFFFFF, -1.7014118e+38),
]
tests = []
for val_in, val_out in TEST_CASES:
tests.append((np.uint32(val_in), pytest.approx(val_out)))
@pytest.mark.parametrize('val_in, val_out', tests)
def test_func(val_in, val_out):
print('func', val_in, val_out)
assert func(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_njit(val_in, val_out):
print('jfunc', val_in, val_out)
assert jfunc(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_vectorize(val_in, val_out):
print('ufunc', val_in, val_out)
data = np.array([val_in] * TEST_ARRAY_SIZE)
expected = [val_out] * TEST_ARRAY_SIZE
assert all([a == b for a, b in zip_longest(ufunc(data), expected)])
```
#### File: float-interpreter/tests/test_ibm64.py
```python
import pytest
import numpy as np
from itertools import zip_longest
from typeconvert.types.ibm64 import func, jfunc, ufunc
# Reference:
# https://en.wikipedia.org/wiki/IBM_hexadecimal_floating-point
TEST_ARRAY_SIZE = 100
TEST_CASES = [
(0x0010000000000000, 5.397605e-79),
(0x8010000000000000, -5.397605e-79),
(0x0000000000000000, 0.0),
(0x401999999999999A, 0.1),
(0xC01999999999999A, -0.1),
]
tests = []
for val_in, val_out in TEST_CASES:
tests.append((np.uint64(val_in), pytest.approx(val_out)))
@pytest.mark.parametrize('val_in, val_out', tests)
def test_func(val_in, val_out):
print('func', val_in, val_out)
assert func(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_njit(val_in, val_out):
print('jfunc', val_in, val_out)
assert jfunc(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_vectorize(val_in, val_out):
print('ufunc', val_in, val_out)
data = np.array([val_in] * TEST_ARRAY_SIZE)
expected = [val_out] * TEST_ARRAY_SIZE
assert all([a == b for a, b in zip_longest(ufunc(data), expected)])
```
#### File: float-interpreter/tests/test_ti32.py
```python
import pytest
import numpy as np
from itertools import zip_longest
from typeconvert.types.ti32 import func, jfunc, ufunc
TEST_ARRAY_SIZE = 100
# References:
# https://www.ti.com/lit/an/spra400/spra400.pdf
# https://stackoverflow.com/questions/64687130/convert-ti-tms320c30-32-bits-float-to-ieee-float-in-python
TEST_CASES = (
(0x7F, 0, 0b11111111111111111111111, (2-2**-23) * 2**127),
(0x7F, 0, 0b11111111111111111111110, (2-2**-22) * 2**127),
(0x7F, 0, 0b11111111111111111111101, (2-2**-21+2**-23) * 2**127),
(0x7F, 0, 0b11111111111111111111100, (2-2**-21) * 2**127),
(0x7F, 0, 0b00000000000000000000000, 2**127),
(0x7E, 0, 0b11111111111111111111111, (2-2**-23) * 2**126),
(0x7E, 0, 0b11111111111111111111110, (2-2**-22) * 2**126),
(0x7E, 0, 0b11111111111111111111101, (2-2**-21+2**-23) * 2**126),
(0x00, 0, 0b00000000000000000000000, 1),
(0xFF, 0, 0b11111111111111111111111, 1-2**-24),
(0xFF, 0, 0b11111111111111111111110, 1-2**-23),
(0xFF, 0, 0b11111111111111111111101, 1-2**-22+2**-24),
(0xFF, 0, 0b00000000000000000000000, 2**-1),
(0xFE, 0, 0b11111111111111111111111, (2-2**-23) * 2**-2),
(0xFE, 0, 0b11111111111111111111110, (2-2**-22) * 2**-2),
(0xFE, 0, 0b11111111111111111111101, (2-2**-21+2**-23) * 2**-2),
(0x82, 0, 0b00000000000000000000000, 2**-126),
(0x81, 0, 0b11111111111111111111111, (2-2**-23) * 2**-127),
(0x81, 0, 0b11111111111111111111110, (2-2**-22) * 2**-127),
(0x81, 0, 0b11111111111111111111101, (2-2**-21+2**-23) * 2**-127),
(0x81, 0, 0b11111111111111111111100, (2-2**-21) * 2**-127),
(0x81, 0, 0b00000000000000000000010, (1+2**-22) * 2**-127),
(0x81, 0, 0b00000000000000000000001, (1+2**-23) * 2**-127),
(0x81, 0, 0b00000000000000000000000, 2**-127),
# e = -128 implies zero
(0x80, 0, 0b11111111111111111111111, 0.0),
(0x80, 0, 0b11111111111111111111110, 0.0),
(0x80, 0, 0b11111111111111111111101, 0.0),
(0x80, 0, 0b00000000000000000000001, 0.0),
(0x80, 0, 0b00000000000000000000000, 0.0),
(0x80, 1, 0b11111111111111111111111, 0.0),
(0x80, 1, 0b11111111111111111111110, 0.0),
(0x80, 1, 0b11111111111111111111101, 0.0),
(0x80, 1, 0b00000000000000000000011, 0.0),
(0x80, 1, 0b00000000000000000000010, 0.0),
(0x80, 1, 0b00000000000000000000001, 0.0),
(0x80, 1, 0b00000000000000000000000, 0.0),
(0x81, 1, 0b11111111111111111111111, (-1-2**-23) * 2**-127),
(0x81, 1, 0b11111111111111111111110, (-1-2**-22) * 2**-127),
(0x81, 1, 0b11111111111111111111101, (-1-2**-21+2**-23) * 2**-127),
(0x81, 1, 0b00000000000000000000010, (-2+2**-22) * 2**-127),
(0x81, 1, 0b00000000000000000000001, (-2+2**-23) * 2**-127),
(0x81, 1, 0b00000000000000000000000, - 2**-126),
(0x82, 1, 0b11111111111111111111111, (-1-2**-23) * 2**-126),
(0x82, 1, 0b11111111111111111111110, (-1-2**-22) * 2**-126),
(0x82, 1, 0b11111111111111111111101, (-1-2**-21+2**-23) * 2**-126),
(0xFF, 1, 0b00000000000000000000001, (-1+2**-24)),
(0xFF, 1, 0b00000000000000000000000, -1.0),
# These three tests appear to be wrong,
# exp = 0 should yield 2**0, not 2**-1
# (0x00, 1, 0b11111111111111111111111, (-1-2**-23) * 2**-1),
# (0x00, 1, 0b11111111111111111111110, (-1-2**-22) * 2**-1),
# (0x00, 1, 0b11111111111111111111101, (-1-2**-21+2**-23) * 2**-1), #
(0xFF, 1, 0b11111111111111111111111, (-1-2**-23) * 2**-1),
(0xFF, 1, 0b11111111111111111111110, (-1-2**-22) * 2**-1),
(0xFF, 1, 0b11111111111111111111101, (-1-2**-21+2**-23) * 2**-1),
(0x00, 1, 0b11111111111111111111111, (-1-2**-23) * 2**0),
(0x00, 1, 0b11111111111111111111110, (-1-2**-22) * 2**0),
(0x00, 1, 0b11111111111111111111101, (-1-2**-21+2**-23) * 2**0),
(0x00, 1, 0b00000000000000000000001, (-2+2**-23)),
(0x00, 1, 0b00000000000000000000000, -2),
(0x01, 1, 0b11111111111111111111111, -2-2**-22),
(0x01, 1, 0b11111111111111111111110, -2-2**-21),
(0x01, 1, 0b11111111111111111111101, -2-2**-20+2**-22),
(0x7F, 1, 0b00000000000000000000001, (-2+2**-23) * 2**127),
(0x7F, 1, 0b00000000000000000000000, - 2**128),
)
tests = []
for e, s, m, val_out in TEST_CASES:
val_in = np.uint32((e << 24) + (s << 23) + m)
tests.append((val_in, pytest.approx(val_out)))
@pytest.mark.parametrize('val_in, val_out', tests)
def test_func(val_in, val_out):
print('func')
print(f'val_in = {val_in:08x}')
assert func(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_njit(val_in, val_out):
print('jfunc', val_in, val_out)
assert jfunc(val_in) == val_out
@pytest.mark.parametrize('val_in, val_out', tests)
def test_vectorize(val_in, val_out):
print('ufunc')
data = np.array([val_in] * TEST_ARRAY_SIZE)
expected = [val_out] * TEST_ARRAY_SIZE
assert all([a == b for a, b in zip_longest(ufunc(data), expected)])
``` |
{
"source": "jolth/gpservi-2.0.0",
"score": 3
} |
#### File: gpservi-2.0.0/DB/pgSQL.py
```python
import sys
import traceback
import psycopg2 as pgsql
def connection(args=None):
"""
args, puede ser una cadena con todos los datos para conectarse a la base de datos o
simplemente enviarse sin datos, para lo cual tomara la configuración por defecto
almacenada en el fichero de configuración "config.cfg" (en la sección [DATABASE]).
así:
Usage:
>>> from DB.pgSQL import connection
>>> connection("dbname='test010' user='postgres' host='localhost' password='<PASSWORD>'") # Con argumentos
>>> connection() # Sin argumento
<connection object at 0xb715a72c; dsn: 'dbname='test009' user='postgres' host='localhost' password=<PASSWORD>', closed: 0>
>>> conn = connection()
>>> cursor = conn.cursor()
>>> cursor.execute("select * from gps")
>>> print cursor.fetchall()
[(11, 'GPS0003', 2, False, datetime.datetime(2012, 7, 13, 8, 11, 31, 945952, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None))), ...]
>>>
"""
if args is None:
from Load.loadconfig import load
args = {}
args['dbname'] = load('DATABASE', 'DBNAME')
args['user'] = load('DATABASE', 'USER')
args['host'] = load('DATABASE', 'HOST')
args['password'] = load('DATABASE', 'PASSWORD')
args = " ".join(["%s=\'%s\'" % (k, v) for k, v in args.items()])
    # Connect to the database:
try:
conn = pgsql.connect(args)
except pgsql.OperationalError, e:
print >> sys.stderr, "\nNo se pudo poner en marcha la base de datos.\n"
print >> sys.stderr, e
print >> sys.stdout, 'Error: Revisar el archivo de error.log'
sys.exit(1)
    # Return the connection
return conn
class PgSQL(object):
"""
    Creates a connection object for the specified database.
    It takes the same data as the connection(args=None) function. Therefore, to use the
    default database connection, PgSQL() must be called without arguments, like this:
>>> conn = pgSQL.PgSQL()
Usage:
>>> import pgSQL
>>> db = pgSQL.PgSQL("dbname='test009' user='postgres' host='localhost' password='<PASSWORD>'")
>>> db
<pgSQL.PgSQL object at 0xb740e5ec>
>>> db.conn
<connection object at 0xb718a72c; dsn: 'dbname='test009' user='postgres' host='localhost' password=<PASSWORD>', closed: 0>
>>> dir(db.conn)
['DataError', 'DatabaseError', 'Error', 'IntegrityError', 'InterfaceError', 'InternalError', 'NotSupportedError', \
'OperationalError', 'ProgrammingError', 'Warning', '__class__', '__delattr__', '__doc__', '__format__', '__getattribute__', \
'__hash__', '__init__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', \
'__subclasshook__', 'async', 'binary_types', 'close', 'closed', 'commit', 'cursor', 'dsn', 'encoding', 'fileno', 'get_backend_pid', \
'get_parameter_status', 'get_transaction_status', 'isexecuting', 'isolation_level', 'lobject', 'notices', 'notifies', 'poll', \
'protocol_version', 'reset', 'rollback', 'server_version', 'set_client_encoding', 'set_isolation_level', 'status', 'string_types']
>>>
>>> db.cur
<cursor object at 0x83916bc; closed: 0>
>>>
"""
def __init__(self, args=None):
if args is not None: self.conn = connection(args)
else: self.conn = connection()
self.status = self.conn.status # Status
self.procpid = self.conn.get_backend_pid() # Get backend process id.
self.cur = self.conn.cursor() # Return a new cursor.
        #print "procpid:", self.procpid # process id (debug print)
def exe(self, query, data=None):
"""
        query must be a string containing the SQL query, like:
            "SELECT * FROM gps"
        data must be a tuple or dictionary containing the data to be
        passed to the query, like:
"INSERT INTO test (num, data) VALUES (%s, %s)", (42, 'bar')
usage:
>>> import pgSQL
>>> db = pgSQL.PgSQL("dbname='test009' user='postgres' host='localhost' password='<PASSWORD>'")
>>> db.exe("SELECT * FROM gps")
>>> db.cur.fetchall()
[(11, 'GPS0003', 2, False, datetime.datetime(2012, 6, 10, 8, 11, 31, 945952, \
tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None))), (14, 'ANT051', \
1, False, datetime.datetime(2012, 7, 13, 9, 5, 42, 747214, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None)))]
>>>
>>> db = pgSQL.PgSQL("dbname='test009' user='postgres' host='localhost' password='<PASSWORD>'")
>>> db.exe("SELECT * FROM gps WHERE id=14")
>>> db.cur.fetchone()
(14, 'ANT051', 1, False, datetime.datetime(2012, 7, 13, 9, 5, 42, 747214, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None)))
>>>
>>> db.cur.fetchall()
[(14, 'ANT051', 1, False, datetime.datetime(2012, 7, 13, 9, 5, 42, 747214, tzinfo=psycopg2.tz.FixedOffsetTimezone(offset=1140, name=None)))]
>>>
>>> db = pgSQL.PgSQL("dbname='test009' user='postgres' host='localhost' password='<PASSWORD>'")
>>> db.exe("INSERT INTO gps (name, type) VALUES (%s, %s)", ('GPS0004', 2))
'INSERT 0 1'
>>>
# If a query returns no data, it can be handled like this:
>>> db = pgSQL.PgSQL("dbname='test009' user='postgres' host='localhost' password='<PASSWORD>'")
>>> r = db.exe("SELECT * FROM gps WHERE id=10")
Updating and closing the connection
>>> if r is not None: print "Record: ", r
... else: "No data found"
...
'No data found'
>>>
"""
record = None
if data is not None:
try:
self.cur.execute(query, data)
return self.cur.statusmessage # Should return 1 if the insert was performed
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print >> sys.stderr, traceback.format_exc()
#return self.conn.status
return self.conn.get_transaction_status() # Should return 0 if the insert was not performed
finally:
print >> sys.stderr, "Updating and closing the connection"
# Commit the changes to the DB
self.conn.commit()
# Close the communication
self.cur.close()
self.conn.close()
else:
try:
self.cur.execute(query) # Execute the Query
record = self.cur.fetchall() or record
return record # Return a list with the query results,
# or None if nothing was fetched
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
#print >> sys.stderr, traceback.format_exc()
print >> sys.stderr, "".join(traceback.format_exception_only(exc_type, exc_value))
#return self.conn.status
return self.conn.get_transaction_status() # Should return -1 if an error occurs.
finally:
print >> sys.stderr, "Updating and closing the connection"
# Commit the changes to the DB
self.conn.commit()
# Close the communication
self.cur.close()
self.conn.close()
def exemany(self): pass
```
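Because `exe()` commits and then closes both the cursor and the connection in its `finally` blocks, each query needs a fresh `PgSQL` object. A minimal sketch of that pattern, assuming this file is importable as `pgSQL` and that the DSN values below exist (they are placeholders), written for Python 2 to match the module:
```python
# -*- coding: utf-8 -*-
# Illustrative only: the DSN credentials are assumptions, not part of the module.
import pgSQL

DSN = "dbname='test009' user='postgres' host='localhost' password='secret'"

# One object per statement, because exe() closes the connection when it finishes.
rows = pgSQL.PgSQL(DSN).exe("SELECT * FROM gps")
if rows:
    for row in rows:
        print row
else:
    print "No data found"

# Parameterized insert: psycopg2 substitutes the tuple itself (no string formatting).
status = pgSQL.PgSQL(DSN).exe(
    "INSERT INTO gps (name, type) VALUES (%s, %s)", ("GPS0005", 2))
print status  # e.g. 'INSERT 0 1' on success
```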
#### File: gpservi-2.0.0/SendDevices/SD.py
```python
import os
import sys
from datetime import datetime
class sendData:
"""
"""
def __init__(self, f, server, address, data):
#self.server = server
#self.address = address
self.data = data
self.file = f
#self.file = os.getcwd() + '/out'
with open(self.file, 'r') as f:
# Device name
self.device = f.readline().replace('\n', '')
#print >>sys.stderr, data
#if self.device == self.data['id']:
if data.find(self.device) != -1:
print "################# Device to send data to:", self.device
self.send(server, address)
def send(self, server, address):
#self.server.sendto('>SXADP10103148111847<', self.address)
with open(self.file, 'r') as f:
f.readline()
s = 0
for l in f:
# Send data
#server.sendto('>SXADP10103148111847<', self.address)
#sent = server.sendto('0×00 0×01 0×04 0×00'+l, address)
print >>sys.stderr, '<-' * 34
print >>sys.stderr, 'Fecha: %s' % datetime.now()
print >>sys.stderr, 'ID: %s' % self.device
print >>sys.stderr, 'IP/Port: %s/%s' % (address[0], address[1])
d = '\x00\x01\x04\x00 ' + l
#print >>sys.stderr, d
#sent = server.sendto('\x00\x01\x04\x00 '+l.replace('\n', ''), address)
sent = server.sendto(d, address)
print >>sys.stderr, 'Sending: "%s"' % l
#if l == 'AT$RESET' or l == 'at$reset'
# Delete the outgoing command file
#print >>sys.stderr, 'Deleting %s' % self.file
os.remove(self.file)
# Receive response
print >>sys.stderr, 'Waiting to Receive:\n'
d, s = server.recvfrom(4096)
#print >>sys.stderr, 'received "%s"' % d
print >>sys.stderr, d
print >>sys.stderr, '->' * 34
#os.delete(self.file)
#os.delete(self.file)
``` |
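The constructor above expects `f` to name a small command file whose first line is the device id and whose remaining lines are the payloads to push, and it only transmits when that id appears in the datagram just received. A hedged sketch of driving it (the module path, address and device id are illustrative; Python 2, to match the print statements above):
```python
import socket
from SendDevices.SD import sendData  # assumed module path within gpservi

DEVICE_ADDR = ("127.0.0.1", 9999)  # hypothetical device endpoint
OUT_FILE = "out"                   # command file consumed (and deleted) by sendData

# File layout sendData reads: line 1 = device id, remaining lines = commands to send.
with open(OUT_FILE, "w") as f:
    f.write("GPS0003\n")
    f.write("AT$VERSION\n")

server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.settimeout(5)  # avoid blocking forever on recvfrom

incoming = "GPS0003:heartbeat"  # stands in for a datagram just received from the device
try:
    sendData(OUT_FILE, server, DEVICE_ADDR, incoming)
except (socket.timeout, socket.error):
    print("No response from device (expected when nothing is listening).")
```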
{
"source": "joltwallet/jolt_wallet",
"score": 2
} |
#### File: joltwallet/jolt_wallet/idf_ext.py
```python
import os
import os.path as osp
import importlib.util
from pprint import pprint
def add_unit_test_app_extension(base_actions, project_path=os.getcwd()):
_spec = importlib.util.spec_from_file_location("idf_ext", osp.join(os.environ["IDF_PATH"], "tools", "unit-test-app", "idf_ext.py"))
_unit_test_app_idf_ext = importlib.util.module_from_spec(_spec)
_spec.loader.exec_module(_unit_test_app_idf_ext)
extensions = _unit_test_app_idf_ext.action_extensions(base_actions, project_path=project_path)
return extensions
def action_extensions(base_actions, project_path=os.getcwd()):
# Get the generic `build_target(target_name, ctx, args)`
build_target = base_actions["actions"]["all"]["callback"]
# Add the unit-testing-app extension
extensions = add_unit_test_app_extension(base_actions, project_path=project_path)
# All functions under extensions['global_action_callbacks']
# are always executed
def callback(ctx, global_args, tasks):
#if 'define_cache_entry' not in global_args:
# global_args.define_cache_entry = []
pass
extensions['global_action_callbacks'].append(callback)
# Functions registered under extensions["actions"]["my_command_name"]
# are invoked when `idf.py my_command_name` is run
def build_test(build_name, ctx, args):
#args['no_warnings'] = True
# Always add the jolt_os test component
# This UNIT_TESTING is now available to CMake as a variable,
# but it must be parsed and passed along by the CMakeLists.txt
# to the actual build process.
args['define_cache_entry'].extend([
'TEST_ALL=0', 'TEST_COMPONENTS=jolt_os', 'UNIT_TESTING=1',
])
build_target("all", ctx, args)
def build_compress(build_name, ctx, args):
build_target(build_name, ctx, args)
extensions["actions"]["tests"] = {
"callback": build_test,
}
extensions["actions"]["compress"] = {
"callback": build_compress,
}
return extensions
```
#### File: jolt_wallet/pyutils/rng_dump.py
```python
import argparse
import os, sys
import serial
from time import sleep, time
import logging
# Configure logging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
log = logging.getLogger('elf2jelf')
def parse_args():
this_path = os.path.dirname(__file__)
default_output_fn = os.path.join(this_path, '..', 'rng.raw')
parser = argparse.ArgumentParser()
parser.add_argument('--output', type=str, default=default_output_fn,
help='File to write raw values to.')
parser.add_argument('-n', type=int, default=5000000,
help="Number of 32-bit ints to generate. Defaults to 5 million.")
parser.add_argument('--baudrate', type=int, default=230400,
help="Baudrate")
parser.add_argument('--port', type=str, default='/dev/ttyUSB0',
help="Serial Port")
parser.add_argument('--verbose', '-v', type=str, default='INFO',
help='''
Valid options:
SILENT
INFO
DEBUG
''')
args = parser.parse_args()
dargs = vars(args)
global log
logging_level = args.verbose.upper()
if logging_level == 'INFO':
log.setLevel(logging.INFO)
elif logging_level == 'DEBUG':
log.setLevel(logging.DEBUG)
else:
raise("Invalid Logging Verbosity")
return (args, dargs)
def consume(ser):
ser.read(size=100000, )
def main(args):
args, dargs = parse_args()
# need to clear DTR, then clear RTS to properly reset device
ser = serial.Serial(dsrdtr=True)
ser.baudrate = args.baudrate
ser.port = args.port
ser.dtr = 1
ser.rts = 1
ser.timeout = 3
ser.open()
ser.dtr = 0
ser.rts = 0
log.info("Waiting for device to boot/initialize")
sleep(5) # Wait for device to boot
consume(ser)
cmd = b"rng %d\n" % (args.n * 4)
log.debug('Sending "%s" command' % cmd.decode('utf-8'))
ser.write( cmd )
t_start = time()
with open(args.output, 'wb') as f:
echo = ser.read(size=len(cmd)+1)
assert( echo[:-2] == cmd[:-1] )
assert( echo[-2] == 13 ) # carriage return
assert( echo[-1] == 10 ) # newline
for i in range(args.n):
data = ser.read(size=4)
f.write(data)
t_end = time()
print("Dumped %d 32-bit RNG values in %.2f seconds" % (args.n, t_end - t_start))
if __name__ == '__main__':
main(sys.argv)
``` |
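The script simply streams `n * 4` raw bytes to disk, so the dump can be inspected afterwards with numpy. A quick sketch (the file name matches the script's default output; treating the words as little-endian is an assumption, although it does not affect byte-level checks):
```python
import numpy as np

# Interpret the dump as unsigned 32-bit integers.
values = np.fromfile("rng.raw", dtype="<u4")
print("samples:", values.size)
print("mean (ideally near 2**31):", values.mean())

# Crude byte-level uniformity check before handing the same file to a real
# statistical suite such as dieharder or the NIST STS.
byte_counts = np.bincount(np.frombuffer(values.tobytes(), dtype=np.uint8),
                          minlength=256)
print("min/max byte frequency:", byte_counts.min(), byte_counts.max())
```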
{
"source": "jolufan/donkeycar_test",
"score": 2
} |
#### File: donkeycar/management/base.py
```python
import argparse
import os
import shutil
import stat
import sys
import donkeycar as dk
from donkeycar.utils import *
PACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TEMPLATES_PATH = os.path.join(PACKAGE_PATH, 'templates')
HELP_CONFIG = 'location of config file to use. default: ./config.py'
def make_dir(path):
real_path = os.path.expanduser(path)
print('making dir ', real_path)
if not os.path.exists(real_path):
os.makedirs(real_path)
return real_path
def load_config(config_path):
'''
load a config from the given path
'''
conf = os.path.expanduser(config_path)
if not os.path.exists(conf):
print("No config file at location: %s. Add --config to specify\
location or run from dir containing config.py." % conf)
return None
try:
cfg = dk.load_config(conf)
except:
print("Exception while loading config from", conf)
return None
return cfg
class BaseCommand(object):
pass
class CreateCar(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='createcar', usage='%(prog)s [options]')
parser.add_argument('--path', default=None, help='path where to create car folder')
parser.add_argument('--template', default=None, help='name of car template to use')
parser.add_argument('--overwrite', action='store_true', help='should replace existing files')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
self.create_car(path=args.path, template=args.template, overwrite=args.overwrite)
def create_car(self, path, template='complete', overwrite=False):
"""
This script sets up the folder structure for donkey to work.
It must run without donkey installed so that people installing with
docker can build the folder structure for docker to mount to.
"""
# these are needed in case None is passed as path
path = path or '~/mycar'
template = template or 'complete'
print("Creating car folder: {}".format(path))
path = make_dir(path)
print("Creating data & model folders.")
folders = ['models', 'data', 'logs']
folder_paths = [os.path.join(path, f) for f in folders]
for fp in folder_paths:
make_dir(fp)
# add car application and config files if they don't exist
app_template_path = os.path.join(TEMPLATES_PATH, template+'.py')
config_template_path = os.path.join(TEMPLATES_PATH, 'cfg_' + template + '.py')
myconfig_template_path = os.path.join(TEMPLATES_PATH, 'myconfig.py')
train_template_path = os.path.join(TEMPLATES_PATH, 'train.py')
calibrate_template_path = os.path.join(TEMPLATES_PATH, 'calibrate.py')
car_app_path = os.path.join(path, 'manage.py')
car_config_path = os.path.join(path, 'config.py')
mycar_config_path = os.path.join(path, 'myconfig.py')
train_app_path = os.path.join(path, 'train.py')
calibrate_app_path = os.path.join(path, 'calibrate.py')
if os.path.exists(car_app_path) and not overwrite:
print('Car app already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car application template: {}".format(template))
shutil.copyfile(app_template_path, car_app_path)
os.chmod(car_app_path, stat.S_IRWXU)
if os.path.exists(car_config_path) and not overwrite:
print('Car config already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car config defaults. Adjust these before starting your car.")
shutil.copyfile(config_template_path, car_config_path)
if os.path.exists(train_app_path) and not overwrite:
print('Train already exists. Delete it and rerun createcar to replace.')
else:
print("Copying train script. Adjust these before starting your car.")
shutil.copyfile(train_template_path, train_app_path)
os.chmod(train_app_path, stat.S_IRWXU)
if os.path.exists(calibrate_app_path) and not overwrite:
print('Calibrate already exists. Delete it and rerun createcar to replace.')
else:
print("Copying calibrate script. Adjust these before starting your car.")
shutil.copyfile(calibrate_template_path, calibrate_app_path)
os.chmod(calibrate_app_path, stat.S_IRWXU)
if not os.path.exists(mycar_config_path):
print("Copying my car config overrides")
shutil.copyfile(myconfig_template_path, mycar_config_path)
# now copy file contents from config to myconfig, with all lines
# commented out.
cfg = open(car_config_path, "rt")
mcfg = open(mycar_config_path, "at")
copy = False
for line in cfg:
if "import os" in line:
copy = True
if copy:
mcfg.write("# " + line)
cfg.close()
mcfg.close()
print("Donkey setup complete.")
class Train(BaseCommand):
def parse_args(self, args):
HELP_FRAMEWORK = 'the AI framework to use (tensorflow|pytorch). ' \
'Defaults to config.DEFAULT_AI_FRAMEWORK'
parser = argparse.ArgumentParser(prog='train', usage='%(prog)s [options]')
parser.add_argument('--tub', nargs='+', help='tub data for training')
parser.add_argument('--model', default=None, help='output model name')
parser.add_argument('--type', default=None, help='model type')
parser.add_argument('--config', default='./config.py', help=HELP_CONFIG)
parser.add_argument('--framework',
choices=['tensorflow', 'pytorch', None],
required=False,
help=HELP_FRAMEWORK)
parser.add_argument('--checkpoint', type=str,
help='location of checkpoint to resume training from')
parser.add_argument('--transfer', type=str, help='transfer model')
parser.add_argument('--comment', type=str,
help='comment added to model database - use '
'double quotes for multiple words')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
args.tub = ','.join(args.tub)
cfg = load_config(args.config)
framework = args.framework if args.framework \
else getattr(cfg, 'DEFAULT_AI_FRAMEWORK', 'tensorflow')
if framework == 'tensorflow':
from donkeycar.pipeline.training import train
train(cfg, args.tub, args.model, args.type, args.transfer,
args.comment)
elif framework == 'pytorch':
from donkeycar.parts.pytorch.torch_train import train
train(cfg, args.tub, args.model, args.type,
checkpoint_path=args.checkpoint)
else:
print(f"Unrecognized framework: {framework}. Please specify one of "
f"'tensorflow' or 'pytorch'")
def execute_from_command_line():
"""
This is the function linked to the "donkey" terminal command.
"""
commands = {
'createcar': CreateCar,
'train': Train,
}
args = sys.argv[:]
if len(args) > 1 and args[1] in commands.keys():
command = commands[args[1]]
c = command()
c.run(args[2:])
else:
dk.utils.eprint('Usage: The available commands are:')
dk.utils.eprint(list(commands.keys()))
if __name__ == "__main__":
execute_from_command_line()
``` |
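Both commands can also be driven without the `donkey` entry point by calling their `run()` methods directly; the sketch below does the same thing as `donkey createcar` and `donkey train` (the paths, tub location and model type are placeholders):
```python
from donkeycar.management.base import CreateCar, Train

# Scaffold a car directory from the 'complete' template.
CreateCar().run(['--path', '~/mycar', '--template', 'complete'])

# Train on one or more tubs; --framework falls back to cfg.DEFAULT_AI_FRAMEWORK.
Train().run(['--tub', '~/mycar/data',
             '--model', '~/mycar/models/pilot.h5',
             '--type', 'linear',
             '--config', '~/mycar/config.py'])
```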
{
"source": "Jolumine/exp-trk",
"score": 2
} |
#### File: exp-trk/src/setup.py
```python
import os
import platform
import urllib.request
import requests
import logging
import threading
class Setup:
Add_Logo_Link = "https://cdn-icons-png.flaticon.com/512/992/992651.png"
Admin_Logo_Link = "https://cdn-icons-png.flaticon.com/512/2099/2099058.png"
Delete_Logo_Link = "https://cdn-icons-png.flaticon.com/512/3096/3096673.png"
Expense_Logo_Link = "https://cdn-icons-png.flaticon.com/512/61/61584.png"
Eye_Logo_Link = "https://cdn-icons-png.flaticon.com/512/159/159604.png"
Information_Logo_Link = "https://cdn-icons-png.flaticon.com/512/1/1176.png"
Graph_Icon_Link = "https://cdn-icons-png.flaticon.com/512/3121/3121571.png"
Login_Icon_Link = "https://cdn-icons-png.flaticon.com/512/126/126486.png"
Main_Logo_Link = "https://cdn-icons-png.flaticon.com/512/2/2144.png"
New_User_Logo_Link = "https://cdn1.iconfinder.com/data/icons/avatar-1-2/512/Add_User1-512.png"
Wrong_Logo_Link = "https://cdn-icons-png.flaticon.com/512/1828/1828774.png"
Mod_Logo_Link = "https://cdn-icons-png.flaticon.com/512/61/61456.png"
Export_Logo_Link = "https://cdn-icons-png.flaticon.com/512/151/151900.png"
Transfer_Logo_Link = "https://cdn-icons-png.flaticon.com/512/876/876784.png"
Warning_Logo_Link = "https://cdn-icons-png.flaticon.com/512/159/159469.png"
Stats_Logo_Link = "https://cdn-icons-png.flaticon.com/512/876/876171.png"
Next_Logo_Link = "https://cdn-icons-png.flaticon.com/512/318/318476.png"
Menu_Logo_Link = "https://cdn-icons-png.flaticon.com/512/56/56763.png"
Exit_Logo_Link = "https://cdn-icons-png.flaticon.com/512/1286/1286853.png"
Generate_Logo_Link = "https://cdn-icons-png.flaticon.com/128/3600/3600963.png"
Copy_Logo_Link = "https://cdn-icons-png.flaticon.com/512/60/60990.png"
def __init__(self):
self.root = f"C:\\Users\\{os.getlogin()}\\AppData\\local\\Expense_Tracker"
if self.get_os() == "Windows":
self.dirs()
thread1 = threading.Thread(target=self.download)
thread2 = threading.Thread(target=self.create_log)
thread1.start()
thread2.start()
else:
raise OSError("[!] Unsupported OS")
def dirs(self) -> None:
if self.check(self.root) == False:
os.chdir(f"C:/Users/{os.getlogin()}/AppData\\local")
os.mkdir("Expense_Tracker")
os.chdir(self.root)
os.mkdir("resources")
os.chdir(self.root+"\\resources")
os.mkdir("Logos")
os.chdir(self.root)
os.mkdir("cache")
os.mkdir("users")
os.mkdir("admin")
with open(self.root+"\\admin\\admindata.json", "w") as f:
f.close()
with open(self.root+"\\logs.log", "w") as f:
f.close()
logging.basicConfig(filename=f"C:\\Users\\{os.getlogin()}\\AppData\\local\\Expense_Tracker\\logs.log", encoding="utf-8", format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info("Folder creation [OK]")
def cache(self) -> None:
os.chdir(self.root+"\\cache")
with open(".cache", "w") as f:
f.close()
def get_os(self) -> str:
return platform.system()
def download(self) -> None:
if self.check_connection() and self.check(self.root+"\\resources\\Add_Logo.png") == False:
urllib.request.urlretrieve(self.Add_Logo_Link, f'{self.root+"/resources/"}Logos/Add_Logo.png')
urllib.request.urlretrieve(self.Admin_Logo_Link, f'{self.root+"/resources/"}Logos/Admin_Logo.png')
urllib.request.urlretrieve(self.Main_Logo_Link, f'{self.root+"/resources/"}Logos/Main_Logo.png')
urllib.request.urlretrieve(self.Delete_Logo_Link, f'{self.root+"/resources/"}Logos/Delete_Logo.png')
urllib.request.urlretrieve(self.Expense_Logo_Link, f'{self.root+"/resources/"}Logos/Expense_Logo.png')
urllib.request.urlretrieve(self.Eye_Logo_Link, f'{self.root+"/resources/"}Logos/Eye_Logo.jpg')
urllib.request.urlretrieve(self.Graph_Icon_Link, f'{self.root+"/resources/"}Logos/Graph_Logo.png')
urllib.request.urlretrieve(self.Login_Icon_Link, f'{self.root+"/resources/"}Logos/Login_Icon.png')
urllib.request.urlretrieve(self.New_User_Logo_Link, f'{self.root+"/resources/"}Logos/New_User_Logo.png')
urllib.request.urlretrieve(self.Information_Logo_Link, f'{self.root+"/resources/"}Logos/Info_Logo.png')
urllib.request.urlretrieve(self.Admin_Logo_Link, f'{self.root+"/resources/"}Logos/Welcome.png')
urllib.request.urlretrieve(self.Wrong_Logo_Link, f'{self.root+"/resources/"}Logos/Wrong_Icon.png')
urllib.request.urlretrieve(self.Mod_Logo_Link, f'{self.root+"/resources/"}Logos/Mod_Icon.png')
urllib.request.urlretrieve(self.Export_Logo_Link, f'{self.root+"/resources/"}Logos/Export_Icon.png')
urllib.request.urlretrieve(self.Transfer_Logo_Link, f'{self.root+"/resources/"}Logos/Transfer_Icon.png')
urllib.request.urlretrieve(self.Warning_Logo_Link, f'{self.root+"/resources/"}Logos/Warning_Logo.png')
urllib.request.urlretrieve(self.Stats_Logo_Link, f'{self.root+"/resources/"}Logos/Stats_Logo.png')
urllib.request.urlretrieve(self.Next_Logo_Link, f'{self.root+"/resources/"}Logos/Next_Logo.png')
urllib.request.urlretrieve(self.Menu_Logo_Link, f'{self.root+"/resources/"}Logos/Menu_Logo.png')
urllib.request.urlretrieve(self.Exit_Logo_Link, f'{self.root+"/resources/"}Logos/Exit_Logo.png')
urllib.request.urlretrieve(self.Generate_Logo_Link, f'{self.root+"/resources/"}Logos/Gen_Logo.png')
urllib.request.urlretrieve(self.Copy_Logo_Link, f'{self.root+"/resources/"}Logos/Copy_Logo.png')
logging.basicConfig(filename=f"C:\\Users\\{os.getlogin()}\\AppData\\local\\Expense_Tracker\\logs.log", encoding="utf-8", format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info("Logo Download [OK]")
def create_log(self) -> None:
if self.check(self.root+"//logs.log"):
pass
else:
with open(f"C:\\Users\\{os.getlogin()}\\AppData\\local\\Expense_Tracker\\logs.log", "w") as file:
file.close()
def check(self, folder) -> bool:
if os.path.exists(folder):
return True
else:
return False
def check_connection(self) -> bool:
try:
request = requests.get("https://www.google.com", timeout=5)
except Exception:
return False
else:
logging.info("Internet Connection [OK]")
return True
```
#### File: src/Templates/Home.py
```python
from PyQt5.QtWidgets import QDialog, QLabel, QHBoxLayout, QPushButton, QVBoxLayout, QFrame
from PyQt5.QtGui import QIcon
from .Expense.Home_Expense import Home_Expense
from .Revenue.Home_Income import Home_Income
from .Plot.Plot_Window import Plot_Window
from .Transfer.Transfer_Window import Transfer_Window
from .Passive_Income.Passive_Income import Passive_Income_Window
from .Passive_Expense.Passive_Expense import Passive_Expense_Window
from .Export.Export_Window import Export_Window
from .Profile_Stats.Statisitic_Window import Statistic_Window
from .Settings.Settings_Window import Settings_Window
from ..const import Main_Logo, Menu_Logo, Transfer_Logo, Stats_Logo, Export_Logo, Money_Logo, Graph_Icon, Settings_Logo
class Root_Window(QDialog):
def __init__(self, active_user, parent=None):
super().__init__(parent)
self.active_user = active_user
self.exp_label = QLabel(self)
self.exp_label.setText("Expenses Menu: ")
self.Expense = QPushButton("Expenses", self)
self.Expense.setToolTip("Click to open the Menu for Expenses")
self.Expense.setIcon(QIcon(Menu_Logo))
self.Expense.clicked.connect(self.expense)
self.rev_label = QLabel(self)
self.rev_label.setText("Revenue Menu: ")
self.Revenue = QPushButton("Revenues", self)
self.Revenue.setToolTip("Click to open the Menu for your Revenues")
self.Revenue.setIcon(QIcon(Menu_Logo))
self.Revenue.clicked.connect(self.revenue)
self.pass_label = QLabel(self)
self.pass_label.setText("Passive income: ")
self.pass_btn_income = QPushButton("Passive Income", self)
self.pass_btn_income.setToolTip("Click to add a source of passive income")
self.pass_btn_income.setIcon(QIcon(Money_Logo))
self.pass_btn_income.clicked.connect(self.passive_income)
self.pass_label_exp = QLabel(self)
self.pass_label_exp.setText("Passive Expense: ")
self.pass_btn_exp = QPushButton("Passive Expense", self)
self.pass_btn_exp.setToolTip("Click to add a source of passive expense")
self.pass_btn_exp.setIcon(QIcon(Money_Logo))
self.pass_btn_exp.clicked.connect(self.passive_expense)
self.plot_label = QLabel(self)
self.plot_label.setText("Plot Menu: ")
self.Plot = QPushButton("Plot", self)
self.Plot.setToolTip("Click to open the Plot Menu")
self.Plot.setIcon(QIcon(Graph_Icon))
self.Plot.clicked.connect(self.plot)
self.transfer_label = QLabel(self)
self.transfer_label.setText("Transfer Menu: ")
self.transfer_btn = QPushButton("Transfer", self)
self.transfer_btn.setToolTip("Click to open the transfer menu")
self.transfer_btn.setIcon(QIcon(Transfer_Logo))
self.transfer_btn.clicked.connect(self.transfer)
self.stats_label = QLabel(self)
self.stats_label.setText("Statistics: ")
self.stats_btn = QPushButton("Statistics", self)
self.stats_btn.setToolTip("Click to open the profile statistics")
self.stats_btn.setIcon(QIcon(Stats_Logo))
self.stats_btn.clicked.connect(self.stats)
self.export_label = QLabel(self)
self.export_label.setText("Export: ")
self.exportbtn = QPushButton("Export", self)
self.exportbtn.setToolTip("Click to open the Export Menu")
self.exportbtn.setIcon(QIcon(Export_Logo))
self.exportbtn.clicked.connect(self.export)
self.settings_label = QLabel(self)
self.settings_label.setText("Settings: ")
self.settingsbtn = QPushButton("Settings", self)
self.settingsbtn.setToolTip("Click to open the Settings")
self.settingsbtn.setIcon(QIcon(Settings_Logo))
self.settingsbtn.clicked.connect(self.settings)
expense_layout = QHBoxLayout()
expense_layout.addWidget(self.exp_label)
expense_layout.addWidget(self.Expense)
rev_layout = QHBoxLayout()
rev_layout.addWidget(self.rev_label)
rev_layout.addWidget(self.Revenue)
pass_layout = QHBoxLayout()
pass_layout.addWidget(self.pass_label)
pass_layout.addWidget(self.pass_btn_income)
pass_layout_exp = QHBoxLayout()
pass_layout_exp.addWidget(self.pass_label_exp)
pass_layout_exp.addWidget(self.pass_btn_exp)
plt_layout = QHBoxLayout()
plt_layout.addWidget(self.plot_label)
plt_layout.addWidget(self.Plot)
stats_layout = QHBoxLayout()
stats_layout.addWidget(self.stats_label)
stats_layout.addWidget(self.stats_btn)
trans_layout = QHBoxLayout()
trans_layout.addWidget(self.transfer_label)
trans_layout.addWidget(self.transfer_btn)
export_layout = QHBoxLayout()
export_layout.addWidget(self.export_label)
export_layout.addWidget(self.exportbtn)
settings_layout = QHBoxLayout()
settings_layout.addWidget(self.settings_label)
settings_layout.addWidget(self.settingsbtn)
# New Layout
space_item_1 = QFrame()
space_item_1.setStyleSheet("background-color: darkgray")
space_item_1.setFixedWidth(10)
space_item_2 = QFrame()
space_item_2.setStyleSheet("background-color: darkgray")
space_item_2.setFixedWidth(10)
space_item_3 = QFrame()
space_item_3.setStyleSheet("background-color: darkgray")
space_item_3.setFixedWidth(10)
space_item_4 = QFrame()
space_item_4.setStyleSheet("background-color: darkgray")
space_item_4.setFixedWidth(10)
col_1 = QVBoxLayout()
col_1.addLayout(expense_layout)
col_1.addLayout(rev_layout)
col_1.addLayout(plt_layout)
col_2 = QVBoxLayout()
col_2.addLayout(pass_layout)
col_2.addLayout(pass_layout_exp)
col_2.addLayout(trans_layout)
col_3 = QVBoxLayout()
col_3.addLayout(stats_layout)
col_3.addLayout(export_layout)
col_3.addLayout(settings_layout)
root_1 = QHBoxLayout()
root_1.addWidget(space_item_1)
root_1.addLayout(col_1)
root_1.addWidget(space_item_2)
root_1.addLayout(col_2)
root_1.addWidget(space_item_3)
root_1.addLayout(col_3)
root_1.addWidget(space_item_4)
self.setWindowTitle("Home")
self.setLayout(root_1)
self.setGeometry(300, 300, 500, 650)
self.setWindowIcon(QIcon(Main_Logo))
self.exec_()
def expense(self):
dialog = Home_Expense(self.active_user)
def revenue(self):
dialog = Home_Income(self.active_user)
def passive_income(self):
dialog = Passive_Income_Window(self.active_user)
def passive_expense(self):
dialog = Passive_Expense_Window(self.active_user)
def plot(self):
dialog = Plot_Window(self.active_user)
def transfer(self):
dialog = Transfer_Window(self.active_user)
def stats(self):
dialog = Statistic_Window(self.active_user)
def export(self):
dialog = Export_Window(self.active_user)
def settings(self):
dialog = Settings_Window(self.active_user)
```
#### File: Templates/Login/Login_Screen.py
```python
from PyQt5.QtWidgets import QPushButton, QLineEdit, QHBoxLayout, QVBoxLayout, QDialog, QLabel
from PyQt5.QtGui import QIcon
from ..Admin.Admin_Login import Admin_Login
from .New_User import New_User
from ..Home import Root_Window
from ...const import Login_Icon, Eye_Logo, Wrong_Logo, log_file, Exit_Logo, Next_Logo, New_User_Logo, Admin_Logo
from .security import check_login
from ...algos import get_folder_number
from ..Passive_Income.check_transfer import check_transfer_income
from ..Passive_Expense.check_transfer import check_transfer_expenses
import logging
class Login_Page(QDialog):
def __init__(self):
super().__init__()
self.username = QLineEdit(self)
self.username.setPlaceholderText("Username")
self.username.setToolTip("Enter your username")
self.password = QLineEdit(self)
self.password.setEchoMode(QLineEdit.Password)
self.password.setPlaceholderText("Password")
self.password.setToolTip("Enter your password")
self.showPassword = QPushButton(self)
self.showPassword.setIcon(QIcon(Eye_Logo))
self.showPassword.setToolTip("Click to show or hide your password")
self.showPassword.setFixedHeight(38)
self.showPassword.clicked.connect(self.showPassFunc)
self.login = QPushButton("Login", self)
self.login.setToolTip("Click to login")
self.login.setIcon(QIcon(Next_Logo))
self.login.clicked.connect(self.loginFunction)
self.admin = QPushButton("Admin", self)
self.admin.setToolTip("Click to get access to the Admin Window")
self.admin.setIcon(QIcon(Admin_Logo))
self.admin.clicked.connect(self.Admin)
self.new = QPushButton("New User", self)
self.new.setToolTip("Click to create a new User")
self.new.setIcon(QIcon(New_User_Logo))
self.new.clicked.connect(self.newUser)
self.exit = QPushButton("Exit", self)
self.exit.setToolTip("Click to exit")
self.exit.setIcon(QIcon(Exit_Logo))
self.exit.clicked.connect(self.close)
password = QHBoxLayout()
password.addWidget(self.password)
password.addWidget(self.showPassword)
layout = QVBoxLayout()
layout.addWidget(self.username)
layout.addLayout(password)
layout.addWidget(self.login)
layout.addWidget(self.admin)
layout.addWidget(self.new)
layout.addWidget(self.exit)
self.setWindowTitle("Login")
self.setGeometry(300, 300, 400, 700)
self.setWindowIcon(QIcon(Login_Icon))
self.setLayout(layout)
self.show()
def loginFunction(self) -> None:
username = self.username.text()
password = <PASSWORD>()
folder_number = get_folder_number(username, password)
if check_login(username, password):
self.close()
check_transfer_expenses(folder_number)
check_transfer_income(folder_number)
logging.basicConfig(filename=log_file, encoding="utf-8", format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info(f"{username} is logged in.")
root = Root_Window(folder_number)
else:
info = QDialog()
info.setWindowTitle("Login failed")
info.setWindowIcon(QIcon(Wrong_Logo))
info.setGeometry(300, 300, 320, 100)
infotext = QLabel(info)
infotext.setText("The Username or password is incorrect, please retry...")
info_layout = QVBoxLayout()
info_layout.addWidget(infotext)
info.setLayout(info_layout)
info.exec_()
logging.basicConfig(filename=log_file, encoding="utf-8", format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info(f"Login failed.")
def newUser(self):
interface = New_User()
def Admin(self):
dial = Admin_Login()
def showPassFunc(self):
if self.password.echoMode() == 0:
self.password.setEchoMode(QLineEdit.Password)
else:
self.password.setEchoMode(QLineEdit.Normal)
```
#### File: Templates/Login/security.py
```python
import json
import os
import hashlib
def check_login(username, password) -> bool:
path = f"C:\\Users\\{os.getlogin()}\\AppData\\local\\Expense_Tracker\\users"
all = os.listdir(path)
for i in all:
file = path + f"\\{i}\\data.json"
with open(file, "r") as f:
parsed = json.load(f)
if parsed["Username"] == username and parsed["Password"] == get_hash(password):
return True
return False  # no matching user found
def get_hash(message: str):
message = message.encode("utf-8")
hash = hashlib.sha3_512(message)
return hash.hexdigest()
```
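For reference, `check_login` expects each user's `data.json` to store the SHA3-512 hex digest produced by `get_hash`, never the plain-text password. A small sketch of that contract (the username and passphrase are illustrative, and it assumes this file is importable as `security`):
```python
import json
from security import get_hash

record = {
    "Username": "alice",
    "Password": get_hash("correct horse battery staple"),
}
print(json.dumps(record, indent=4))

# check_login recomputes the digest of the supplied password and compares it
# against record["Password"], so identical inputs always verify:
assert get_hash("correct horse battery staple") == record["Password"]
```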
#### File: Templates/Revenue/Add_Revenue.py
```python
from PyQt5.QtWidgets import QComboBox, QLabel, QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QTextEdit, QDoubleSpinBox
from PyQt5.QtGui import QIcon
from ...const import Add_Logo, months, years, types, days, field_names
import csv
class Add_Income(QDialog):
def __init__(self, active_user, parent=None):
super().__init__(parent)
self.active = active_user
self.amount_label = QLabel(self)
self.amount_label.setText("Amount: ")
self.amount = QDoubleSpinBox(self)
self.amount.setToolTip("Enter the amount")
self.amount.setMaximum(10000)
self.day_label = QLabel(self)
self.day_label.setText("Day: ")
self.day = QComboBox(self)
self.day.setToolTip("Enter the number of the day")
self.day.addItems(days)
self.month_label = QLabel(self)
self.month_label.setText("Month: ")
self.month = QComboBox(self)
self.month.setToolTip("Enter the actual month")
self.month.addItems(months)
self.year_label = QLabel(self)
self.year_label.setText("Year: ")
self.year = QComboBox(self)
self.year.setToolTip("Set the actual year")
self.year.addItems(years)
self.type_label = QLabel(self)
self.type_label.setText("Type: ")
self.type = QComboBox(self)
self.type.addItems(types)
self.descr_label = QLabel(self)
self.descr_label.setText("Description: ")
self.descr = QTextEdit(self)
self.descr.setFixedHeight(60)
self.descr.setFixedWidth(200)
self.add = QPushButton("Add", self)
self.add.setToolTip("Click to add the Expense")
self.add.clicked.connect(self.AddRev)
amount_layout = QHBoxLayout()
amount_layout.addWidget(self.amount_label)
amount_layout.addWidget(self.amount)
day_layout = QHBoxLayout()
day_layout.addWidget(self.day_label)
day_layout.addWidget(self.day)
month_layout = QHBoxLayout()
month_layout.addWidget(self.month_label)
month_layout.addWidget(self.month)
year_layout = QHBoxLayout()
year_layout.addWidget(self.year_label)
year_layout.addWidget(self.year)
type_layout = QHBoxLayout()
type_layout.addWidget(self.type_label)
type_layout.addWidget(self.type)
descr_layout = QHBoxLayout()
descr_layout.addWidget(self.descr_label)
descr_layout.addWidget(self.descr)
root = QVBoxLayout()
root.addLayout(amount_layout)
root.addLayout(day_layout)
root.addLayout(month_layout)
root.addLayout(year_layout)
root.addLayout(type_layout)
root.addLayout(descr_layout)
root.addWidget(self.add)
self.setWindowIcon(QIcon(Add_Logo))
self.setWindowTitle("Add Revenue")
self.setGeometry(500, 500, 400, 550)
self.setLayout(root)
self.exec_()
def AddRev(self):
amount = self.amount.value()
day = self.day.currentText()
month = self.month.currentText()
year = self.year.currentText()
typ = self.type.currentText()
descr = self.descr.toPlainText()
file = f"C:\\Users\\<NAME>\\AppData\\local\\Expense_Tracker\\users\\{self.active}\\income.csv"
with open(file, "a") as f:
writer = csv.DictWriter(f, fieldnames=field_names, lineterminator="\n")
data = {
"Amount": amount,
"Day": day,
"Month": month,
"Year": year,
"Type":typ,
"Description": descr
}
writer.writerow(data)
f.close()
self.close()
```
#### File: Templates/Revenue/Delete_Revenue.py
```python
from PyQt5.QtWidgets import QComboBox, QDialog, QPushButton, QVBoxLayout
from PyQt5.QtGui import QIcon
from ...const import field_names, Delete_Logo
import csv
class Delete_Revenue(QDialog):
def __init__(self, active_user,parent=None):
super().__init__(parent)
self.active = active_user
all = []
self.file = f"C:\\Users\\<NAME>\\AppData\\local\\Expense_Tracker\\users\\{self.active}\\income.csv"
with open(self.file, "r") as f:
reader = csv.DictReader(f, fieldnames=field_names)
for row in reader:
output = row["Amount"] + "€-" + row["Day"] + "-" + row["Month"] + "-" + row["Year"]
if output == "Amount€-Day-Month-Year":
pass
else:
all.append(output)
self.all = QComboBox(self)
self.all.setToolTip("Select the Revenue Amount you want to delete")
self.all.addItems(all)
self.delete = QPushButton("Delete", self)
self.delete.setToolTip("Click to delete the Amount")
self.delete.clicked.connect(self.DeleteRev)
layout = QVBoxLayout()
layout.addWidget(self.all)
layout.addWidget(self.delete)
self.setWindowIcon(QIcon(Delete_Logo))
self.setWindowTitle("Delete Revenue")
self.setGeometry(500, 500, 400, 550)
self.setLayout(layout)
self.exec_()
def DeleteRev(self):
selected = self.all.currentText()
splitted = selected.split("-")
update = []
with open(self.file, "r", newline='') as file:
reader = csv.DictReader(file)
for row in reader:
if row["Amount"] + "€" == splitted[0] and row["Day"] == splitted[1] and row["Month"] == splitted[2] and row["Year"] == splitted[3]:
pass
else:
update.append(row)
with open(self.file, "w", newline="") as f:
f.close()
with open(self.file, "a", newline="") as f:
f.write("Amount,Day,Month,Year,Type,Description\n")
writer = csv.DictWriter(f, fieldnames=field_names, delimiter=",")
for row in update:
# Write the full row back so the Type and Description columns are preserved.
writer.writerow(row)
self.close()
``` |
{
"source": "joluoch/data_structures",
"score": 4
} |
#### File: data_structures/backtracking/backtracking.py
```python
from typing import List
def letterCombinations(digits: str) -> List[str]:
if not digits:
return []
ans = ['']
digitToLetters = ['', '', 'abc', 'def', 'ghi',
'jkl', 'mno', 'pqrs', 'tuv', 'wxyz']
for d in digits:
temp = []
for s in ans:
for c in digitToLetters[ord(d) - ord('0')]:
temp.append(s + c)
ans = temp
return ans
'''Word Search: return True if word can be traced through adjacent board cells.'''
def exist(board,word):
row,col = len(board),len(board[0])
path = set()
def dfs(r,c,i):
if i == len(word):
return True
if (r <0 or c<0 or
r>=row or c>=col or
word[i] != board[r][c]or
(r,c) in path):
return False
path.add((r,c))
res = (dfs(r+1,c,i+1) or
dfs(r-1,c,i+1) or
dfs(r,c+1,i+1) or
dfs(r,c-1,i+1))
path.remove((r,c))
return res
for r in range(row):
for c in range(col):
if (dfs(r,c,0)): return True
return False
'''wildcard matching
Given an input string (s) and a pattern (p), implement wildcard pattern matching with support for '?' and '*' where:
'?' Matches any single character.
'*' Matches any sequence of characters (including the empty sequence).
The matching should cover the entire input string (not partial).
'''
def isMatch(s: str, p: str) -> bool:
s_len, p_len = len(s), len(p)
s_idx = p_idx = 0
star_idx = s_tmp_idx = -1
while s_idx < s_len:
# If the pattern character = string character
# or pattern character = '?'
if p_idx < p_len and p[p_idx] in ['?', s[s_idx]]:
s_idx += 1
p_idx += 1
# If pattern character = '*'
elif p_idx < p_len and p[p_idx] == '*':
# Check the situation
# when '*' matches no characters
star_idx = p_idx #save start position for possible back tracking
s_tmp_idx = s_idx # stringpointer
p_idx += 1
# If pattern character != string character
# or pattern is used up
# and there was no '*' character in pattern
elif star_idx == -1:
return False
# If pattern character != string character
# or pattern is used up
# and there was '*' character in pattern before
else:
# Backtrack: check the situation
# when '*' matches one more character
p_idx = star_idx + 1
s_idx = s_tmp_idx + 1
s_tmp_idx = s_idx
# The remaining characters in the pattern should all be '*' characters
return all(p[i] == '*' for i in range(p_idx, p_len))
'''regular expression matching
Time: O(s*p)
top-down dynamic programming with memoization
'''
def isMatch(s: str, p: str) -> bool:
cache = {}
def dfs(i,j):
if (i,j) in cache:
return cache[(i,j)]
if i >= len(s) and j >= len(p):
return True
if j>= len(p):
return False
match = i < len(s) and (s[i] == p[j] or p[j] == '.')
if (j+1) < len(p) and p[j+1] == '*':
cache[(i,j)] = (dfs(i,j+2)or #dont use *
(match and dfs(i+1,j))) #use it
return cache[(i,j)]
if match:
cache [(i,j)] = dfs(i+1,j+1)
return cache[(i,j)]
cache[(i,j)] = False
return False
return dfs(0,0)
```
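A few spot checks for the functions above, assuming they are defined at module level without a wrapping class (as written here); note that the second `isMatch`, the regular-expression matcher, shadows the wildcard version when both live in one file:
```python
# Word search: "ABD" can be traced A(0,0) -> B(0,1) -> D(1,1).
board = [["A", "B"],
         ["C", "D"]]
assert exist(board, "ABD")
assert not exist(board, "ABX")

# Phone-keypad combinations.
assert letterCombinations("23") == ['ad', 'ae', 'af', 'bd', 'be', 'bf',
                                    'cd', 'ce', 'cf']

# Regex matching: "c*" matches zero c's and "a*" absorbs "aa".
assert isMatch("aab", "c*a*b")
assert not isMatch("mississippi", "mis*is*p*.")
```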
#### File: data_structures/linkedlist/flatten.py
```python
class Solution:
def flatten(self,head):
if head != None:
self.flatten_rec(head)
return head
def flatten_rec(self,head):
curr,tail = head,head
while curr != None:
child = curr.child
next = curr.next
if child != None:
tail = self.flatten_rec(child)
tail.next = next
if next !=None:
next.prev = tail
curr.next = child
child.prev = curr
curr.child = None
curr = tail
else:
curr = next
if curr!= None:
tail = curr
return tail
#https://www.youtube.com/watch?v=RIyPgR7AF7M
```
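A quick exercise of `flatten` on a three-node list where node 2 carries a two-node child list. The `Node` class below is an assumption that mirrors the `val`/`prev`/`next`/`child` fields the method touches:
```python
class Node(object):
    def __init__(self, val, prev=None, next=None, child=None):
        self.val, self.prev, self.next, self.child = val, prev, next, child

# 1 <-> 2 <-> 3, with 2.child = 4 <-> 5
n1, n2, n3, n4, n5 = (Node(v) for v in (1, 2, 3, 4, 5))
n1.next, n2.prev = n2, n1
n2.next, n3.prev = n3, n2
n4.next, n5.prev = n5, n4
n2.child = n4

head = Solution().flatten(n1)

order = []
while head:
    order.append(head.val)
    head = head.next
print(order)  # expected: [1, 2, 4, 5, 3]
```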
#### File: data_structures/stacks_and_queue/consecutive1.py
```python
def findMaxConsecutiveOnes(nums) -> int:
ans = 0
sum = 0
for num in nums:
if num == 0:
sum = 0
else:
sum += num
ans = max(ans, sum)
return ans
```
#### File: data_structures/stacks_and_queue/evaluatepostfix.py
```python
from typing import List
def evalRPN(tokens: List[str]) -> int:
stack = [] #make a stack to hold the numberes and result calculation
for char in tokens: #iterate through the tokens
if char not in "+*-/": #if token is not an operator then it is a number so we add to stack
stack.append(int(char))
else: #if it is an operator then we pop the last two values from the stack and the left + right
r,l = stack.pop(),stack.pop()
if char == "*":
stack.append(l*r)
elif char == "/":
stack.append(int(float(l)/r))
elif char == "+":
stack.append(l+r)
elif char == "-":
stack.append(l-r)
return stack [-1]
#explanation : https://www.youtube.com/watch?v=3wGTlsLnZE4&list=PLLOxZwkBK52Akgqf4oSWPOQO9ROWS_9rc&index=20
``` |
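Two worked examples for the evaluator above; the `int(float(l)/r)` step truncates division toward zero, which is what the second case relies on:
```python
# (2 + 1) * 3 = 9
print(evalRPN(["2", "1", "+", "3", "*"]))   # -> 9

# 4 + (13 / 5) = 4 + 2 = 6, because 13 / 5 truncates toward zero
print(evalRPN(["4", "13", "5", "/", "+"]))  # -> 6
```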
{
"source": "jolurn/my-blog",
"score": 3
} |
#### File: jolurn/my-blog/app.py
```python
from api.hello_api import HelloWorld
# from api.categoria_api import CategoriaAPI
from flask import Flask
from flask_mysqldb import MySQL
from flask_restful import Resource, Api
app = Flask(__name__)
mysql = MySQL(app) # takes our app object and initializes it / extends our app's functionality
api = Api(app)
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DB'] = 'myblog'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
class PostCategoriaAPI(Resource):
def get(self, id):
cur = mysql.connection.cursor()
cur.execute('''select p.titulo, c.nombre as categoria from myblog.post as p left join myblog.categoria as c on p.idcategoria = c.idcategoria where p.idcategoria = %s''', (id,))
result = cur.fetchall()
return str(result)
class CategoriaAPI(Resource):
def get(self):
cur = mysql.connection.cursor()
cur.execute("select * from categoria")
result = cur.fetchall()
return str(result)
class PostAPI(Resource):
def get(self):
cur = mysql.connection.cursor()
cur.execute("select * from post")
result = cur.fetchall()
return str(result)
api.add_resource(PostAPI, '/post')  # add resource
api.add_resource(CategoriaAPI, '/categoria')
api.add_resource(PostCategoriaAPI, '/categoria/<id>/post')
``` |
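The module only wires up the resources, so serving it needs either `flask run` or an explicit `app.run()` guard. A hedged sketch of querying the three endpoints locally (the port and category id are illustrative, and the MySQL instance from the config must be reachable):
```python
# Typical way to serve it, e.g. appended to app.py:
#
#     if __name__ == '__main__':
#         app.run(debug=True, port=5000)
#
# Then, from another process:
import requests

base = "http://127.0.0.1:5000"
print(requests.get(base + "/categoria").text)         # all categories
print(requests.get(base + "/post").text)              # all posts
print(requests.get(base + "/categoria/1/post").text)  # posts in category 1
```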
{
"source": "jolyDev/orto-ray",
"score": 2
} |
#### File: orto-ray/Core/DicomDataManager.py
```python
import os
import pydicom
import pydicom.uid
import numpy as np
import scipy
import scipy.ndimage
import cupyx.scipy.ndimage
import cupy as cp
from Core.Projection import View
from Core.Projection import view_to_int
import Algorithms.Trim
import cv2
import SimpleITK
def getSlice(data, index: int, view: View):
if view is View.FRONTAL and data.shape[0] >= index:
return data[index, :, :]
elif view is View.PROFILE and data.shape[1] >= index:
return data[:, index, :]
elif view is View.HORIZONTAL and data.shape[2] >= index:
return data[:, :, index]
class DicomDataManager():
class Rotation:
x: float = 0.0
y: float = 0.0
z: float = 0.0
def __init__(self, dicom_rooth_path):
self.listeners = []
self.origin = []
self.loadDicom(dicom_rooth_path)
self.rotation = DicomDataManager.Rotation()
self.updateBounds()
def updateBounds(self):
shape = self.modified.shape
self.x_max = shape[0] - 1
self.x_min = 0
self.y_max = shape[1] - 1
self.y_min = 0
self.z_max = shape[2] - 1
self.z_min = 0
def subscribe(self, listener):
self.listeners.append(listener)
def _dataChanged(self):
self.updateBounds()
for subscriber in self.listeners:
subscriber.on3DDataChanged()
def getMax(self, view: View):
return self.origin.shape[view_to_int(view)]
def getMaxModified(self, view: View):
return self.modified.shape[view_to_int(view)]
def getSlice(self, index: int, view: View):
return getSlice(self.modified, index, view)
def get(self):
return self.origin
def trim(self, x_max, x_min, y_max, y_min, z_max, z_min):
self.x_max = int(x_max)
self.x_min = int(x_min)
self.y_max = int(y_max)
self.y_min = int(y_min)
self.z_max = int(z_max)
self.z_min = int(z_min)
self.modified = Algorithms.Trim.Trim(self.origin, self.x_max, self.x_min, self.y_max, self.y_min, self.z_max, self.z_min)
self._dataChanged()
return self.modified
def rotate(self, angles):
init_min = self.origin.min()
init_max = self.origin.max()
# rotate around x axis
x = angles[0] - self.rotation.x
self.rotation.x = x
data_gpu = cp.asarray(self.modified)
rotated = cupyx.scipy.ndimage.rotate(data_gpu, x, (1, 2), order=1)
# rotate around y axis
y = angles[1] - self.rotation.y
self.rotation.y = y
rotated = cupyx.scipy.ndimage.rotate(rotated, y, (0, 2), order=1)
# rotate around z axis
z = angles[2] - self.rotation.z
self.rotation.z = z
rotated = cupyx.scipy.ndimage.rotate(rotated, z, (0, 1), order=1)
self.modified = np.clip(cp.asnumpy(rotated), init_min, init_max)
self.x_min = int(0)
self.y_min = int(0)
self.z_min = int(0)
self._dataChanged()
def getOrigin(self):
return self.origin
def resetModification(self):
self.modified = self.getOriginDeepCopy()
self._dataChanged()
def getOriginDeepCopy(self):
return np.copy(self.origin)
def getModified(self):
return self.modified
def setNewData(self, new_origin):
self.origin = new_origin
self.modified = self.getOriginDeepCopy()
self._dataChanged()
def denoise(self, data):
i = 0
for slice in data:
print()
return data#np.array(img_dilation, dtype=np.int64)
#cv2.imshow('Input', img)
#cv2.imshow('Erosion', img_erosion)
#cv2.imshow('Dilation', img_dilation)
#itk_data = SimpleITK.GetImageFromArray(data)
#itk_data = SimpleITK.CurvatureFlow(itk_data, 0.125, 5)
#itk_data = SimpleITK.VotingBinaryHoleFilling(image1=itk_data)
#majorityThreshold=1,
#backgroundValue=0,
#foregroundValue=labelWhiteMatter)
#return np.array(SimpleITK.GetArrayFromImage(itk_data), dtype=np.int64)
#data = scipy.ndimage.uniform_filter(data, size=1)
#data = scipy.ndimage.gaussian_filter(data, sigma=1)
#return data
def loadDicom(self, dicom_rooth_path):
old_data = self.getOriginDeepCopy()
try:
slices = [pydicom.read_file(dicom_rooth_path + '/' + s) for s in os.listdir(dicom_rooth_path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
# pixel aspects, assuming all slices are the same
ps = slices[0].PixelSpacing
ss = slices[0].SliceThickness
ax_aspect = ps[1] / ps[0]
sag_aspect = ps[1] / ss
cor_aspect = ss / ps[0]
# create 3D array
img_shape = list(slices[0].pixel_array.shape)
img_shape.append(len(slices))
self.origin = np.zeros(img_shape)
kernel = np.ones((5, 5), np.uint8)
# fill 3D array with the images from the files
for i, s in enumerate(slices):
img2d = np.array(s.pixel_array, dtype=np.int64)
self.origin[:, :, i] = img2d
img_erosion = cv2.erode(self.origin[:, :, i], kernel, iterations=1)
img_dilation = cv2.dilate(img_erosion, kernel, iterations=1)
i = i + 1
if i == 50:
cv2.imshow('Input', img_dilation)
self.origin = np.array(self.origin, dtype=np.int64)
self.modified = self.getOriginDeepCopy()
self._dataChanged()
except Exception as e:
print(e)
self.origin = old_data
self.modified = old_data
```
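The module-level `getSlice` helper works on any 3-D array, so it can be sanity-checked without DICOM data. A small sketch, assuming the orto-ray sources and their dependencies (pydicom, cupy, cv2, SimpleITK) are importable so that `Core.Projection.View` resolves:
```python
import numpy as np
from Core.Projection import View
from Core.DicomDataManager import getSlice

volume = np.arange(4 * 5 * 6).reshape(4, 5, 6)

print(getSlice(volume, 2, View.FRONTAL).shape)     # (5, 6)  -> volume[2, :, :]
print(getSlice(volume, 3, View.PROFILE).shape)     # (4, 6)  -> volume[:, 3, :]
print(getSlice(volume, 1, View.HORIZONTAL).shape)  # (4, 5)  -> volume[:, :, 1]
```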
#### File: orto-ray/Core/render3d.py
```python
import pyvista as pv
def volume(data):
dataX = pv.wrap(data)
dataX.plot(volume=True, eye_dome_lighting=True, parallel_projection=True) # Volume render
```
#### File: orto-ray/UI/Render3d.py
```python
import sys
# Setting the Qt bindings for QtPy
import os
os.environ["QT_API"] = "pyqt5"
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QRadioButton
import numpy as np
import pyvista as pv
from pyvistaqt import QtInteractor
import Style as styling
class Render3D(QMainWindow):
def __init__(self, data, parent=None, show=True):
QtWidgets.QMainWindow.__init__(self, parent)
# create the frame
self.frame = QtWidgets.QFrame()
vlayout = QtWidgets.QVBoxLayout()
# add the pyvista interactor object
self.plotter = QtInteractor(self.frame)
self.plotter.set_background("royalblue", top="aliceblue")
vlayout.addWidget(self.plotter.interactor)
self.frame.setLayout(vlayout)
self.setCentralWidget(self.frame)
self.update3D(data)
def update3D(self, data):
self.plotter.clear()
self.plotter.add_bounding_box()
self.plotter.add_volume(data, cmap="gray")
self.show()
def updateSlice(self, data):
self.plotter.clear()
self.plotter.add_bounding_box()
opacity = [0, 0.1, 0.1, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.plotter.add_volume(data, cmap="gray", opacity=opacity)
self.show()
```
#### File: orto-ray/UI/Slider.py
```python
from PyQt5.QtWidgets import (QWidget, QSlider, QHBoxLayout,
QLabel, QApplication)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
import sys
class SliderImpl(QSlider):
def __init__(self, callback):
super().__init__()
self.callback = callback
def mouseReleaseEvent(self, event):
super(QSlider, self).mouseReleaseEvent(event)
self.callback()
class Slider(QWidget):
def __init__(self, parent, min, max, callback, release_callback):
super().__init__()
self.parent = parent
self.callback = callback
self.index = int((max - min ) / 2)
hbox = QHBoxLayout()
self.slider = SliderImpl(release_callback)
self.slider.setOrientation(Qt.Horizontal)
self.slider.setRange(min, max)
self.slider.setValue(self.index)
self.slider.setFocusPolicy(Qt.NoFocus)
self.slider.setPageStep(1)
self.slider.valueChanged.connect(self.onDataChanged)
self.label = QLabel(str(min), self)
self.label.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
self.label.setMinimumWidth(80)
self.label.setText(str(self.index))
hbox.addWidget(self.slider)
hbox.addSpacing(15)
hbox.addWidget(self.label)
self.setLayout(hbox)
def onDataChanged(self, value):
self.index = value
self.label.setText(str(value))
self.callback()
def setRange(self, min, max):
if min > self.slider.value():
self.index = min
self.slider.setValue(self.index)
self.callback()
if max < self.slider.value():
self.index = max
self.slider.setValue(self.index)
self.callback()
self.slider.setValue(self.index)
self.onDataChanged(self.index)
self.slider.setRange(min, max)
def getIndex(self):
return self.index
```
#### File: orto-ray/Utilities/logger.py
```python
import numpy as np
import matplotlib.pyplot as plt
def log_image(image):
plt.imshow(image)
plt.show()
``` |
{
"source": "jolynch/python-hqsom",
"score": 3
} |
#### File: python-hqsom/hqsom/letter_learning.py
```python
import numpy as np
from PIL import Image
import sys, traceback
import timeit
class Letter(object):
"""
Convenience class to read in the letter images and convert them
to useful numpy arrays
"""
def __init__(self, letter, noise=None):
image_loc = "data/alphabet/%s.png" % letter.capitalize()
im = Image.open(image_loc)
# Trim the data
self.data = np.array(im.convert("L")) / 255.0
self.data = np.compress([False] * 4 + [True] * 8 + [False] * 4,
self.data, axis=1)
self.data = np.compress([False] * 1 + [True] * 8 + [False] * 1,
self.data, axis=0)
self.data = self.data.reshape(1, 64)[0]
if noise is not None:
noise_array = np.random.normal(0.0, noise, self.data.shape)
self.data += noise_array
def __getitem__(self, index):
return self.data[index]
def score_letters(genome, setup_data=None):
"""
We want to form the concept of a letter that is noise tolerant, so we show the
network lots of A's, B's, C's and D's and then ask it to cluster noisy
variants. A basic purity metric is used as the scoring function
"""
try:
data = setup_data["data"]
letters = setup_data["types"]
output_size = genome.output_size
clusters = setup_data["clusters"]
timeout = setup_data["timeout"]
hqsom = genome.to_hierarchy()
total_repeat = 5
letter_repeat = 25
# Slow genomes are unfit
mock_data = [.1] * genome.input_size
single_update = timeit.timeit(lambda: hqsom.update(mock_data), number=1)
if single_update > timeout:
return -50
# Testing Strategy:
# Expose the network to the clean letter many times, then reset the
# RSOM difference matrices and do the same with the next letter.
# Do this total_repeat times, and that's all she wrote
for repeat in range(total_repeat):
for letter in letters:
for i in range(letter_repeat):
hqsom.update(data["clean-%s" % letter])
hqsom.reset()
all_results = {}
all_labels = [i for j in clusters for i in j]
for label in all_labels:
all_results[label] = hqsom.activation_vector(data[label],
continuous_output=True)
# Purity metric, we like overfitting because we're trying to encompass a
# lot of noise tolerance, so if we can make big clusters that separate the
# data, cool
purity = [0] * genome.output_size
def score_column(column, clusters, results):
cluster_scores = {}
for cluster in clusters:
cluster_score = 0
for name in cluster:
cluster_score += results[name][column]
cluster_scores[cluster] = cluster_score
cluster_scores = cluster_scores.items()
scores = [score for (i, score) in cluster_scores]
golden = np.argmax(scores)
return 2 * scores[golden] - np.sum(scores)
for i in range(output_size):
purity[i] = score_column(i, clusters, all_results)
for cluster in clusters:
print cluster[0], [hqsom.activation_vector(data[c]) for c in cluster]
return np.sum(purity)
except Exception as e:
print "Tester failed, could be related to genome"
print str(e)
print genome
traceback.print_exc(file=sys.stdout)
return -100
def setup_letters():
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
data = {}
clusters = []
for l in letters:
data["clean-%s" % l] = Letter(l).data
np.random.seed(15717)
for l in letters:
noisy_letters = []
for i in range(1):
data["noisy-%s-%d" % (l, i)] = Letter(l, .1).data
noisy_letters.append("noisy-%s-%d" % (l, i))
clusters.append(tuple(["clean-%s" % l] + noisy_letters))
return [l for l in letters], data, clusters
## "letter_first" output size 5
## Testing setup for the ABCD separation
## Noise of .1 stddev, 5 repeats of 25 exposure per letter
## 5 data points, 1 clean data point for training => letter_first
## "twelve" output size 16
## Testing setup for the ABCD EFGH IJKL separation
## Noise of .25 stddev, 3 repeats of 25 exposure per letter
## 10 data points, 1 clean data point for training
## high_noise output size 5
## Testing setup for the ABCD separation
## Noise of .3 stdev, 5 repeats of 25 exposure per letter
## 10 data points, 1 clean data point for training
```
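The purity metric rewards output columns whose activation mass concentrates in one cluster: each column scores `2 * best_cluster_score - sum_of_all_cluster_scores`, so an evenly split column scores 0 and a column owned by a single cluster scores its full mass. A hand-worked miniature of that arithmetic (the activation numbers are made up, and `score_column` is restated outside its closure for illustration):
```python
import numpy as np

clusters = [("clean-A", "noisy-A-0"), ("clean-B", "noisy-B-0")]
# Stand-ins for activation_vector(...) on a 2-unit output layer.
results = {
    "clean-A":   np.array([0.9, 0.1]),
    "noisy-A-0": np.array([0.8, 0.2]),
    "clean-B":   np.array([0.1, 0.7]),
    "noisy-B-0": np.array([0.2, 0.6]),
}

def score_column(column, clusters, results):
    scores = [sum(results[name][column] for name in cluster) for cluster in clusters]
    golden = int(np.argmax(scores))
    return 2 * scores[golden] - np.sum(scores)

purity = [score_column(i, clusters, results) for i in range(2)]
print(purity)         # column 0: 2*1.7 - 2.0 = 1.4 ; column 1: 2*1.3 - 1.6 = 1.0
print(np.sum(purity))
```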
#### File: python-hqsom/hqsom/som_test.py
```python
from som import *
from rsom import *
from hqsom import *
from hqsom_audio import *
from preproc.images import *
import preproc.audio as audio
import getopt, sys
import traceback
#import matplotlib.pyplot as plt
import pickle
import genetic_algo
tests = ("som","rsom", "hqsom", "hqsom_noise", "hqsom_noise_multiple", "image_gen", "hqsom_77_network", "hqsom_77", "audio")
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
use_pure = False
input_vectors = np.array([
[0.1 , 0.1 , 0.1 , 0.1],
[.01 ,.001 , 0.6 , 0.8 ],
[0.3 , 0.3 , 0.3 , 0.3],
[0.0 , 0.8 , 0.0 , 0.0],
[1.0 , 0.9 , 0.95, 0.82],
[0.35,0.95 , 0.24, 0.76]])
rate, spread, size, input_size = .4, .2, len(input_vectors), len(input_vectors[0])
def test_som():
som1 = SOM(input_size, size, pure=use_pure)
assert som1
#Test that a single vector can be trained on
print "-- Training on single input --"
for i in range(10):
som1.update(input_vectors[0], rate, spread)
print "Got MSE of {}".format(som1.mse(input_vectors[0]))
assert som1.mse(input_vectors[0]) < 1e-3
#Test that all vectors can be trained on in a 1:1 network input_size = size
som1 = SOM(input_size, size, pure=use_pure)
print "-- Training on all inputs --"
for i in range(1000):
som1.update(input_vectors[i%len(input_vectors)], rate, spread)
total_mse = 0
for inp in input_vectors:
total_mse += som1.mse(inp)
print "Got MSE of {}".format(som1.mse(inp))
assert som1.mse(inp) < .3
assert total_mse < .05 * len(input_vectors)
#Checking Activation vectors
activated = set()
for inp in input_vectors:
activated.add(som1.bmu(inp))
print "Applying signal: {}".format(inp)
print "Activating {}".format(som1.units[som1.bmu(inp)])
err = abs(len(activated) - len(input_vectors))
print activated
print "All activated units: {}".format(activated)
print "Error: {} vs max {}".format(err, .5*len(input_vectors))
assert err <= .5*len(input_vectors)
#For paper, disregard
#data = np.transpose(np.array([
#[.3 , .7 , .1 , .14 , .01],
#[.3 , .1 , .01 , .16 , .9],
#[.3 , .03 , .8 , .7 , .01]]))
#som1 = SOM(3,5,True)
#som1.units = data
#som1.update(np.array((.1,.1,.1)), .2, 1)
#print som1.units.transpose()
#I'm kind of unsure how to really test this ...
def test_rsom():
rsom1 = RSOM(input_size, size, pure=use_pure)
alpha = .3
#Test a time dependent sequence
print "-- Training on alternating values --"
for i in range(1000):
rsom1.update(input_vectors[i%2], rate, spread, alpha)
rsom1.update(input_vectors[2], rate, spread, alpha)
rsom1.reset()
assert rsom1.differences[0][0] == 0
for i in range(3):
print "Got MSE of {}".format(rsom1.mse(input_vectors[i]))
print "Activation vector: {}".format(rsom1.activation_vector(input_vectors[i], True))
assert rsom1.mse(input_vectors[i%2]) < .3
def test_hqsom():
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
g1,g2,s1,s2,a = .1,.1,16,90,.1
hqsom = HQSOM(9,18,3, use_pure_implementation=use_pure)
def flush(num):
for l in range(num):
hqsom.update(test_data[0], g1,g2,s1,s2,a)
num_cycles, num_repeats = 25, 11
total_run_count, seq_count = num_cycles*num_repeats*9, 0
for j in range(num_cycles):
for i in range(num_repeats):
print "update {}/{}".format(seq_count,total_run_count)
hqsom.reset()
seq = ()
if i %2 == 0:
seq = (1,2,3,1,2,3)
else:
seq = (4,5,6,4,5,6)
for k in seq:
hqsom.update(test_data[k], g1, g2, s1, s2, a)
hqsom.reset()
seq_count += 9
c = [hqsom.activation_vector(t) for t in test_data]
print c
assert c[0] != c[1] and c[1] != c[4]
assert c[1] == c[2] and c[2] == c[3]
assert c[4] == c[5] and c[5] == c[6]
assert c[3] != c[4]
def test_hqsom_noise(noise_std=.1):
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
#Add in gausian noise
noise = np.random.normal(0.0,noise_std,test_data.shape)
test_data = test_data + noise
g1,g2,s1,s2,a = .1,.1,16,90,.1
#Due to the noise we have to add many more map units
hqsom = HQSOM(9,18,3, use_pure_implementation=use_pure)
print "bleh"
def flush(num):
for l in range(num):
hqsom.update(test_data[0], g1,g2,s1,s2,a)
num_cycles, num_repeats = 25, 11
total_run_count, seq_count = num_cycles*num_repeats*9, 0
for j in range(num_cycles):
for i in range(num_repeats):
print "update {}/{}".format(seq_count,total_run_count)
hqsom.reset()
if i %2 == 0:
seq = (1,2,3,1,2,3)
else:
seq = (4,5,6,4,5,6)
for k in seq:
hqsom.update(test_data[k], g1, g2, s1, s2, a)
hqsom.reset()
seq_count += 9
#Re-do the test data to test on different noisy data
print "Generating different test data for activating"
test_data = np.array([
[0,0,0,0,0,0,0,0,0],
[1,1,1,0,0,0,0,0,0],
[0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,1,1,1],
[1,0,0,1,0,0,1,0,0],
[0,1,0,0,1,0,0,1,0],
[0,0,1,0,0,1,0,0,1]])
#Add in Gaussian noise
noise = np.random.normal(0.0,noise_std,test_data.shape)
test_data = test_data + noise
g1,g2,s1,s2,a = .1,.1,16,90,.1
c = [hqsom.activation_vector(t) for t in test_data]
print c
assert c[0] != c[1] and c[1] != c[4]
assert c[1] == c[2] and c[2] == c[3]
assert c[4] == c[5] and c[5] == c[6]
assert c[3] != c[4]
def test_hqsom_noise_multiple():
num_errors, num_tests, noise_std = 0, 100, .2
for i in range(num_tests):
try:
test_hqsom_noise(noise_std)
except:
num_errors += 1
print "Passed {} out of {}".format(num_tests-num_errors, num_tests)
assert num_errors < .25 * num_tests
def enumerate_spiral(l):
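#Enumerate the (l+1)x(l+1) block of lattice offsets starting at (0,0), walking it
#in a spiral (down, right, up, left with shrinking side length); e.g. l=2 gives the
#9 positions and l=4 the 25 positions referenced in test_hqsom_77 below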
coords, coord, original_l = [], [0,0], l
while l > 0:
#Go down
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[1]+=1
#print "going down from {} to {}".format(coords[-1], coord)
if l < original_l:
l -= 1
#Go right
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[0]+=1
#print "going right from {} to {}".format(coords[-1], coord)
#Go up
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[1]-=1
#print "going up from {} to {}".format(coords[-1], coord)
l -= 1
#Go left
for i in range(l):
if not tuple(coord) in coords:
coords.append(tuple(coord))
coord[0]-=1
#print "going left from {} to {}".format(coords[-1], coord)
coords.append(coord)
return coords
def test_hqsom_77_network():
output_size =17
hqsom = PaperFig3Hierarchy(65,17,513,output_size, use_pure_implementation=use_pure)
g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .1,.01,.1,.001, 16.0, 100.0, 4.0, 200.0, .1, .01
data_image = Square_Image(5,(1,1))
data = data_image.data()
hqsom.update(data,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
print hqsom.activation_vector(data,False,True)
assert hqsom.activation_vector(data) != None
def test_hqsom_77():
#Generate the test sequence; note that we must do a spiral exposure to get the
#correct temporal-spatial representations in the SOMs
#7x7 only has one possible test (cycled twice of course)
coord_test = {"large":[(7,0,0),(7,0,0)]}
#5x5 has 9 possible positions (cycled twice of course)
coord_test["medium"] = [(5,i,j) for (i,j) in enumerate_spiral(2)]
coord_test["medium"] = coord_test["medium"][::-1] + coord_test["medium"]
#3x3 has 25 possible positions (cycled twice of course)
coord_test["small"] = [(3,i,j) for (i,j) in enumerate_spiral(4)]
coord_test["small"] = coord_test["small"][::-1] + coord_test["small"]
#######The available data sets
square_data, diamond_data, x_data = [], [], []
#First we spiral out, then back in for each data set
for data_type,data_class,data_container in [("square", Square_Image, square_data),
("diamond", Diamond_Image, diamond_data),
("x", X_Image, x_data)]:
for data_set in ("large","medium", "small"):
for (w,x,y) in coord_test[data_set]:
if data_type == "diamond":
w,x,y = w/2, x+w/2, y+w/2
image_data = data_class(w,(x,y))
data_container.append(image_data.data())
image_data.save("data/{}_#{}#_".format(data_type, str(len(data_container)).zfill(2)))
blank_data = [Data_Image().data() for i in range(20)]
#print len(square_data)
#print len(diamond_data)
#print len(x_data)
#Paper settings
#Make sure we don't use any of our "improvements"
#bottom_som_size, bottom_rsom_size, top_som_size, output_size = 65,17,513,17
#hqsom = PaperFig3Hierarchy(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = use_pure)
#g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .1,.01,.1,.001, 16.0, 100.0, 4.0, 250.0, .1, .01
#run_name = "PAPER_RUN_GAUSSIAN_"
#num_cycles, data_sets, num_repeats = 150, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 5
#Really good TWO classifier:
#bottom_som_size, bottom_rsom_size, top_som_size, output_size = 10,80,10,5
#hqsom = PaperFig3Hierarchy(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = True)
#g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = .2,.4,.1,.5, 10.0, 80.0, 14.0, 100.0, .8, .01
#run_name = "TWO_CLASS_"
#num_cycles, data_sets, num_repeats = 1, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 1
#Our settings
bottom_som_size, bottom_rsom_size, top_som_size, output_size = 40, 25, 150, 7
hqsom = PaperFig3Hierarchy(bottom_som_size,
bottom_rsom_size,
top_som_size,output_size,
use_pure_implementation=use_pure)
g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = 0.1,0.01,0.1,0.05,20.0,150.0,15.0,250.0,0.1,0.02
run_name = "REFERENCE_19_OUR_SETTINGS_"
num_cycles, data_sets, num_repeats = 50, [("SQUARE",square_data), ("DIAMOND",diamond_data), ("X",x_data)], 4
seq_num = 0
MAP_Image(hqsom.top_hqsom.rsom.units, "output/{}INITIAL_TOP_RSOM_".format(run_name)).save()
total_run_count = num_cycles * len(data_sets)*(len(data_sets[0][1])*num_repeats)
for i in range(num_cycles):
for data_type, data_set in data_sets:
for j in range(num_repeats):
MAP_Image(hqsom.top_hqsom.rsom.units,"output/{}TOP_RSOM_{}_{}_{}".format(run_name,i,data_type,j)).save()
for d in data_set:
hqsom.update(d,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
print "{} update {}/{}".format(data_type, seq_num, total_run_count)
print "{} current BMU: {}".format(data_type, hqsom.activation_vector(d))
seq_num += 1
data_type = "BLANK"
#Instead of training on blank data, just reset the SOMs
print "Resetting SOMs"
hqsom.reset()
MAP_Image(hqsom.top_hqsom.rsom.units,"output/{}TOP_RSOM_{}_{}".format(run_name,i,data_type)).save()
#for d in blank_data:
#hqsom.update(d,g1,g2,s1,s2,a1,g3,g4,s3,s4,a2)
#print "{} update {}/{}".format(data_type, seq_num, total_run_count)
#print "{} current BMU: {}".format(data_type, hqsom.activation_vector(d))
#seq_num += 1
print "Collecting Classification Data, please wait this can take time"
data_sets = [("BLANK", blank_data)]+data_sets
output_hash = {"BLANK":[0]*output_size,"SQUARE":[0]*output_size,"DIAMOND":[0]*output_size,"X":[0]*output_size}
for data_name, data_collection in data_sets:
for i in data_collection:
result = hqsom.activation_vector(i)
output_hash[data_name][result] += 1
print "Run: {}".format(run_name)
print "Using the parameters g1,g2,g3,g4,s1,s2,s3,s4,a1,a2 = {},{},{},{},{},{},{},{},{},{}".format(g1,g2,g3,g4,s1,s2,s3,s4,a1,a2)
print "Using {} cycles of each data set repeated {} times".format(num_cycles, num_repeats)
print "BSOM, BRSOM, TSOM, TRSOM sizes: {}, {}, {}, {}".format(bottom_som_size, bottom_rsom_size, top_som_size, output_size)
for data_name, data_collection in data_sets:
mode = np.argmax(output_hash[data_name])
num_items = float(len(data_collection))
print "#"*80
print "Data Set: {}".format(data_name)
print "Most Frequently Classified As (MODE): {}".format(mode)
results = np.array(output_hash[data_name])
print "Full Distribution over Final RSOM Map Space:"
print results / num_items
MAP_Image(hqsom.bottom_hqsom_list[5].rsom.units,"output/{}FINAL_MIDDLE_RSOM".format(run_name)).save()
#NOTE: we only support WAV files of the same bitrate
def test_audio(hqsom=None):
print "Loading songs into memory"
song_rock = audio.Spectrogram("data/music/Californication.wav")
song_rock2 = audio.Spectrogram("data/music/ByWay.wav")
song_techno = audio.Spectrogram("data/music/Everybody.wav")
song_techno2 = audio.Spectrogram("data/music/DayNNight.wav")
song_classical = audio.Spectrogram("data/music/Bells.wav")
song_classical2 = audio.Spectrogram("data/music/Symp9.wav")
print "Done loading songs into memory"
songs = [
("Techno", song_techno),
("TechnoTEST", song_techno2),
("Classical", song_classical),
("ClassicalTEST", song_classical2),
("Rock", song_rock),
("RockTEST", song_rock2),
]
song_types = [i for (i,j) in songs]
num_seconds, test_length = .1, 10
#Get num_seconds-second slices of each song, checking a cache first
try:
(n,saved_songs,final_data) = pickle.load(open("cache.p", "rb"))
if not n == num_seconds or not saved_songs == tuple(song_types):
raise Exception
print "Found data in cache, skipping generation"
except:
print "genetic_algo.Generating ffts"
raw_data = dict([(i,None) for i in song_types])
for (song_type, song_file) in songs:
print "genetic_algo.Generating data on the fly for {} song".format(song_type)
fft_length = song_file.sample_rate * num_seconds
#Round fft_length up to the next power of 2
fft_length = int(2**np.ceil(np.log2(fft_length)))
print "Using fft_length of {}".format(fft_length)
raw_data[song_type] = song_file.get_spectrogram(fft_length)
print "Reshaping ffts into length 128 inputs"
final_data = {}
for song_type in song_types:
data = raw_data[song_type]
new_data = np.zeros((data.shape[0], 128))
bucket_sum, spect = 0, None
for spect_index in range(len(data)):
print "{} of {} Spectrograms processed".format(spect_index, len(data))
spect = data[spect_index]
window_size = len(spect) / 128
bucket_sum = 0
for i in range(128):
#bucket_sum = np.mean(spect[i*window_size:i*window_size+window_size])
new_data[spect_index][i] = spect[i*window_size]
#new_data[spect_index] = new_data[spect_index] - min(new_data[spect_index])
#new_data[spect_index] = new_data[spect_index] / np.linalg.norm(new_data[spect_index])
final_data[song_type] = new_data
pickle.dump((num_seconds, tuple(song_types), final_data), open("cache.p","wb"))
"""
plt.matshow(np.transpose(final_data["Rock"]))
plt.title("Rock")
plt.matshow(np.transpose(final_data["Techno"]))
plt.title("Techno")
plt.matshow(np.transpose(final_data["Classical"]))
plt.title("Classical")
plt.matshow(np.transpose(final_data["ClassicalTEST"]))
plt.title("Classical_TEST_DATA")
plt.matshow(np.transpose(final_data["TechnoTEST"]))
plt.title("Techno_TEST_DATA")
plt.matshow(np.transpose(final_data["RockTEST"]))
plt.title("Rock_TEST_DATA")
"""
if hqsom is None:
output_size = 5
hqsom = Hierarchy1D(
LayerConf1D(2, 64, 128, 0,
50, 0.2, 200,
40, .7, 0.15, 100, use_pure),
LayerConf1D(2, 1, 2, 0,
50, 0.2, 200,
20, .7, 0.15, 100, use_pure),
LayerConf1D(1, 2, 2, 0,
32, 0.2, 200,
output_size, .05, 0.2, 100, use_pure),
)
else:
output_size = hqsom.output_size
hqsom = hqsom.to_hierarchy()
#hqsom = NaiveAudioClassifier(bottom_som_size,
#bottom_rsom_size,
#top_som_size,output_size,
#use_pure_implementation = True)
#hqsom = genetic_algo.Genome(128, output_size).to_hierarchy()
#genome = genetic_algo.Genome(128, 5, [genetic_algo.Gene(128, 1, [128, 1, 0.5349470927446156, 58, 0.16262059789324113, 93, 69, 0.38946495945845583, 0.18591242958088183, 449]),
# genetic_algo.Gene(1, 1, [1, 1, 0.9697823529658623, 67, 0.06338912516811035, 484, 5, 0.07069243885373111, 0.30821633466399, 312])])
#genome = genetic_algo.Genome(128, 5, [
# genetic_algo.Gene(128, 1, [128, 1, 0.8191182230079156, 86, 0.13323972043189236, 175, 31, 0.3806979377580392, 0.8121811036319838, 98]),
# genetic_algo.Gene(1, 1, [1, 1, 0.8727135450401478, 62, 0.3453597203536144, 121, 50, 0.755878448191539, 0.6818380459687157, 325]),
# genetic_algo.Gene(1, 1, [1, 1, 0.4174074007331876, 89, 0.7549203282530946, 50, 5, 0.7849685525193116, 0.5789786448249847, 263])
# ])
#hqsom = genome.to_hierarchy()
print hqsom.layer_configs
run_name = "AUDIO_TEST"
#Testing schema:
# 1) Expose to entirety of three songs
# 2) Pick 3 random sequences of test_length in size from each song, run through
# 3) Clear at each in between
seq_num = 0
num_cycles, num_repeats = 1, 1
total_run_count = num_cycles*sum([(len(final_data[x])) for x in song_types])
for i in range(num_cycles):
for song_type in song_types:
if song_type == "ClassicalTEST" or song_type == "TechnoTEST" or song_type == "RockTEST":
print "Skipping test data: {}".format(song_type)
continue
for spectrum in final_data[song_type]:
hqsom.update(spectrum)
#print hqsom.activation_vector(spectrum, True, True)
print "{} update {}/{}".format(song_type, seq_num, total_run_count)
seq_num += 1
print "Resetting RSOMs"
hqsom.reset()
total_run_count = num_cycles*2*len(song_types)*test_length
seq_num = 0
for i in range(num_cycles*2):
for song_type in song_types:
if song_type == "ClassicalTEST" or song_type == "TechnoTEST" or song_type == "RockTEST":
print "Skipping test data: {}".format(song_type)
continue
num_spectrograms = len(final_data[song_type])
r_index = np.random.randint(0,num_spectrograms-test_length)
for index in range(r_index, r_index+test_length):
hqsom.update(final_data[song_type][index])
#print hqsom.activation_vector(spectrum, False, True)
print "{} update {}/{}".format(song_type, seq_num, total_run_count)
seq_num += 1
print "Resetting RSOMs"
hqsom.reset()
print "Run: {}".format(run_name)
print "Using Network:"
print hqsom.layer_configs
print "num_cycles, num_repeats, num_seconds, test_length = {}, {}, {}, {}".format(num_cycles, num_repeats, num_seconds, test_length)
for data_name in song_types:
print "#"*80
print "Results for {}".format(data_name)
data_collection = final_data[data_name]
results =[0]*output_size
for spect in data_collection:
results[hqsom.activation_vector(spect)] += 1
t = sum(results)
results = [float(i)/t for i in results]
results = np.array(results)
print "Final Distribution Over Map Space"
print results
print "MODE: {}".format(np.argmax(results))
#plt.show()
if __name__ == "__main__":
try:
opts, args = getopt.getopt(sys.argv[1:], "t:l", ["list","test="])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
#So that we get reproducible results
np.random.seed(15717)
for o,a in opts:
if o in ("-t", "--test"):
print "Running {} test:".format(a)
try:
eval("test_"+a)()
except Exception as e:
print e
traceback.print_exc(file=sys.stdout)
print "!!! ERROR !!!"
else:
print "SUCCESS"
elif o in ("-l", "--list"):
print "List of tests: {}".format(tests)
# print help information and exit:
if len(opts) == 0:
print "Running all Tests"
for test in tests:
print "#"*80
print "Running test on: {}".format(test)
print "-"*80
try:
eval("test_"+test)()
except Exception as e :
print e
traceback.print_exc(file=sys.stdout)
print "!!! ERROR !!!"
else:
print "SUCCESS"
print "#"*80
``` |
{
"source": "jolynch/python_performance_toolkit",
"score": 3
} |
#### File: notebooks/queueing_theory/simulator.py
```python
import random
import numpy as np
import simpy
class RequestSimulator(object):
""" Simulates a M/G/k process common in request processing (computing)
:param worker_desc: A tuple of (count, capacity) to construct workers with
:param load_balancer: A function which takes the current request number
and the list of workers and returns the index of the
worker to send the next request to
:param latency_fn: A parameterless function which returns the number of
milliseconds a request took to process
:param number_of_requests: The number of requests to run through the
simulator
:param request_per_s: The rate of requests per second.
"""
def __init__(
self, worker_desc, load_balancer, latency_fn,
number_of_requests, request_per_s
):
self.worker_desc = worker_desc
self.load_balancer = load_balancer
self.latency_fn = latency_fn
self.number_of_requests = int(number_of_requests)
self.request_interval_ms = 1. / (request_per_s / 1000.)
self.data = []
def simulate(self):
# Setup and start the simulation
random.seed(1)
np.random.seed(1)
self.env = simpy.Environment()
count, cap = self.worker_desc
self.workers = [
simpy.Resource(self.env, capacity=cap) for i in range(count)
]
self.env.process(self.generate_requests())
self.env.run()
def generate_requests(self):
for i in range(self.number_of_requests):
idx = self.load_balancer(i, self.workers)
worker = self.workers[idx]
response = self.process_request(
'Request%02d' % i, worker,
)
self.env.process(response)
# Exponential inter-arrival times == Poisson
arrival_interval = random.expovariate(
1.0 / self.request_interval_ms
)
yield self.env.timeout(arrival_interval)
def process_request(self, name, worker):
""" Request arrives, possibly queues, and then processes"""
t_arrive = self.env.now
with worker.request() as req:
yield req
t_start = self.env.now
t_queued = t_start - t_arrive
# Let the operation take whatever amount of time the latency
# function tells us to
yield self.env.timeout(self.latency_fn())
t_done = self.env.now
t_processing = t_done - t_start
t_total_response = t_done - t_arrive
datum = (t_queued, t_processing, t_total_response)
self.data.append(datum)
def queue_size(resource):
return resource.count + len(resource.queue)
def random_lb(request_num, workers):
return random.randint(0, len(workers) - 1)
def rr_lb(request_num, workers):
return request_num % len(workers)
def choice_two_lb(request_num, workers):
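# "Power of two choices": sample two workers at random and route the request
# to whichever currently has the shorter queue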
r1 = random_lb(request_num, workers)
r2 = random_lb(request_num, workers)
if queue_size(workers[r1]) < queue_size(workers[r2]):
return r1
return r2
def choice_two_adjacent_lb(request_num, workers):
r1 = random_lb(request_num, workers)
if r1 + 2 >= len(workers):
r2 = r1 - 1
r3 = r1 - 2
else:
r2 = r1 + 1
r3 = r1 + 2
iq = [(queue_size(workers[i]), i) for i in (r1, r2, r3)]
return (sorted(iq)[0][1])
def shortest_queue_lb(request_num, workers):
idx = 0
for i in range(len(workers)):
if queue_size(workers[i]) < queue_size(workers[idx]):
idx = i
return idx
lb_algos = {
'choice_two': choice_two_lb,
'random': random_lb,
'roundrobin': rr_lb,
'JSQ': shortest_queue_lb,
}
def pareto(mean, shape):
# mean = scale * shape / (shape - 1)
# solve for scale given mean and shape (aka skew)
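# => scale = mean * (shape - 1) / shape, i.e. mean - mean / shape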
scale = mean - mean / shape
return lambda: ((np.random.pareto(shape) + 1) * scale)
def expon(mean):
return lambda: random.expovariate(1.0 / mean)
# Simulate the various choices
NUM_REQUESTS = 50000
QPS = 20000
AVG_RESPONSE_MS = 0.4
SERVERS = 10
def run_simulation(
worker_desc, load_balancer, num_requests, request_per_s, latency_fn
):
simulator = RequestSimulator(
worker_desc, load_balancer, latency_fn,
num_requests, request_per_s
)
simulator.simulate()
return simulator.data
# M/G/k queue mean slowdown
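# slowdown = total response time / pure processing time; each datum is
# (t_queued, t_processing, t_total_response)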
for i in run_simulation(
(1, SERVERS), rr_lb, NUM_REQUESTS, QPS, pareto(AVG_RESPONSE_MS, 2)):
print(i[2] / i[1])
```
#### File: queueing_theory/src/latency_aware_simulator.py
```python
import random
from collections import namedtuple
import numpy as np
import simpy
LatencyDatum = namedtuple(
'LatencyDatum',
('t_queued', 't_processing', 't_total')
)
class LatencyAwareRequestSimulator(object):
""" Simulates a M/G/k process common in request processing (computing)
:param worker_desc: A list of ints of capacities to construct workers with
:param load_balancer: A function which takes the current request number,
the list of workers, and the request's processing time, and returns the index
of the worker to send the next request to
:param latency_fn: A function which takes the current request number and
returns the number of milliseconds a request took to process
:param number_of_requests: The number of requests to run through the
simulator
:param request_per_s: The rate of requests per second.
"""
def __init__(
self, worker_desc, load_balancer, latency_fn,
number_of_requests, request_per_s):
self.worker_desc = worker_desc
self.load_balancer = load_balancer
self.latency_fn = latency_fn
self.number_of_requests = int(number_of_requests)
self.request_interval_ms = 1. / (request_per_s / 1000.)
self.data = []
self.requests_per_worker = {}
def simulate(self):
# Setup and start the simulation
random.seed(1)
np.random.seed(1)
self.env = simpy.Environment()
self.workers = []
for idx, cap in enumerate(self.worker_desc):
self.workers.append(simpy.Resource(self.env, capacity=cap))
self.requests_per_worker[idx] = 0
self.env.process(self.generate_requests())
self.env.run()
def generate_requests(self):
for i in range(self.number_of_requests):
t_processing = self.latency_fn(i)
idx = self.load_balancer(i, self.workers, t_processing)
self.requests_per_worker[idx] += 1
worker = self.workers[idx]
response = self.process_request(
i, worker, t_processing
)
self.env.process(response)
# Exponential inter-arrival times == Poisson
arrival_interval = random.expovariate(
1.0 / self.request_interval_ms
)
yield self.env.timeout(arrival_interval)
def process_request(self, request_id, worker, duration):
""" Request arrives, possibly queues, and then processes"""
t_arrive = self.env.now
with worker.request() as req:
yield req
t_start = self.env.now
t_queued = t_start - t_arrive
yield self.env.timeout(duration)
t_done = self.env.now
t_processing = t_done - t_start
t_total_response = t_done - t_arrive
datum = LatencyDatum(t_queued, t_processing, t_total_response)
self.data.append(datum)
def run_simulation(
worker_desc, load_balancer, num_requests, request_per_s, latency_fn):
simulator = LatencyAwareRequestSimulator(
worker_desc, load_balancer, latency_fn,
num_requests, request_per_s
)
simulator.simulate()
return simulator.data, simulator.requests_per_worker
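# Example usage (hypothetical parameters): ten unit-capacity workers, round-robin
# routing, 10k requests at 1000 req/s, constant 0.5 ms service time:
# data, per_worker = run_simulation(
# [1] * 10, lambda i, workers, t: i % len(workers), 10000, 1000, lambda i: 0.5)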
```
#### File: queueing_theory/src/latency_distributions.py
```python
import random
import numpy as np
def zone(request):
return "abc"[request % 3]
def service(mean, slow, shape, slow_freq, slow_count):
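# Descriptive sketch of this latency model: every request pays a Pareto-distributed
# base latency with the given mean, requests routed to a worker in a different zone
# pay an extra 0.8 (ms, by the simulators' convention), and slow_count out of every
# slow_freq requests pay an additional Pareto-distributed penalty with mean `slow`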
scale = mean - mean / shape
scale_slow = slow - slow / shape
def latency(request, worker):
base = ((np.random.pareto(shape) + 1) * scale)
if (zone(request) != worker.zone):
base += 0.8
if (request % slow_freq) < slow_count:
add_l = ((np.random.pareto(shape) + 1) * scale_slow)
else:
add_l = 0
return base + add_l
return latency
def pareto(mean, shape):
# mean = scale * shape / (shape - 1)
# solve for scale given mean and shape (aka skew)
scale = mean - mean / shape
def latency(request, worker):
return ((np.random.pareto(shape) + 1) * scale)
return latency
def expon(mean):
def latency(request, worker):
return random.expovariate(1.0 / mean)
return latency
```
#### File: queueing_theory/src/speculation_simulator.py
```python
import random
from collections import namedtuple
import numpy as np
import simpy
LatencyDatum = namedtuple(
'LatencyDatum',
('t_queued', 't_processing', 't_total')
)
class SpeculatingRequestExecutor(object):
""" Simulates a M/G/k process common in request processing (computing) but
with always on speculation to another host
:param worker_desc: A tuple of (count, capacity) to construct workers with
:param load_balancer: A function which takes the current request number
and the list of workers and returns the index of the worker to
send the next request to
:param latency_fn: A function which takes the current request number and the
worker that was assigned by the load balancer and returns the number of
milliseconds a request took to process
:param number_of_requests: The number of requests to run through the
simulator
:param request_per_s: The rate of requests per second.
"""
def __init__(
self, worker_desc, load_balancer, latency_fn,
number_of_requests, request_per_s):
self.worker_desc = worker_desc
self.load_balancer = load_balancer
self.latency_fn = latency_fn
self.number_of_requests = int(number_of_requests)
self.request_interval_ms = 1. / (request_per_s / 1000.)
self.received_first = {'1': 0, '2': 0}
self.data = []
def simulate(self):
# Setup and start the simulation
random.seed(1)
np.random.seed(1)
self.env = simpy.Environment()
count, cap = self.worker_desc
self.workers = [
simpy.Resource(self.env, capacity=cap) for i in range(count)
]
self.env.process(self.generate_requests())
self.env.run()
def generate_requests(self):
for i in range(self.number_of_requests):
workers = []
for j in range(2):
idx = self.load_balancer(i, self.workers)
workers.append(self.workers[idx])
response = self.process_request(
i, workers[0], workers[1],
)
self.env.process(response)
# Exponential inter-arrival times == Poisson
arrival_interval = random.expovariate(
1.0 / self.request_interval_ms
)
yield self.env.timeout(arrival_interval)
def process_request(self, request_id, worker1, worker2):
""" Request arrives, possibly queues, and then processes"""
t_arrive = self.env.now
req1 = worker1.request()
req2 = worker2.request()
try:
result = yield req1 | req2
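# Whichever worker grants capacity first "wins"; the losing speculative request
# is cancelled and released below so it does not hold a slot on the other worker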
if req1 in result:
self.received_first['1'] += 1
req2.cancel()
req2.resource.release(req2)
else:
self.received_first['2'] += 1
req1.cancel()
req1.resource.release(req1)
t_start = self.env.now
t_queued = t_start - t_arrive
# Let the operation take whatever amount of time the latency
# function tells us to
yield self.env.timeout(self.latency_fn(request_id))
t_done = self.env.now
t_processing = t_done - t_start
t_total_response = t_done - t_arrive
self.data.append(LatencyDatum(
t_queued, t_processing, t_total_response))
finally:
worker1.release(req1)
worker2.release(req2)
def run_speculation(
worker_desc, load_balancer, num_requests, request_per_s, latency_fn):
simulator = SpeculatingRequestExecutor(
worker_desc, load_balancer, latency_fn,
num_requests, request_per_s
)
simulator.simulate()
return simulator.data, simulator.received_first
``` |
{
"source": "jolynch/service-capacity-modeling",
"score": 2
} |
#### File: service_capacity_modeling/hardware/__init__.py
```python
import json
import logging
import os
from typing import Dict
from typing import Optional
from service_capacity_modeling.interface import Drive
from service_capacity_modeling.interface import GlobalHardware
from service_capacity_modeling.interface import Hardware
from service_capacity_modeling.interface import Instance
from service_capacity_modeling.interface import Pricing
from service_capacity_modeling.interface import Service
logger = logging.getLogger(__name__)
def load_pricing(pricing: Dict) -> Pricing:
return Pricing(regions=pricing)
def load_hardware(hardware: Dict) -> Hardware:
return Hardware(**hardware)
def price_hardware(hardware: Hardware, pricing: Pricing) -> GlobalHardware:
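# Combine the unpriced hardware catalog with per-region pricing to produce a
# fully priced Hardware shape for every region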
regions: Dict[str, Hardware] = {}
for region, region_pricing in pricing.regions.items():
priced_instances: Dict[str, Instance] = {}
priced_drives: Dict[str, Drive] = {}
priced_services: Dict[str, Service] = {}
for instance, iprice in region_pricing.instances.items():
priced_instances[instance] = hardware.instances[instance].copy()
priced_instances[instance].annual_cost = iprice.annual_cost
if iprice.lifecycle is not None:
priced_instances[instance].lifecycle = iprice.lifecycle
for drive, dprice in region_pricing.drives.items():
priced_drives[drive] = hardware.drives[drive].copy()
priced_drives[drive].annual_cost_per_gib = dprice.annual_cost_per_gib
priced_drives[
drive
].annual_cost_per_read_io = dprice.annual_cost_per_read_io
priced_drives[
drive
].annual_cost_per_write_io = dprice.annual_cost_per_write_io
for svc, svc_price in region_pricing.services.items():
priced_services[svc] = hardware.services[svc].copy()
priced_services[svc].annual_cost_per_gib = svc_price.annual_cost_per_gib
priced_services[
svc
].annual_cost_per_read_io = svc_price.annual_cost_per_read_io
priced_services[
svc
].annual_cost_per_write_io = svc_price.annual_cost_per_write_io
regions[region] = Hardware(
instances=priced_instances,
drives=priced_drives,
services=priced_services,
zones_in_region=region_pricing.zones_in_region,
)
return GlobalHardware(regions=regions)
def load_hardware_from_disk(
price_path=os.environ.get("PRICE_PATH"),
shape_path=os.environ.get("HARDWARE_SHAPES"),
) -> GlobalHardware:
if price_path is not None and shape_path is not None:
with open(price_path) as pfd:
pricing = load_pricing(json.load(pfd))
with open(shape_path) as sfd:
hardware = load_hardware(json.load(sfd))
return price_hardware(hardware=hardware, pricing=pricing)
else:
return GlobalHardware(regions={})
def load_hardware_from_s3(bucket, path) -> GlobalHardware:
try:
# boto is a heavy dependency so we only want to take it if
# someone will be using it ...
try:
import boto3
import botocore
except ImportError:
return GlobalHardware(regions={})
s3 = boto3.resource("s3")
obj = s3.Object(bucket, path)
data = json.loads(obj.get()["Body"].read().decode("utf-8"))
return GlobalHardware(**data)
except botocore.exceptions.ClientError as exp:
logger.exception(exp)
return GlobalHardware(regions={})
class HardwareShapes:
def __init__(self):
self._hardware: Optional[GlobalHardware] = None
def load(self, new_hardware: GlobalHardware) -> None:
self._hardware = new_hardware
@property
def hardware(self) -> GlobalHardware:
if self._hardware is None:
from service_capacity_modeling.hardware.profiles import common_profiles
self._hardware = common_profiles["aws-3yr-reserved"]
return self._hardware
def region(self, region: str) -> Hardware:
return self.hardware.regions[region]
shapes: HardwareShapes = HardwareShapes()
```
#### File: org/netflix/stateless_java.py
```python
import math
from decimal import Decimal
from typing import Any
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Tuple
from service_capacity_modeling.interface import AccessConsistency
from service_capacity_modeling.interface import AccessPattern
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.interface import CapacityPlan
from service_capacity_modeling.interface import CapacityRegretParameters
from service_capacity_modeling.interface import CapacityRequirement
from service_capacity_modeling.interface import certain_float
from service_capacity_modeling.interface import certain_int
from service_capacity_modeling.interface import Clusters
from service_capacity_modeling.interface import Consistency
from service_capacity_modeling.interface import DataShape
from service_capacity_modeling.interface import Drive
from service_capacity_modeling.interface import FixedInterval
from service_capacity_modeling.interface import GlobalConsistency
from service_capacity_modeling.interface import Instance
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import QueryPattern
from service_capacity_modeling.interface import RegionClusterCapacity
from service_capacity_modeling.interface import RegionContext
from service_capacity_modeling.interface import Requirements
from service_capacity_modeling.models import CapacityModel
from service_capacity_modeling.models.common import compute_stateless_region
from service_capacity_modeling.models.common import simple_network_mbps
from service_capacity_modeling.models.common import sqrt_staffed_cores
def _estimate_java_app_requirement(
desires: CapacityDesires,
failover: bool = True,
jvm_memory_overhead: float = 1.2,
) -> CapacityRequirement:
needed_cores = sqrt_staffed_cores(desires)
needed_network_mbps = simple_network_mbps(desires)
if failover:
# For failover provision at 40% utilization
needed_cores = int(math.ceil(needed_cores * (1 / 0.4)))
needed_network_mbps = int(math.ceil(needed_network_mbps * (1 / 0.4)))
# Assume a Java application that can allocate about 1 GiB/s to heap
# per 2 GiB of heap with some amount of overhead on the network traffic.
# So, for example, 512 MiB/s of network traffic implies somewhat more than
# 512 MiB/s of heap allocation once that overhead factor is applied.
# TODO: we should probably have the memory bandwidth attached to
# the instance type, e.g. Intel CPUs and AMD CPUs have different
# per core memory bandwidth.
mem_allocation_mbps = needed_network_mbps * jvm_memory_overhead
heap_allocation_gibps = (mem_allocation_mbps / 8) / 1024
network_heap = heap_allocation_gibps * 2
needed_memory_gib = network_heap
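# Worked example with hypothetical numbers: 1000 Mbps of traffic * 1.2 overhead
# = 1200 Mbps of allocation ~= 0.15 GiB/s, which at 2 GiB of heap per GiB/s
# needs roughly 0.3 GiB of heap for network buffers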
return CapacityRequirement(
requirement_type="java-app",
core_reference_ghz=desires.core_reference_ghz,
cpu_cores=certain_int(needed_cores),
mem_gib=certain_float(needed_memory_gib),
network_mbps=certain_float(needed_network_mbps),
context={
"network_heap_gib": network_heap,
"reserved_mem": desires.data_shape.reserved_instance_app_mem_gib,
},
)
def _estimate_java_app_region(
instance: Instance,
drive: Drive,
desires: CapacityDesires,
root_disk_gib: int = 10,
failover: bool = True,
jvm_memory_overhead: float = 2,
zones_per_region: int = 3,
) -> Optional[CapacityPlan]:
if drive.name != "gp2":
return None
requirement = _estimate_java_app_requirement(desires, failover, jvm_memory_overhead)
drive = drive.copy()
drive.size_gib = root_disk_gib
attached_drives = (drive,)
cluster: RegionClusterCapacity = compute_stateless_region(
instance=instance,
needed_cores=int(requirement.cpu_cores.mid),
needed_memory_gib=requirement.mem_gib.mid,
needed_network_mbps=requirement.network_mbps.mid,
core_reference_ghz=requirement.core_reference_ghz,
num_zones=zones_per_region,
)
cluster.cluster_type = "nflx-java-app"
cluster.attached_drives = attached_drives
# Generally don't want giant clusters; some load balancers struggle well
# before 1000 instances, so cap regional clusters at 256 here
if cluster.count <= 256:
return CapacityPlan(
requirements=Requirements(regional=[requirement]),
candidate_clusters=Clusters(
total_annual_cost=round(Decimal(cluster.annual_cost), 2),
regional=[cluster],
zonal=[],
),
)
return None
class NflxJavaAppCapacityModel(CapacityModel):
@staticmethod
def capacity_plan(
instance: Instance,
drive: Drive,
context: RegionContext,
desires: CapacityDesires,
extra_model_arguments: Dict[str, Any],
) -> Optional[CapacityPlan]:
failover: bool = extra_model_arguments.get("failover", True)
jvm_memory_overhead: float = extra_model_arguments.get(
"jvm_memory_overhead", 1.2
)
root_disk_gib: int = extra_model_arguments.get("root_disk_gib", 10)
return _estimate_java_app_region(
instance=instance,
drive=drive,
desires=desires,
failover=failover,
root_disk_gib=root_disk_gib,
jvm_memory_overhead=jvm_memory_overhead,
zones_per_region=context.zones_in_region,
)
@staticmethod
def description():
return "Netflix Streaming Java App Model"
@staticmethod
def extra_model_arguments() -> Sequence[Tuple[str, str, str]]:
return (
("failover", "bool = 1", "If this app participates in failover"),
(
"jvm_memory_overhead",
"float = 1.2",
"How much overhead does the heap have per read byte",
),
("root_disk_gib", "int = 10", "How many GiB of root volume to attach"),
)
@staticmethod
def regret(
regret_params: CapacityRegretParameters,
optimal_plan: CapacityPlan,
proposed_plan: CapacityPlan,
) -> Dict[str, float]:
regret = super(NflxJavaAppCapacityModel, NflxJavaAppCapacityModel).regret(
regret_params, optimal_plan, proposed_plan
)
regret["disk_space"] = 0
return regret
@staticmethod
def default_desires(user_desires, extra_model_arguments):
if user_desires.query_pattern.access_pattern == AccessPattern.latency:
return CapacityDesires(
query_pattern=QueryPattern(
access_pattern=AccessPattern.latency,
access_consistency=GlobalConsistency(
same_region=Consistency(
target_consistency=AccessConsistency.read_your_writes,
),
cross_region=Consistency(
target_consistency=AccessConsistency.never,
),
),
estimated_mean_read_size_bytes=Interval(
low=128, mid=1024, high=65536, confidence=0.95
),
estimated_mean_write_size_bytes=Interval(
low=64, mid=128, high=1024, confidence=0.95
),
estimated_mean_read_latency_ms=Interval(
low=0.2, mid=1, high=2, confidence=0.98
),
estimated_mean_write_latency_ms=Interval(
low=0.2, mid=1, high=2, confidence=0.98
),
# "Single digit milliseconds SLO"
read_latency_slo_ms=FixedInterval(
minimum_value=0.5,
maximum_value=10,
low=1,
mid=2,
high=5,
confidence=0.98,
),
write_latency_slo_ms=FixedInterval(
low=1, mid=2, high=5, confidence=0.98
),
),
data_shape=DataShape(
# Assume 4 GiB heaps
reserved_instance_app_mem_gib=4
),
)
else:
return CapacityDesires(
query_pattern=QueryPattern(
access_pattern=AccessPattern.latency,
access_consistency=GlobalConsistency(
same_region=Consistency(
target_consistency=AccessConsistency.read_your_writes,
),
cross_region=Consistency(
target_consistency=AccessConsistency.never,
),
),
estimated_mean_read_size_bytes=Interval(
low=128, mid=1024, high=65536, confidence=0.95
),
estimated_mean_write_size_bytes=Interval(
low=64, mid=128, high=1024, confidence=0.95
),
# Throughput ops can be slower
estimated_mean_read_latency_ms=Interval(
low=0.2, mid=4, high=8, confidence=0.98
),
estimated_mean_write_latency_ms=Interval(
low=0.2, mid=1, high=5, confidence=0.98
),
# "Tens of millisecond SLO"
read_latency_slo_ms=FixedInterval(
minimum_value=0.5,
maximum_value=100,
low=1,
mid=5,
high=40,
confidence=0.98,
),
write_latency_slo_ms=FixedInterval(
minimum_value=0.5,
maximum_value=100,
low=1,
mid=5,
high=40,
confidence=0.98,
),
),
data_shape=DataShape(
# Assume 4 GiB heaps
reserved_instance_app_mem_gib=4
),
)
nflx_java_app_capacity_model = NflxJavaAppCapacityModel()
```
#### File: service-capacity-modeling/service_capacity_modeling/stats.py
```python
from functools import lru_cache
from typing import Tuple
import numpy as np
from scipy.optimize import minimize
from scipy.optimize import OptimizeResult
from scipy.optimize import root
from scipy.special import gammainc as gammaf
from scipy.stats import beta as beta_dist
from scipy.stats import gamma as gamma_dist
from scipy.stats import rv_continuous
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import IntervalModel
# Parameter estimation of various scipy distributions from quantiles.
# See https://www.johndcook.com/quantiles_parameters.pdf for
# background
EPSILON = 0.001
# Gamma distribution G(alpha, beta) with mean alpha * beta
def _gamma_fn_from_params(low, mid, high, confidence):
assert 0 < low <= mid <= high
confidence = min(confidence, 0.99)
confidence = max(confidence, 0.01)
low_p = 0 + (1 - confidence) / 2.0
high_p = 1 - (1 - confidence) / 2.0
# cdf(x) = F(k) * gammaf(shape, x / scale)
# mean = shape * scale
# We know the value at two points of the cdf and the mean so we can
# basically setup a system of equations of cdf(high) / cdf(low) = known
# and mean = known
#
# Then we can use numeric methods to solve for the remaining shape parameter
def f(k):
zero = high / low
return gammaf(k, high_p * k / mid) / gammaf(k, low_p * k / mid) - zero
return f
def _gamma_dist_from_interval(
interval: Interval, seed: int = 0xCAFE
) -> Tuple[float, rv_continuous]:
# If we know cdf(high), cdf(low) and mean (mid) we can use an iterative
# solver to find a possible gamma fit
# Note we shift the lower bound and mean by the minimum (which defaults to
# half the lower bound) so we don't end up with less than the minimum
# estimate. This does distort the gamma, but in a way that is useful for
# capacity planning (sorta like a wet bias in forecasting models)
minimum = interval.minimum
lower = interval.low - minimum
mean = interval.mid - minimum
if lower == 0:
lower = EPSILON
f = _gamma_fn_from_params(lower, mean, interval.high, interval.confidence)
result = root(f, 2)
shape = result.x[0]
dist = gamma_dist(shape, loc=minimum, scale=(mean / shape))
dist.random_state = np.random.default_rng(seed=seed)
return (shape, dist)
# This can be expensive, so cache it
@lru_cache(maxsize=128)
def _gamma_for_interval(interval: Interval, seed: int = 0xCAFE) -> rv_continuous:
return _gamma_dist_from_interval(interval, seed=seed)[1]
def gamma_for_interval(interval: Interval, seed: int = 0xCAFE) -> rv_continuous:
result = _gamma_for_interval(interval, seed)
# Use the new Generator API instead of RandomState for ~20% speedup
result.random_state = np.random.default_rng(seed=seed)
return result
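# Example (hypothetical values):
# gamma_for_interval(Interval(low=10, mid=100, high=1000, confidence=0.98)).rvs(4)
# draws four samples from the fitted, seeded distribution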
# Beta distribution B(alpha, beta) with mean alpha / (alpha + beta)
def _beta_cost_fn_from_params(low, mid, high, confidence):
assert low <= mid <= high < 1.0
assert mid > 0
# Assume symmetric percentiles were provided
confidence = min(confidence, 0.99)
confidence = max(confidence, 0.01)
low_p = 0.0 + (1 - confidence) / 2.0
high_p = 1.0 - (1 - confidence) / 2.0
def cost(alpha):
beta = alpha / mid - alpha
if alpha == 0 or beta == 0:
return float("inf")
cost = (beta_dist.cdf(low, alpha, beta) - low_p) ** 2
cost += (beta_dist.cdf(high, alpha, beta) - high_p) ** 2
return cost
return cost
def _beta_dist_from_interval(
interval: Interval, seed: int = 0xCAFE
) -> Tuple[Tuple[float, float, OptimizeResult], rv_continuous]:
# If we know cdf(high), cdf(low) and mean (mid) we can use an iterative
# solver to find a possible beta fit
if interval.minimum == interval.maximum:
minimum = interval.low - EPSILON
maximum = interval.high + EPSILON
scale = maximum - minimum
else:
minimum = interval.minimum
maximum = interval.maximum
scale = maximum - minimum
lower = (interval.low - minimum) / scale
mean = (interval.mid - minimum) / scale
upper = (interval.high - minimum) / scale
f = _beta_cost_fn_from_params(lower, mean, upper, interval.confidence)
result = minimize(f, x0=2, bounds=[(0.1, 40)])
alpha = result.x[0]
dist = beta_dist(alpha, alpha / mean - alpha, loc=minimum, scale=scale)
dist.random_state = np.random.default_rng(seed=seed)
return (alpha, alpha / mean - alpha, result), dist
# This can be expensive, so cache it
@lru_cache(maxsize=128)
def _beta_for_interval(interval: Interval, seed: int = 0xCAFE) -> rv_continuous:
return _beta_dist_from_interval(interval, seed=seed)[1]
def beta_for_interval(interval: Interval, seed: int = 0xCAFE) -> rv_continuous:
result = _beta_for_interval(interval, seed)
# Use the new Generator API instead of RandomState for ~20% speedup
result.random_state = np.random.default_rng(seed=seed)
return result
def dist_for_interval(interval: Interval, seed: int = 0xCAFE) -> rv_continuous:
if interval.model_with == IntervalModel.beta:
result = beta_for_interval(interval=interval, seed=seed)
elif interval.model_with == IntervalModel.gamma:
result = gamma_for_interval(interval=interval, seed=seed)
else:
result = beta_for_interval(interval=interval, seed=seed)
return result
```
#### File: tests/netflix/test_java_app.py
```python
from service_capacity_modeling.capacity_planner import planner
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.interface import certain_float
from service_capacity_modeling.interface import certain_int
from service_capacity_modeling.interface import DataShape
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import QueryPattern
small_but_high_qps = CapacityDesires(
service_tier=1,
query_pattern=QueryPattern(
estimated_read_per_second=certain_int(100000),
estimated_write_per_second=certain_int(100000),
estimated_mean_read_latency_ms=certain_float(0.4),
estimated_mean_write_latency_ms=certain_float(0.4),
),
data_shape=DataShape(
estimated_state_size_gib=certain_int(10),
),
)
large_footprint = CapacityDesires(
service_tier=1,
query_pattern=QueryPattern(
estimated_read_per_second=certain_int(60000),
estimated_write_per_second=certain_int(60000),
estimated_mean_read_latency_ms=certain_float(0.8),
estimated_mean_write_latency_ms=certain_float(0.5),
),
data_shape=DataShape(
estimated_state_size_gib=certain_int(4000),
),
)
def test_java_app():
java_cap_plan = planner.plan_certain(
model_name="org.netflix.stateless-java",
region="us-east-1",
desires=large_footprint,
)[0]
java_result = java_cap_plan.candidate_clusters.regional[0]
cores = java_result.count * java_result.instance.cpu
assert java_result.instance.name.startswith("m5.")
assert 100 <= cores <= 300
java_cap_plan = planner.plan(
model_name="org.netflix.stateless-java",
region="us-east-1",
desires=small_but_high_qps,
).least_regret[0]
java_result = java_cap_plan.candidate_clusters.regional[0]
cores = java_result.count * java_result.instance.cpu
assert java_result.instance.name.startswith("m5.")
assert 100 <= cores <= 300
def test_uncertain_java_app():
uncertain = CapacityDesires(
service_tier=1,
query_pattern=QueryPattern(
estimated_read_per_second=Interval(
low=2_000, mid=30_000, high=60_000, confidence=0.98
),
estimated_write_per_second=Interval(
low=2_000, mid=30_000, high=60_000, confidence=0.98
),
),
# Should be ignored
data_shape=DataShape(
estimated_state_size_gib=Interval(low=50, mid=500, high=1000),
reserved_instance_app_mem_gib=4,
),
)
java_cap_plan = planner.plan(
model_name="org.netflix.stateless-java",
region="us-east-1",
desires=uncertain,
)
java_least_regret = java_cap_plan.least_regret[0]
java_result = java_least_regret.candidate_clusters.regional[0]
cores = java_result.count * java_result.instance.cpu
assert java_result.instance.name.startswith("m5.")
assert 100 <= cores <= 300
# KeyValue regional clusters should match
kv_cap_plan = planner.plan(
model_name="org.netflix.key-value",
region="us-east-1",
desires=uncertain,
)
kv_least_regret = kv_cap_plan.least_regret[0]
kv_result = kv_least_regret.candidate_clusters.regional[0]
kv_cores = kv_result.count * kv_result.instance.cpu
assert kv_result.instance.name.startswith("m5.")
assert 0.5 <= float(kv_cores) / cores <= 1.5
assert kv_least_regret.candidate_clusters.zonal[0].count > 0
def test_java_heap_heavy():
large_heap = CapacityDesires(
service_tier=1,
query_pattern=QueryPattern(
estimated_read_per_second=Interval(
low=2_000, mid=10_000, high=60_000, confidence=0.98
),
estimated_write_per_second=Interval(
low=2_000, mid=10_000, high=60_000, confidence=0.98
),
),
data_shape=DataShape(
reserved_instance_app_mem_gib=40,
),
)
java_cap_plan = planner.plan(
model_name="org.netflix.stateless-java",
region="us-east-1",
desires=large_heap,
)
java_least_regret = java_cap_plan.least_regret[0]
java_result = java_least_regret.candidate_clusters.regional[0]
cores = java_result.count * java_result.instance.cpu
assert 20 <= cores <= 100
assert java_result.instance.ram_gib > 40
# Should bump the heap due to the traffic
large_traffic = CapacityDesires(
service_tier=1,
query_pattern=QueryPattern(
estimated_read_per_second=Interval(
low=2_000, mid=30_000, high=60_000, confidence=0.98
),
estimated_write_per_second=Interval(
low=2_000, mid=30_000, high=60_000, confidence=0.98
),
estimated_mean_write_size_bytes=Interval(
low=1024, mid=32768, high=262144, confidence=0.98
),
),
)
java_cap_plan = planner.plan(
model_name="org.netflix.stateless-java",
region="us-east-1",
desires=large_traffic,
)
java_least_regret = java_cap_plan.least_regret[0]
java_result = java_least_regret.candidate_clusters.regional[0]
cores = java_result.count * java_result.instance.cpu
assert 100 <= cores <= 300
# 32 KiB payloads * 30k/second is around 1 GiB per second
# which should require a decent chunk of heap memory
memory = java_result.count * java_result.instance.ram_gib
assert memory > 50
```
#### File: service-capacity-modeling/tests/test_common.py
```python
from decimal import Decimal
from service_capacity_modeling.hardware import shapes
from service_capacity_modeling.interface import CapacityPlan
from service_capacity_modeling.interface import CapacityRequirement
from service_capacity_modeling.interface import Clusters
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import RegionClusterCapacity
from service_capacity_modeling.interface import Requirements
from service_capacity_modeling.interface import ZoneClusterCapacity
from service_capacity_modeling.models.common import merge_plan
def test_merge_plan():
left_requirement = CapacityRequirement(
requirement_type="test",
core_reference_ghz=2.3,
cpu_cores=Interval(low=10, mid=20, high=30, confidence=0.98),
mem_gib=Interval(low=20, mid=100, high=200, confidence=0.98),
network_mbps=Interval(low=1000, mid=2000, high=3000, confidence=0.98),
disk_gib=Interval(low=40, mid=200, high=500, confidence=0.98),
)
right_requirement = CapacityRequirement(
requirement_type="test",
core_reference_ghz=2.3,
cpu_cores=Interval(low=10, mid=20, high=30, confidence=0.98),
mem_gib=Interval(low=20, mid=100, high=200, confidence=0.98),
network_mbps=Interval(low=1000, mid=2000, high=3000, confidence=0.98),
disk_gib=Interval(low=40, mid=200, high=500, confidence=0.98),
)
left_instance = shapes.region("us-east-1").instances["r5d.2xlarge"]
right_instance = shapes.region("us-east-1").instances["m5.2xlarge"]
left_plan = CapacityPlan(
requirements=Requirements(zonal=[left_requirement]),
candidate_clusters=Clusters(
total_annual_cost=Decimal(1234),
zonal=[
ZoneClusterCapacity(
cluster_type="left",
count=2,
instance=left_instance,
attached_drives=[],
annual_cost=1234,
)
],
),
)
right_plan = CapacityPlan(
requirements=Requirements(zonal=[right_requirement]),
candidate_clusters=Clusters(
total_annual_cost=Decimal(1468),
regional=[
RegionClusterCapacity(
cluster_type="right",
count=2,
instance=right_instance,
attached_drives=[],
annual_cost=234,
)
],
zonal=[
ZoneClusterCapacity(
cluster_type="right",
count=4,
instance=left_instance,
attached_drives=[],
annual_cost=1234,
)
],
),
)
result = merge_plan(left_plan, right_plan)
assert result is not None
assert (
result.requirements.zonal[0].cpu_cores.mid
+ result.requirements.zonal[1].cpu_cores.mid
== 40
)
assert (
result.requirements.zonal[0].network_mbps.mid
+ result.requirements.zonal[1].network_mbps.mid
== 4000
)
assert result.candidate_clusters.regional == right_plan.candidate_clusters.regional
assert left_plan.candidate_clusters.zonal[0] in result.candidate_clusters.zonal
assert right_plan.candidate_clusters.zonal[0] in result.candidate_clusters.zonal
```
#### File: service-capacity-modeling/tests/test_desire_merge.py
```python
from service_capacity_modeling.capacity_planner import planner
from service_capacity_modeling.interface import CapacityDesires
from service_capacity_modeling.interface import certain_int
from service_capacity_modeling.interface import DataShape
from service_capacity_modeling.interface import Interval
from service_capacity_modeling.interface import QueryPattern
user_desires = CapacityDesires(
service_tier=0,
query_pattern=QueryPattern(
estimated_read_per_second=certain_int(100000),
estimated_write_per_second=certain_int(100000),
estimated_mean_read_size_bytes=Interval(
low=10, mid=100, high=1000, confidence=0.98
),
),
data_shape=DataShape(
estimated_state_size_gib=certain_int(10),
),
)
def test_cassandra_merge():
cass_defaults = planner.models["org.netflix.cassandra"].default_desires(
user_desires, {}
)
merged = user_desires.merge_with(cass_defaults)
# Should come from the user
assert merged.service_tier == 0
assert merged.query_pattern.estimated_read_per_second.mid == 100000
assert merged.query_pattern.estimated_mean_read_size_bytes.low == 10
assert merged.data_shape.estimated_state_size_gib.mid == 10
# Should come from cassandra
assert merged.query_pattern.estimated_mean_read_latency_ms.mid == 2.0
assert merged.query_pattern.estimated_mean_write_latency_ms.mid == 1.0
# Should come from overall defaults
assert merged.data_shape.estimated_state_item_count is None
assert merged.core_reference_ghz == 2.3
``` |
{
"source": "jolyonb/chisquaredfields",
"score": 4
} |
#### File: chisquaredfields/source/mcintegrate.py
```python
import numpy as np
from math import sqrt
from random import normalvariate, uniform
def MCIntegrate(variables, integrand, probability, numsamples, parameters={}, burnin=1000) :
"""Perform a Metropolis-Hastings Monte Carlo integral for a 5D vector integrand"""
# Initialize data collection
samples = -burnin
numsamples = int(numsamples)
results = np.zeros([numsamples, 5])
accepted = 0
# Initialize values for each variable
values = [0.0] * len(variables)
lastprob = init_values(variables, values, probability, parameters)
# Start integrating
while samples < numsamples :
# Jump to a new point
values, lastprob, jumped = update_values(variables, values, probability, parameters, lastprob)
# Are we done burning in?
if samples >= 0 :
# Count the number of iterations to compute the acceptance ratio
if jumped : accepted += 1
# Compute the integrand
intval = integrand(values, parameters)
# Add it to the results
results[samples] = intval
# Increment samples
samples += 1
# Compute integrals for the five results (the four types of stationary points plus the signed density)
vals = [0] * 5
vals[0], vals[1], vals[2], vals[3], vals[4] = np.transpose(results)
integrals = np.zeros(5)
errors = np.zeros(5)
variances = np.zeros(5)
for i in range(5) :
integrals[i] = np.sum(vals[i]) / numsamples
delta = vals[i] - integrals[i]
variances[i] = np.sum(delta * delta) / (numsamples - 1)
errors[i] = sqrt(variances[i] / numsamples)
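# i.e. the standard Monte Carlo error estimate: sample standard deviation / sqrt(numsamples)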
# Return integrals, errors and acceptance rate
return integrals, errors, float(accepted) / numsamples
def update_values(variables, values, probability, parameters, lastprob) :
"""
Jump to a new point in the domain of integration using the Metropolis-Hastings approach
"""
newvalues = [0.0] * len(variables)
# Generate a distance to jump for each variable
for i, var in enumerate(variables) :
vartype = var[1]
varjump = var[2]
if vartype == "n" :
newvalues[i] = normalvariate(values[i], varjump)
elif vartype == "pn" :
newvalues[i] = abs(normalvariate(values[i], varjump))
# Calculate the new probability
newprob = probability(newvalues, parameters)
# Accept or reject?
# Return the new values, new probability and whether or not a jump was made
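# Metropolis rule: always accept if the probability increased; otherwise accept
# with probability newprob / lastprob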
if newprob >= lastprob or uniform(0, 1) < newprob / lastprob :
# Accept
return newvalues, newprob, True
else :
# Reject
return values, lastprob, False
def init_values(variables, values, probability, parameters) :
"""Initialize variables for integration"""
# Generate values for each variable
for i, var in enumerate(variables) :
vartype = var[1]
if vartype == "n" :
values[i] = normalvariate(0, 1)
elif vartype == "pn" :
values[i] = abs(normalvariate(0, 1))
# Return the probability where we've landed
return probability(values, parameters)
if __name__ == "__main__":
print("This is a supporting library. It is not intended to be executed.")
```
#### File: chisquaredfields/source/plot.py
```python
import matplotlib.pyplot as plt
import numpy as np
import argparse
from common import signed_exact, lowgamma_extrema, lowgamma_saddles
#
# Deal with command line arguments
#
parser = argparse.ArgumentParser(description="Plot the number density of stationary points in chi^2 fields", epilog="By default, all plots and curves are shown.")
# Input file
parser.add_argument("-i", help="Input file (default 'data.dat')",
type=argparse.FileType('r'), default="data.dat", dest="file")
# Plot signed number density?
parser.add_argument("-ns", help="Do not plot signed number density",
dest="signed", action="store_false", default=True)
# Plot individual number densities?
parser.add_argument("-ni", help="Do not plot individual number densities",
dest="individual", action="store_false", default=True)
# Which lines to plot?
parser.add_argument("-nx", help="Individual plot: Do not plot maxima",
dest="max", action="store_false", default=True)
parser.add_argument("-nn", help="Individual plot: Do not plot minima",
dest="min", action="store_false", default=True)
parser.add_argument("-nppm", help="Individual plot: Do not plot (++-) saddles",
dest="saddleppm", action="store_false", default=True)
parser.add_argument("-npmm", help="Individual plot: Do not plot (+--) saddles",
dest="saddlepmm", action="store_false", default=True)
# Plot low gamma analytic approximations
parser.add_argument("-l", help="Plot low-gamma analytic approximations (default off)",
dest="lowgamma", action="store_true", default=False)
# Error scaling
def pos_float(value):
try:
ivalue = float(value)
except ValueError :
raise argparse.ArgumentTypeError("must be a number. You supplied: %s" % value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("must be > 0. You supplied: %s" % value)
return ivalue
parser.add_argument("-e", help="Error scaling (default 1.0)",
dest="errscale", default=1.0, type=pos_float)
# Quiet mode?
parser.add_argument("-q", help="Quiet mode", action="store_true", dest="quiet", default=False)
# Parse the command line
args = parser.parse_args()
# Print the header
if not args.quiet : print __doc__
#
# Processing starts here
#
# Read in the data
data = args.file.readlines()
args.file.close()
# Process the data
results = [map(float, x.split(", ")) for x in data]
N, gamma, sigma0, sigma1, numsamples = results[0]
data = np.transpose(results[1:])
nuvals = data[0]
nucount = 1000
nuvalsexact = np.linspace(nuvals[0], nuvals[-1], nucount)
integrals = data[1:6]
errors = data[6:11]
errors *= args.errscale
# 11 is the exact signed number density at that nu
# 12 is the acceptance rate from the MCMC (vegas will return zero)
def plot_signed(integrals, errors, nuvals, nuvalsexact, exactvals) :
"""Plot the signed number density, along with the exact solution"""
# Global marker size msize
msize = 4
# Plot the data with errorbars and the exact solution
fig, ax = plt.subplots()
ax.errorbar(nuvals, integrals[4], yerr=errors[4], fmt='ro', markersize=msize, label="Numeric")
ax.plot(nuvalsexact, exactvals, 'b-', label="Analytic")
# Labels
ax.set_title(r'Signed Number Density', fontsize=20)
ax.set_xlabel(r'$\bar{\nu}$',fontsize=20)
ax.set_ylabel(r'$\left\langle \frac{d{\cal N}^\mathrm{signed}}{d\bar{\nu}} \right\rangle$',fontsize=24,labelpad=-8)
fig.subplots_adjust(left=0.16)
# Legend
h1, l1 = ax.get_legend_handles_labels()
legend = ax.legend(h1,l1,loc='upper right',shadow=False,fancybox=True)
    # The frame is a matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('1.0')
# Set sizes for the legend
for label in legend.get_texts():
label.set_fontsize(14)
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
# Tick marks
ax.tick_params(axis='x', which='major', labelsize=16)
ax.tick_params(axis='y', which='major', labelsize=16)
# Text box of parameters
textlist=[
r'$\sigma_0=%.2f$'%(sigma0)+'\n',
r'$\sigma_1=%.2f$'%(sigma1)+'\n',
r'$\gamma \,\, = %.2f$'%(gamma)+'\n',
r'$N\, = %1d$'%(N)]
textstr=''.join(textlist)
props = dict(boxstyle='round', facecolor='white', edgecolor='black')
ax.text(0.8, 0.8, textstr, transform=ax.transAxes, fontsize=16,
verticalalignment='top', bbox=props)
def plot_all(integrals, errors, nuvals, curves, nuvalsexact, extrema_vals, saddle_vals, lowgamma) :
"""Plot the individual number densities"""
# A pretty purple!
prettypurple = "#DE00FF"
# Global marker size msize
msize = 4
# Plot the data with errorbars and the exact solution
fig, ax = plt.subplots()
# plt.rc('text', usetex=True)
# plt.rc('font', family='sans-serif')
if curves[0] : ax.errorbar(nuvals, integrals[0], yerr=errors[0], markersize=msize, fmt='ro-', label="Minima")
if curves[1] : ax.errorbar(nuvals, integrals[1], yerr=errors[1], markersize=msize, fmt='bo-', label="Saddle (+,+,--)")
if curves[2] : ax.errorbar(nuvals, integrals[2], yerr=errors[2], markersize=msize, fmt='go-', label="Saddle (+,--,--)")
if curves[3] : ax.errorbar(nuvals, integrals[3], yerr=errors[3], markersize=msize, color=prettypurple, fmt='o-', label="Maxima")
# Plotting low gamma approximations
if lowgamma and (curves[3] or curves[0]) : ax.plot(nuvalsexact, extrema_vals, 'r-')
if lowgamma and (curves[2] or curves[1]) : ax.plot(nuvalsexact, saddle_vals, 'b-')
# Labels
ax.set_title(r'Number Density of Stationary Points', fontsize=20)
ax.set_xlabel(r'$\bar{\nu}$',fontsize=20)
ax.set_ylabel(r'$\left\langle \frac{d{\cal N}^\mathrm{stationary}}{d\bar{\nu}} \right\rangle$',fontsize=24,labelpad=-8)
fig.subplots_adjust(left=0.16)
# Legend
h1, l1 = ax.get_legend_handles_labels()
legend = ax.legend(h1,l1,loc='upper right',shadow=False,fancybox=True)
    # The frame is a matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('1.0')
# Set sizes for the legend
for label in legend.get_texts():
label.set_fontsize(14)
for label in legend.get_lines():
label.set_linewidth(1.5) # the legend line width
# Tick marks
ax.tick_params(axis='x', which='major', labelsize=16)
ax.tick_params(axis='y', which='major', labelsize=16)
# Text box of parameters
textlist=[
r'$\sigma_0=%.2f$'%(sigma0)+'\n',
r'$\sigma_1=%.2f$'%(sigma1)+'\n',
r'$\gamma \,\, = %.2f$'%(gamma)+'\n',
r'$N\, = %1d$'%(N)]
textstr=''.join(textlist)
props = dict(boxstyle='round', facecolor='white', edgecolor='black')
ax.text(0.8, 0.65, textstr, transform=ax.transAxes, fontsize=16,
verticalalignment='top', bbox=props)
# Set some plotting parameters
plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
if args.signed :
# Compute the exact solution for the signed number density
exactvals = np.zeros(nucount)
for i, nuval in enumerate(nuvalsexact) :
exactvals[i] = signed_exact(N, gamma, nuval, sigma0, sigma1)
# Plot the signed number density
plot_signed(integrals, errors, nuvals, nuvalsexact, exactvals)
if args.individual :
if args.lowgamma :
#Calculating the low gamma approximation for saddles and extrema.
extrema_vals = np.zeros(nucount)
saddle_vals = np.zeros(nucount)
for i, nuval in enumerate(nuvalsexact) :
extrema_vals[i] = lowgamma_extrema(N, gamma, nuval, sigma0, sigma1)
saddle_vals[i] = lowgamma_saddles(N, gamma, nuval, sigma0, sigma1)
else :
extrema_vals = False
saddle_vals = False
nuvalsexact = False
# Plot the individual number densities
curves = [args.min, args.saddleppm, args.saddlepmm, args.max]
if any(curves) :
plot_all(integrals, errors, nuvals, curves, nuvalsexact, extrema_vals, saddle_vals, args.lowgamma)
else :
print "Error: No curves requested in individual number densities!"
if args.signed or args.individual :
plt.show()
else :
print "Error: No plots requested!"
```
#### File: chisquaredfields/source/stationary_vegas.py
```python
import numpy as np
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
import vegas
import integrand as INT
import time
import common
from math import pi
def number_density(parameters, numsamples):
N, gamma, nu, sigma0, sigma1 = parameters
domain=[[-pi/2,pi/2],[-pi/2,pi/2],[-pi/2,pi/2],[0,pi/2],[0,pi/2],[0,pi/2],[-pi/2,pi/2],[-pi/2,pi/2],[-pi/2,pi/2]]
f=INT.f_cython(dim=9,N=N,nu=nu,gamma=gamma)
integ = vegas.Integrator(domain, nhcube_batch=1000)
integ(f, nitn=10, neval=numsamples)
vecresult = integ(f, nitn=10, neval=numsamples)
scale = common.scale(*parameters)
V_N = common.V_N(N,gamma)
prefactor = scale/V_N
integrals = np.zeros(5)
errors = np.zeros(5)
for i in range(1,vecresult.shape[0]):
integrals[i-1] = prefactor*vecresult[i].mean
errors[i-1] = prefactor*vecresult[i].sdev
integrals[4] = prefactor*vecresult[0].mean
errors[4] = prefactor*vecresult[0].sdev
return integrals, errors, common.signed_exact(*parameters), 0.0
if __name__ == "__main__":
# parameters = [N, gamma, nu, sigma0, sigma1]
parameters = [4, 0.8, 1.0, 1.0, 1.0]
numsamples = 1e5
print("*" * 60)
print("Performing test run")
print("Computing number densities with", int(numsamples), "samples...")
print("N = ", parameters[0])
print("nu = ", parameters[2])
print("gamma = ", parameters[1])
print("sigma_0 = ", parameters[3])
print("sigma_1 = ", parameters[4])
start = time.time()
integrals, errors, exact, acceptance = number_density(parameters, numsamples)
end = time.time()
print("Finished in", round(end - start, 4), "s")
print("Minima: ", integrals[0], "+-", errors[0])
print("Saddle (++-):", integrals[1], "+-", errors[1])
print("Saddle (+--):", integrals[2], "+-", errors[2])
print("Maxima: ", integrals[3], "+-", errors[3])
print("Signed: ", integrals[4], "+-", errors[4])
print("Signed exact:", exact)
print("Signed error:", abs(integrals[4] - exact))
``` |
{
"source": "jolyonb/edXstructure",
"score": 3
} |
#### File: edXstructure/structgen/structgen.py
```python
import argparse, os, shutil, csv, collections
levels = {"course" : 0, "chapter" : 1, "sequential" : 2, "vertical" : 3, "html" : 4, "video" : 4, "problem" : 4}
content_type = {"html": 0, "video": 1, "problem": 2}
short = ["course", "chap", "seq", "vert", "html", "video", "problem"]
def interpret(line) :
'''
Reads a line, and returns a list containing the entry, url_name, display_name, and a list of any extras
'''
entry = line[0]
url_name = ""
display_name = ""
extras = []
if len(line) > 1 :
url_name = line[1]
if len(line) > 2 :
display_name = line[2]
if len(line) > 3 :
extras = line[3:]
return [entry, url_name, display_name, extras]
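# Illustrative example of interpret() on a (hypothetical) CSV row:
#   interpret(["chapter", "chap-intro", "Introduction"])
#   -> ["chapter", "chap-intro", "Introduction", []]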
class Node :
def __init__(self, line) :
'''
Store the information about this node
'''
self.entry, self.url_name, self.display_name, self.extras = interpret(line)
self.children = []
self.level = levels[self.entry]
self.content_type = 0
if self.level == 4 :
self.content_type = content_type[self.entry]
self.extratext = " ".join(self.extras)
if len(self.extratext) > 0 : self.extratext = " " + self.extratext
def create_ID(self, counts) :
'''
Creates a unique ID, and sets the url_name to this if it's empty
'''
self.ID = short[self.level + self.content_type] + "-" + str(counts[self.level + self.content_type])
if self.url_name.strip() == "" :
self.url_name = self.ID
def print_node(self) :
'''
Prints itself and all of its children
'''
print " " * self.level + self.entry + ": " + self.display_name
for i in self.children :
i.print_node()
def write_node(self, file_handle) :
'''
Outputs the node and all children to file
'''
# Write the opening tag
if self.level == 0 :
file_handle.write("<course>\n")
elif self.level == 4 :
file_handle.write(" " * self.level + "<" + self.entry + " url_name=\"" + self.url_name + "\" />\n")
else :
file_handle.write(" " * self.level + "<" + self.entry + " url_name=\""
+ self.url_name + "\" display_name=\"" + self.display_name + "\"" + self.extratext + ">\n")
# Write the children
for i in self.children :
i.write_node(file_handle)
# Close the tag
if self.level < 4 :
file_handle.write(" " * self.level + "</" + self.entry + ">\n")
def write_tree(self, location, file_handle, filename="") :
'''
Outputs the node and all children to file in a tree structure
'''
# Insert the link in the old file
if not file_handle is None :
# Write the opening tag in the given file
file_handle.write(" <" + self.entry +
" url_name=\"" + self.url_name + "\" />\n")
# We write out courses, chapters, sequentials and verticals
if self.level < 4 :
# Come up with the filename to write
if filename == "" :
filename = os.path.join(location, self.entry, self.url_name + ".xml")
else :
filename = os.path.join(location, self.entry, filename)
# Create a new file
with open(filename, "w") as f :
# Write this entry
if self.level == 0 :
f.write("<course>\n")
else :
f.write("<" + self.entry + " display_name=\"" + self.display_name
+ "\"" + self.extratext + ">\n")
# Write the children
for i in self.children :
i.write_tree(location, f)
# Close this tag
f.write("</" + self.entry + ">\n")
def read_csv_file(file):
'''
Read in a CSV file, returning a list of lists.
Provide the filename to read.
'''
with open(file, 'r') as f:
data = [row for row in csv.reader(f.read().splitlines())]
return data
def strip_line(line):
'''
Takes in a list of fields from a line in a csv file.
Trims all fields, and drops leading and trailing empty fields.
'''
    newline = map(lambda s: s.strip(), line)
    result = []   # default when every field on the line is empty (avoids an undefined name below)
for i in range(len(newline)) :
if newline[i] != "" :
result = newline[i:]
break
while result and result[-1] == "" :
result.pop()
return result
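# Illustrative example on a hypothetical row: strip_line(["", " chapter ", "chap-1", ""])
# returns ["chapter", "chap-1"] -- fields are trimmed and the empty ends are dropped.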
def strip_data(data):
'''
Data should be a list of lists from a csv file.
    Looks at each line of the csv file, and removes any leading and trailing empty fields.
Then, if the first character in the first field is a #, ignores this line.
Also turns the first entry to lower case, for later ease.
'''
output = []
for line in data :
working = strip_line(line)
if working == [] :
continue
if working[0][0] == "#" :
continue
working[0] = working[0].lower()
output.append(working)
return output
def validate(data) :
'''
Checks to make sure that all input into the CSV is readable.
'''
# Make sure all entries are understood and that structure is in the
# correct order (eg no jumping from chapter to vertical)
# Track the level. Level can only increase 1 level at a time, but can decrease arbitrarily.
level = 0
content = ["html", "video", "problem"]
translation = ["to begin with.", "after chapter.", "after sequential."]
for line in data :
if line[0] in levels :
newlevel = levels[line[0]]
else :
print "Error: Unknown entry \"" + line[0] + "\". Full line is as follows."
print "\"" + "\", \"".join(line) + "\""
return False
if newlevel > level + 1 :
print "Error: Inappropriate order of entries. Cannot have", line[0], translation[level], "Full line is as follows."
print "\"" + "\", \"".join(line) + "\""
return False
level = newlevel
return True
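# Illustrative example (hypothetical rows): a chapter -> sequential -> vertical -> html
# ordering validates, whereas a vertical placed directly after a chapter fails because
# the level jumps from 1 to 3.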
def xmlify(data) :
'''
Turn the data into an XML structure using our node class
'''
# The course is the root node
course = Node(["course", "course", ""])
# Iterate over everything, remembering what the last node of each level is
last_nodes = [course, 0, 0, 0, 0]
count_nodes = [1,0,0,0,0,0,0]
for i in range(len(data)) :
# Create the new node
newnode = Node(data[i])
# Add it to the last node of the next lower level
last_nodes[newnode.level - 1].children.append(newnode)
# Update the last_nodes list
last_nodes[newnode.level] = newnode
# Update the count
count_nodes[newnode.level + newnode.content_type] += 1
# Create the unique-id for the node
newnode.create_ID(count_nodes)
# Return the root
return course, count_nodes
def make_dir(location, name) :
'''
Creates a directory if necessary
'''
path = os.path.join(location, name)
if os.path.isdir(path) :
return
os.mkdir(path)
def rm_dir(location, name) :
'''
Removes a directory, if it exists
'''
path = os.path.join(location, name)
if os.path.isdir(path) :
shutil.rmtree(path)
def scan_url_names(course) :
namelist = collections.Counter()
def traverse(node) :
namelist[node.url_name] += 1
for i in node.children :
traverse(i)
traverse(course)
for elem, count in namelist.items() :
if " " in elem :
print "Warning: The url_name \"" + elem + "\" has spaces in it."
if count > 1 :
print "Warning: The url_name \"" + elem + "\" has " + str(count) + " instances."
print "edX Structure Generator v1.0"
print "by <NAME>, August 2016"
# Deal with the command line arguments
parser = argparse.ArgumentParser(description="Create edX XML course structure from CSV file")
parser.add_argument("csv_file", help="CSV file to read structure from")
# Write or test?
group = parser.add_mutually_exclusive_group()
group.add_argument("-t", "--test", help="Test only (do not write files)",
dest="write", action="store_false")
group.add_argument("-w", "--write", help="Write XML structure (default)",
dest="write", default=True, action="store_true")
# One file or multi-structure?
group2 = parser.add_mutually_exclusive_group()
group2.add_argument("-s", "--single", help="Write a single XML file",
dest="tree", action="store_false")
group2.add_argument("-T", "--tree", help="Write out an XML tree of files (default)",
dest="tree", default=True, action="store_true")
parser.add_argument("-l", dest="location", type=str, default="./",
help="Set the output directory (defaults to \".\")")
parser.add_argument("-o", dest="filename", type=str, default="course.xml",
help="Set the root file name (defaults to \"course.xml\")")
parser.add_argument("-c", "--counts", help="Print counts of each item",
default=False, action="store_true")
parser.add_argument("--clean", help="Clear all directories before writing (only for tree writes)",
default=False, action="store_true")
parser.add_argument("-m", "--map", help="Print the structure map to screen",
default=False, action="store_true")
# Parse the command line
args = parser.parse_args()
# print args
# exit()
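# Illustrative invocation (hypothetical CSV name; flags are defined above):
#   python structgen.py course_outline.csv -c -m --clean -l ./courseware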
# Read in the CSV file
try :
data = read_csv_file(args.csv_file)
except IOError as e:
print "Error reading", args.csv_file
print "I/O error({0}): {1}".format(e.errno, e.strerror)
exit()
# Trim the data to the stuff we need
data = strip_data(data)
# Validate the data
if not validate(data) : exit()
# Process the data
course, count_nodes = xmlify(data)
print "Structure read."
# Print item counts
courses, chapters, seqs, verticals, htmls, videos, problems = count_nodes
if args.counts :
print "Chapters:", chapters
print "Sequentials:", seqs
print "Verticals:", verticals
print "HTMLs:", htmls
print "Videos:", videos
print "Problems:", problems
# Print content map
if args.map :
print "Content Map:"
course.print_node()
# Scan for duplicate url_names
scan_url_names(course)
# Write content map
if args.write :
print "Writing course structure."
if args.tree == False :
# Single file
with open(os.path.join(args.location, args.filename), "w") as f :
course.write_node(f)
else :
# Multi-file tree
if args.clean :
# Clean everything out before writing
rm_dir(args.location, "course")
rm_dir(args.location, "chapter")
rm_dir(args.location, "sequential")
rm_dir(args.location, "vertical")
# Make sure directories exist (but only if necessary)
if courses > 0 : make_dir(args.location, "course")
if chapters > 0 : make_dir(args.location, "chapter")
if seqs > 0 : make_dir(args.location, "sequential")
if verticals > 0 : make_dir(args.location, "vertical")
# Write the first node
course.write_tree(args.location, None, filename=args.filename)
print "Structure written."
``` |
{
"source": "jolzgrafe/phidl",
"score": 3
} |
#### File: phidl/phidl/path.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
import warnings
from phidl.device_layout import Path, CrossSection, _rotate_points
def arc(radius = 10, angle = 90, num_pts = 720):
""" Create a circular arc Path
Parameters
----------
radius : int or float
Radius of arc
angle : int or float
Total angle of arc
num_pts : int
Number of points used per 360 degrees
Returns
-------
Path
A Path object with the specified arc
"""
num_pts = abs(int(num_pts*angle/360))
t = np.linspace(-90*np.pi/180, (angle-90)*np.pi/180, num_pts)
x = radius*np.cos(t)
y = radius*(np.sin(t)+1)
points = np.array((x,y)).T * np.sign(angle)
P = Path()
# Manually add points & adjust start and end angles
P.points = points
P.start_angle = 0
P.end_angle = angle
return P
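# Illustrative usage sketch (example values; assumes the usual phidl workflow of
# extruding a Path into a Device):
#   P = arc(radius = 10, angle = 90)
#   D = P.extrude(width = 1.5, layer = 0)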
def straight(length = 5, num_pts = 100):
""" Creates a straight Path
Parameters
----------
length : int or float
Total length of straight path
num_pts : int
Number of points along Path
Returns
-------
Path
A Path object with the specified straight section
"""
x = np.linspace(0, length, num_pts)
y = x*0
points = np.array((x,y)).T
P = Path()
P.append(points)
return P
#### Euler bends
def _cumtrapz(x):
""" Numpy-based implementation of the cumulative trapezoidal integration
function usually found in scipy (scipy.integrate.cumtrapz) """
return np.cumsum((x[1:] + x[:-1])/2)
def _fresnel(R0, s, num_pts, n_iter=8):
""" Fresnel integral using a series expansion """
t = np.linspace(0,s/(np.sqrt(2)*R0), num_pts)
x = np.zeros(num_pts)
y = np.zeros(num_pts)
for n in range(0,n_iter):
x += (-1)**n * t**(4*n+1)/(np.math.factorial(2*n) * (4*n+1))
y += (-1)**n * t**(4*n+3)/(np.math.factorial(2*n+1) * (4*n+3))
return np.array([np.sqrt(2)*R0*x, np.sqrt(2)*R0*y])
def euler(radius = 3, angle = 90, p = 1.0, use_eff = False, num_pts = 720):
""" Create an Euler bend (also known as "racetrack" or "clothoid" curves)
that adiabatically transitions from straight to curved. By default,
`radius` corresponds to the minimum radius of curvature of the bend.
However, if `use_eff` is set to True, `radius` corresponds to the effective
radius of curvature (making the curve a drop-in replacement for an arc). If
p < 1.0, will create a "partial euler" curve as described in Vogelbacher et.
al. https://dx.doi.org/10.1364/oe.27.031394
Parameters
----------
radius : int or float
Minimum radius of curvature
angle : int or float
Total angle of curve
p : float
Proportion of curve that is an Euler curve
use_eff : bool
If False: `radius` corresponds to minimum radius of curvature of the bend
If True: The curve will be scaled such that the endpoints match an arc
with parameters `radius` and `angle`
num_pts : int
Number of points used per 360 degrees
Returns
-------
Path
A Path object with the specified Euler curve
"""
if (p < 0) or (p > 1):
raise ValueError('[PHIDL] euler() requires argument `p` be between 0 and 1')
if p == 0:
P = arc(radius = radius, angle = angle, num_pts = num_pts)
P.info['Reff'] = radius
P.info['Rmin'] = radius
return P
if angle < 0:
mirror = True
angle = np.abs(angle)
else:
mirror = False
R0 = 1
alpha = np.radians(angle)
Rp = R0 / (np.sqrt(p*alpha))
sp = R0 * np.sqrt(p*alpha)
s0 = 2*sp + Rp*alpha*(1-p)
num_pts = abs(int(num_pts*angle/360))
num_pts_euler = int(np.round(sp/(s0/2)*num_pts))
num_pts_arc = num_pts - num_pts_euler
xbend1, ybend1 = _fresnel(R0, sp, num_pts_euler)
xp, yp = xbend1[-1], ybend1[-1]
dx = xp - Rp*np.sin(p*alpha/2)
dy = yp - Rp*(1-np.cos(p*alpha/2))
s = np.linspace(sp, s0/2, num_pts_arc)
xbend2 = Rp*np.sin((s-sp)/Rp + p*alpha/2) + dx
ybend2 = Rp*(1 - np.cos((s-sp)/Rp + p*alpha/2)) + dy
x = np.concatenate([xbend1, xbend2[1:]])
y = np.concatenate([ybend1, ybend2[1:]])
points1 = np.array([x,y]).T
points2 = np.flipud(np.array([x,-y]).T)
points2 = _rotate_points(points2, angle-180)
points2 += -points2[0,:] + points1[-1,:]
points = np.concatenate([points1[:-1],points2])
# Find y-axis intersection point to compute Reff
start_angle = 180*(angle<0)
end_angle = start_angle + angle
dy = np.tan(np.radians(end_angle-90)) * points[-1][0]
Reff = points[-1][1] - dy
Rmin = Rp
# Fix degenerate condition at angle == 180
if np.abs(180-angle) < 1e-3:
Reff = points[-1][1]/2
# Scale curve to either match Reff or Rmin
if use_eff == True:
scale = radius/Reff
else:
scale = radius/Rmin
points *= scale
P = Path()
# Manually add points & adjust start and end angles
P.points = points
P.start_angle = start_angle
P.end_angle = end_angle
P.info['Reff'] = Reff*scale
P.info['Rmin'] = Rmin*scale
if mirror == True:
P.mirror((1,0))
return P
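# Illustrative usage sketch (example values): with use_eff = True the euler bend's
# endpoints match those of an arc with the same radius/angle, so it can be used as
# a drop-in replacement:
#   P1 = arc(radius = 10, angle = 90)
#   P2 = euler(radius = 10, angle = 90, p = 0.5, use_eff = True)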
def spiral(num_turns = 5, gap = 1, inner_gap = 2, num_pts = 10000):
""" Creates a spiral geometry consisting of two oddly-symmetric
semi-circular arcs in the centre and two Archimedean (involute) spiral arms
extending outward from the ends of both arcs.
Parameters
----------
num_turns : int or float
The number of turns in the spiral. Must be greater than 1. A full
spiral rotation counts as 1 turn, and the center arcs will together
always be 0.5 turn.
gap : int or float
The distance between any point on one arm of the spiral and a point
with the same angular coordinate on an adjacent arm.
inner_gap : int or float
The inner size of the spiral, equal to twice the chord length of the
centre arcs.
num_pts: int
The number of points in the entire spiral. The actual number of points
will be slightly different than the specified value, as they are
dynamically allocated using the path lengths of the spiral.
Returns
-------
Path
A Path object forming a spiral
Notes
-----
``num_turns`` usage (x is any whole number):
- ``num_turns = x.0``: Output arm will be extended 0.5 turn to be on
the same side as the input.
- ``num_turns < x.5``: Input arm will be extended by the fractional
amount.
- ``num_turns = x.5``: Both arms will be the same length and the input
and output will be on opposite sides.
- ``num_turns > x.5``: Output arm will be extended by the fractional
amount.
"""
# Establishing number of turns in each arm
if num_turns <= 1:
raise ValueError('num_turns must be greater than 1')
diff = num_turns - np.floor(num_turns)
if diff < 0.5:
num_turns1 = np.floor(num_turns) - 1 + 2*diff
else:
num_turns1 = np.floor(num_turns)
if diff > 0.5:
num_turns2 = np.floor(num_turns) - 1 + 2*diff
else:
num_turns2 = np.floor(num_turns)
# Establishing relevant angles and spiral/centre arc parameters
a1 = np.pi/2
a2 = np.array([np.pi*num_turns1 + a1, np.pi*num_turns2 + a1])
a = inner_gap/2 - gap/2
b = gap/np.pi
Rc = inner_gap*np.sqrt(1 + (b/(a+b*a1))**2) / 4
theta = np.degrees(2*np.arcsin(inner_gap/4/Rc))
# Establishing number of points in each arm
s_centre = Rc*np.radians(theta)
s_spiral = ((a + a2*b)**2 + b**2)**(3/2) / (3*(a*b + (a2*b**2)))
z = num_pts / (s_spiral[0] + s_spiral[1] + 2*s_centre)
num_pts0 = int(z*s_centre)
num_pts1 = int(z*s_spiral[0])
num_pts2 = int(z*s_spiral[1]) - num_pts1
# Forming both spiral arms
arm1 = np.linspace(a1, a2[0], num_pts1)
arm2 = np.linspace(a2[0], a2[1], num_pts2)[1:]
a_spiral = np.array([arm1, np.concatenate([arm1, arm2])])
r_spiral = a + b*a_spiral
x_spiral = np.array([np.zeros(num_pts1), np.zeros(len(a_spiral[1]))])
y_spiral = np.array([np.zeros(num_pts1), np.zeros(len(a_spiral[1]))])
for i in range(2):
x_spiral[i] = r_spiral[i]*np.cos(a_spiral[i])
y_spiral[i] = r_spiral[i]*np.sin(a_spiral[i])
# Forming centre arcs
pts = _rotate_points(arc(Rc, theta, 360*num_pts0/theta).points, -theta/2+90)
x_centre = pts[:,0] + x_spiral[0][0] - pts[:,0][-1]
y_centre = pts[:,1] + y_spiral[0][0] - pts[:,1][-1]
x_centre = np.concatenate([-np.flip(x_centre), x_centre])
y_centre = np.concatenate([-np.flip(y_centre), y_centre])
# Combining into final spiral
x = np.concatenate([-np.flip(x_spiral[1]), x_centre, x_spiral[0]])
y = np.concatenate([-np.flip(y_spiral[1]), y_centre, y_spiral[0]])
points = np.array((x,y)).T
P = Path()
# Manually add points & adjust start and end angles
P.points = points
nx1,ny1 = points[1] - points[0]
P.start_angle = np.arctan2(ny1,nx1)/np.pi*180
nx2,ny2 = points[-1] - points[-2]
P.end_angle = np.arctan2(ny2,nx2)/np.pi*180
# print(P.start_angle)
# print(P.end_angle)
return P
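# Illustrative usage sketch (example values): a 6.5-turn spiral, so both arms end on
# opposite sides (see the num_turns notes above):
#   P = spiral(num_turns = 6.5, gap = 1, inner_gap = 2)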
def _compute_segments(points):
points = np.asfarray(points)
normals = np.diff(points, axis = 0)
normals = (normals.T/np.linalg.norm(normals, axis = 1)).T
dx = np.diff(points[:,0])
dy = np.diff(points[:,1])
ds = np.sqrt(dx**2 + dy**2)
theta = np.degrees(np.arctan2(dy,dx))
dtheta = np.diff(theta)
dtheta = dtheta - 360*np.floor((dtheta + 180)/360)
return points, normals, ds, theta, dtheta
def smooth(
points = [(20,0), (40,0), (80,40), (80,10), (100,10),],
radius = 4,
corner_fun = euler,
**kwargs
):
""" Create a smooth path from a series of waypoints. Corners will be rounded
    using `corner_fun` and any additional keyword arguments (for example,
`use_eff = True` when `corner_fun = pp.euler`)
Parameters
----------
points : array-like[N][2] or Path
List of waypoints for the path to follow
radius : int or float
Radius of curvature, this argument will be passed to `corner_fun`
corner_fun : function
The function that controls how the corners are rounded. Typically either
`arc()` or `euler()`
**kwargs : dict
Extra keyword arguments that will be passed to `corner_fun`
Returns
-------
Path
A Path object with the specified smoothed path.
"""
if isinstance(points, Path): points = points.points
points, normals, ds, theta, dtheta = _compute_segments(points)
colinear_elements = np.concatenate([[False], np.abs(dtheta) < 1e-6, [False]])
if np.any(colinear_elements):
new_points = points[~colinear_elements,:]
points, normals, ds, theta, dtheta = _compute_segments(new_points)
if np.any(np.abs(np.abs(dtheta)-180) < 1e-6):
raise ValueError('[PHIDL] smooth() received points which double-back on themselves' +
'--turns cannot be computed when going forwards then exactly backwards.')
# FIXME add caching
# Create arcs
paths = []
radii = []
for dt in dtheta:
P = corner_fun(radius = radius, angle = dt, **kwargs)
chord = np.linalg.norm(P.points[-1,:] - P.points[0,:])
r = (chord/2)/np.sin(np.radians(dt/2))
r = np.abs(r)
radii.append(r)
paths.append(P)
d = np.abs(np.array(radii)/np.tan(np.radians(180-dtheta)/2))
encroachment = np.concatenate([[0],d]) + np.concatenate([d,[0]])
if np.any(encroachment > ds):
        raise ValueError('[PHIDL] smooth(): Not enough distance between points to fit curves. Try reducing the radius or spacing the points out farther')
p1 = points[1:-1,:] - normals[:-1,:]*d[:,np.newaxis]
# Move arcs into position
new_points = []
new_points.append( [points[0,:]] )
for n,dt in enumerate(dtheta):
P = paths[n]
P.rotate(theta[n] - 0)
P.move(p1[n])
new_points.append(P.points)
new_points.append( [points[-1,:]] )
new_points = np.concatenate(new_points)
P = Path()
P.rotate(theta[0])
P.append(new_points)
P.move(points[0,:])
return P
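# Illustrative usage sketch (example waypoints): round the corners of a manhattan-like
# waypoint list with euler bends; extra keyword arguments are forwarded to corner_fun:
#   P = smooth(points = [(0,0), (40,0), (40,40), (80,40)],
#              radius = 4, corner_fun = euler, use_eff = True)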
def _sinusoidal_transition(y1, y2):
dx = y2 - y1
return lambda t: y1 + (1 - np.cos(np.pi*t))/2*dx
def _linear_transition(y1, y2):
dx = y2 - y1
return lambda t: y1 + t*dx
def transition(cross_section1, cross_section2, width_type = 'sine'):
""" Creates a CrossSection that smoothly transitions between two input
CrossSections. Only cross-sectional elements that have the `name` (as in
    X.add(..., name = 'wg') ) parameter specified in both input CrossSections
will be created. Port names will be cloned from the input CrossSections in
reverse.
Parameters
----------
cross_section1 : CrossSection
First input CrossSection
cross_section2 : CrossSection
Second input CrossSection
width_type : {'sine', 'linear'}
Sets the type of width transition used if any widths are different
between the two input CrossSections.
Returns
-------
CrossSection
A smoothly-transitioning CrossSection
"""
X1 = cross_section1
X2 = cross_section2
Xtrans = CrossSection()
if not X1.aliases or not X2.aliases:
raise ValueError("""[PHIDL] transition() found no named sections in one
or both inputs (cross_section1/cross_section2).""")
for alias in X1.aliases.keys():
if alias in X2.aliases:
offset1 = X1[alias]['offset']
offset2 = X2[alias]['offset']
width1 = X1[alias]['width']
width2 = X2[alias]['width']
if callable(offset1):
offset1 = offset1(1)
if callable(offset2):
offset2 = offset2(0)
if callable(width1):
width1 = width1(1)
if callable(width2):
width2 = width2(0)
offset_fun = _sinusoidal_transition(offset1, offset2)
if width_type == 'sine':
width_fun = _sinusoidal_transition(width1, width2)
elif width_type == 'linear':
width_fun = _linear_transition(width1, width2)
else:
raise ValueError("[PHIDL] transition() width_type " +
"argument must be one of {'sine','linear'}")
Xtrans.add(width = width_fun, offset = offset_fun,
layer = X1[alias]['layer'],
ports = (X2[alias]['ports'][0], X1[alias]['ports'][1]),
name = alias)
return Xtrans
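# Illustrative usage sketch (example widths/layers; the section name 'a' is arbitrary
# but must match between the two CrossSections):
#   X1 = CrossSection(); X1.add(width = 1.0, offset = 0, layer = 0, ports = (1, 2), name = 'a')
#   X2 = CrossSection(); X2.add(width = 3.0, offset = 0, layer = 0, ports = (1, 2), name = 'a')
#   Xtrans = transition(X1, X2, width_type = 'sine')
#   D = straight(length = 10).extrude(width = Xtrans)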
```
#### File: phidl/phidl/routing.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import sqrt, pi, cos, sin, log, exp, sinh, mod
from numpy.linalg import norm
from phidl.device_layout import Device, CrossSection, Port
from phidl.device_layout import _parse_layer
from phidl.geometry import turn
import phidl.path as pp
from phidl import Path
import gdspy
import warnings
def _get_rotated_basis(theta):
"""Returns basis vectors rotated CCW by theta (in degrees)
"""
theta = np.radians(theta)
e1 = np.array([np.cos(theta), np.sin(theta)])
e2 = np.array([-1*np.sin(theta), np.cos(theta)])
return e1, e2
def _arc(radius = 10, width = 0.5, theta = 45, start_angle = 0, angle_resolution = 2.5, layer = 0):
""" Creates an arc of arclength ``theta`` starting at angle ``start_angle`` """
inner_radius = radius-width/2
outer_radius = radius+width/2
angle1 = (start_angle)*pi/180
angle2 = (start_angle + theta)*pi/180
t = np.linspace(angle1, angle2, int(np.ceil(abs(theta)/angle_resolution)))
inner_points_x = (inner_radius*cos(t)).tolist()
inner_points_y = (inner_radius*sin(t)).tolist()
outer_points_x = (outer_radius*cos(t)).tolist()
outer_points_y = (outer_radius*sin(t)).tolist()
xpts = inner_points_x + outer_points_x[::-1]
ypts = inner_points_y + outer_points_y[::-1]
D = Device('arc')
D.add_polygon(points = (xpts,ypts), layer = layer)
D.add_port(name = 1, midpoint = (radius*cos(angle1), radius*sin(angle1)), width = width, orientation = start_angle - 90 + 180*(theta<0))
D.add_port(name = 2, midpoint = (radius*cos(angle2), radius*sin(angle2)), width = width, orientation = start_angle + theta + 90 - 180*(theta<0))
D.info['length'] = (abs(theta)*pi/180)*radius
return D
def _gradual_bend(
radius = 20,
width = 1.0,
angular_coverage=15,
num_steps=10,
angle_resolution=0.1,
start_angle=0,
direction='ccw',
layer=0,
):
"""
    Creates a 90-degree bent waveguide.
    The bend radius gradually decreases (i.e. the curvature increases) until it
    reaches the minimum value `radius` at the "angular_coverage" angle,
    which essentially creates a smooth transition into the bent-waveguide mode.
    The user can control the number of steps provided.
    Direction is determined by the start angle and the cw/ccw switch.
    ############
    With the default 10 "num_steps" and 15 degree coverage, the effective radius is about 1.5*radius.
"""
angular_coverage=np.deg2rad(angular_coverage)
D = Device()
#determines the increment in radius through its inverse from 0 to 1/r
inc_rad =(radius**-1)/(num_steps)
angle_step = angular_coverage/num_steps
#construct a series of sub-arcs with equal angles but gradually decreasing bend radius
arcs = []
for x in range(num_steps):
A = _arc(radius=1/((x+1)*inc_rad),width=width,theta=np.rad2deg(angle_step),start_angle=x*np.rad2deg(angle_step),angle_resolution=angle_resolution,layer=layer)
a = D.add_ref(A)
arcs.append(a)
if x>0:
a.connect(port=1,destination=prevPort)
prevPort=a.ports[2]
D.add_port(name=1,port=arcs[0].ports[1])
#now connect a regular bend for the normal curved portion
B = _arc(radius=radius,width=width,theta=45-np.rad2deg(angular_coverage),start_angle=angular_coverage,angle_resolution=angle_resolution,layer=layer)
b = D.add_ref(B)
b.connect(port=1,destination=prevPort)
prevPort=b.ports[2]
D.add_port(name=2,port=prevPort)
#now create the overall structure
Total = Device()
#clone the half-curve into two objects and connect for a 90 deg bend.
D1 = Total.add_ref(D)
D2 = Total.add_ref(D)
D2.mirror(p1=[0,0],p2=[1,1])
D2.connect(port=2,destination=D1.ports[2])
Total.xmin=0
Total.ymin=0
#orient to default settings...
Total.mirror(p1=[0,0],p2=[1,1])
Total.mirror(p1=[0,0],p2=[1,0])
#orient to user-provided settings
if direction == 'cw':
Total.mirror(p1=[0,0],p2=[1,0])
Total.rotate(angle=start_angle,center=Total.center)
Total.center=[0,0]
Total.add_port(name=1,port=D1.ports[1])
Total.add_port(name=2,port=D2.ports[1])
return Total
def route_basic(port1, port2, path_type = 'sine', width_type = 'straight', width1 = None, width2 = None, num_path_pts = 99, layer = 0):
"""
.. deprecated:: 1.6.0
`route_basic` will be removed in August 2022, please replace with
`route_quad()` or `route_smooth()`.
"""
warnings.warn("""[PHIDL] Warning: route_basic() will be deprecated
in August 2022, please replace with `route_quad()` or `route_smooth()`""")
# Assuming they're both Ports for now
point_a = np.array(port1.midpoint)
if width1 is None: width1 = port1.width
point_b = np.array(port2.midpoint)
if width2 is None: width2 = port2.width
if round(abs(mod(port1.orientation - port2.orientation,360)),3) != 180:
raise ValueError('[DEVICE] route() error: Ports do not face each other (orientations must be 180 apart)')
orientation = port1.orientation
separation = point_b - point_a # Vector drawn from A to B
distance = norm(separation) # Magnitude of vector from A to B
rotation = np.arctan2(separation[1],separation[0])*180/pi # Rotation of vector from A to B
angle = rotation - orientation # If looking out along the normal of ``a``, the angle you would have to look to see ``b``
forward_distance = distance*cos(angle*pi/180)
lateral_distance = distance*sin(angle*pi/180)
# Create a path assuming starting at the origin and setting orientation = 0
# use the "connect" function later to move the path to the correct location
xf = forward_distance
yf = lateral_distance
if path_type == 'straight':
curve_fun = lambda t: [xf*t, yf*t]
curve_deriv_fun = lambda t: [xf + t*0, t*0]
if path_type == 'sine':
curve_fun = lambda t: [xf*t, yf*(1-cos(t*pi))/2]
curve_deriv_fun = lambda t: [xf + t*0, yf*(sin(t*pi)*pi)/2]
#if path_type == 'semicircle':
# def semicircle(t):
# t = np.array(t)
# x,y = np.zeros(t.shape), np.zeros(t.shape)
# ii = (0 <= t) & (t <= 0.5)
# jj = (0.5 < t) & (t <= 1)
# x[ii] = (cos(-pi/2 + t[ii]*pi/2))*xf
# y[ii] = (sin(-pi/2 + t[ii]*pi/2)+1)*yf*2
# x[jj] = (cos(pi*3/2 - t[jj]*pi)+2)*xf/2
# y[jj] = (sin(pi*3/2 - t[jj]*pi)+1)*yf/2
# return x,y
# curve_fun = semicircle
# curve_deriv_fun = None
if width_type == 'straight':
width_fun = lambda t: (width2 - width1)*t + width1
if width_type == 'sine':
width_fun = lambda t: (width2 - width1)*(1-cos(t*pi))/2 + width1
route_path = gdspy.Path(width = width1, initial_point = (0,0))
route_path.parametric(curve_fun, curve_deriv_fun, number_of_evaluations=num_path_pts,
max_points=199, final_width=width_fun, final_distance=None)
route_path_polygons = route_path.polygons
# Make the route path into a Device with ports, and use "connect" to move it
# into the proper location
D = Device()
D.add_polygon(route_path_polygons, layer = layer)
p1 = D.add_port(name = 1, midpoint = (0,0), width = width1, orientation = 180)
p2 = D.add_port(name = 2, midpoint = [forward_distance,lateral_distance], width = width2, orientation = 0)
D.info['length'] = route_path.length
D.rotate(angle = 180 + port1.orientation - p1.orientation, center = p1.midpoint)
D.move(origin = p1, destination = port1)
return D
def route_quad(port1, port2, width1=None, width2=None, layer=0):
"""Routes a basic quadrilateral polygon directly between two ports.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
width1, width2 : int, float or None
Width of quadrilateral at ports. If None, uses port widths.
layer : int or array-like[2]
Layer to put the route on.
Returns
---------
D : Device
A Device containing the route and two ports (`1` and `2`) on either end.
"""
def get_port_edges(port, width):
_, e1 = _get_rotated_basis(port.orientation)
pt1 = port.midpoint + e1*width/2
pt2 = port.midpoint - e1*width/2
return pt1, pt2
if width1 is None:
width1 = port1.width
if width2 is None:
width2 = port2.width
vertices = np.array(get_port_edges(port1, width1) + get_port_edges(port2, width2))
center = np.mean(vertices, axis=0)
displacements = vertices-center
# sort vertices by angle from center of quadrilateral to make convex polygon
angles = np.array([np.arctan2(disp[0], disp[1]) for disp in displacements])
vertices = [vert for _, vert in sorted(zip(angles, vertices), key=lambda x: x[0])]
D = Device()
D.add_polygon(points=vertices, layer=layer)
D.add_port(name=1, midpoint=port1.midpoint, orientation=port1.orientation+180, width=width1)
D.add_port(name=2, midpoint=port2.midpoint, orientation=port2.orientation+180, width=width2)
return D
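# Illustrative usage sketch (hypothetical ports, e.g. taken from two existing devices):
#   D = route_quad(devA.ports['out'], devB.ports['in'], width1 = None, width2 = None, layer = 2)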
def route_smooth(
port1,
port2,
radius=5,
width=None,
path_type='manhattan',
manual_path=None,
smooth_options={'corner_fun': pp.euler, 'use_eff': True},
layer=np.nan,
**kwargs
):
""" Convenience function that routes a path between ports using pp.smooth(),
then immediately extrudes the path to create polygons. Has several waypoint
path type options. Equivalent to e.g.
>>> pts = pr.path_manhattan(port1, port2, radius)
>>> P = pp.smooth(pts, radius)
>>> D = P.extrude(width)
Parameters
----------
port1, port2 : Port objects
Ports to route between.
radius : int or float
Bend radius passed to pp.smooth
width : None, int, float, array-like[2], or CrossSection
If None, the route linearly tapers between the widths the ports
If set to a single number (e.g. `width=1.7`): makes a fixed-width route
If set to a 2-element array (e.g. `width=[1.8,2.5]`): makes a route
whose width varies linearly from width[0] to width[1]
If set to a CrossSection: uses the CrossSection parameters for the route
path_type : {'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}
Method of waypoint path creation. Should be one of
- 'manhattan' - automatic manhattan routing
(see path_manhattan() ).
- 'L' - L-shaped path for orthogonal ports that can be directly
connected (see path_L() ).
            - 'U' - U-shaped path for parallel or facing ports
(see path_U() ).
- 'J' - J-shaped path for orthogonal ports that cannot be
directly connected (see path_J() ).
- 'C' - C-shaped path for ports that face away from each
other (see path_C() ).
- 'Z' - Z-shaped path with three segments for ports at any
angles (see path_Z() ).
- 'V' - V-shaped path with two segments for ports at any
angles (see path_V() ).
- 'straight' - straight path for ports that face each other
                    (see path_straight() ).
- 'manual' - use an explicit waypoint path provided
in manual_path.
manual_path : array-like[N][2] or Path
Waypoint path for creating a manual route
smooth_options: dict
Keyword arguments passed to pp.smooth
layer : int or array-like[2]
Layer to put route on. `layer=0` is used by default.
**kwargs :
Keyword arguments passed to the waypoint path function.
Returns
----------
D : Device
A Device containing the route and two ports (`1` and `2`) on either end.
"""
if path_type == 'straight':
P = path_straight(port1, port2)
elif path_type == 'manual':
if not isinstance(manual_path, Path):
P = Path(manual_path)
else:
P = manual_path
elif path_type == 'L':
P = path_L(port1, port2)
elif path_type == 'U':
P = path_U(port1, port2, **kwargs)
elif path_type == 'J':
P = path_J(port1, port2, **kwargs)
elif path_type == 'C':
P = path_C(port1, port2, **kwargs)
elif path_type == 'manhattan':
if smooth_options['corner_fun'] == pp.euler:
use_eff = smooth_options.get('use_eff')
if use_eff is None or use_eff == False:
raise ValueError("""[PHIDL] route_smooth(): when using manhattan path type with euler
bends, smooth_options['use_eff'] must be True.""")
P = path_manhattan(port1, port2, radius=radius)
elif path_type == 'Z':
P = path_Z(port1, port2, **kwargs)
elif path_type == 'V':
P = path_V(port1, port2)
else:
raise ValueError("""[PHIDL] route_smooth() received an invalid path_type. Must be one of
{'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}""")
P = pp.smooth(points=P, radius=radius, **smooth_options)
if width is None:
X1 = CrossSection().add(width=port1.width, ports=(1, 2), layer=layer, name='a')
X2 = CrossSection().add(width=port2.width, ports=(1, 2), layer=layer, name='a')
cross_section = pp.transition(cross_section1=X1, cross_section2=X2, width_type='linear')
D = P.extrude(width=cross_section)
else:
D = P.extrude(width=width, layer=layer)
if not isinstance(width, CrossSection):
newport1 = D.add_port(port = port1, name = 1).rotate(180)
newport2 = D.add_port(port = port2, name = 2).rotate(180)
if np.size(width) == 1:
newport1.width = width
newport2.width = width
if np.size(width) == 2:
newport1.width = width[0]
newport2.width = width[1]
return D
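# Illustrative usage sketch (hypothetical ports; example radius): automatic manhattan
# routing with euler bends, which requires use_eff = True as enforced above:
#   D = route_smooth(devA.ports[1], devB.ports[2], radius = 5, path_type = 'manhattan',
#                    smooth_options = {'corner_fun': pp.euler, 'use_eff': True})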
def route_sharp(
port1,
port2,
width=None,
path_type='manhattan',
manual_path=None,
layer=np.nan,
**kwargs
):
""" Convenience function that routes a path between ports and immediately
extrudes the path to create polygons. Has several waypoint path type
options. Equivalent to e.g.
>>> P = pr.path_manhattan(port1, port2, radius)
>>> D = P.extrude(width)
Parameters
----------
port1, port2 : Port objects
Ports to route between.
width : None, int, float, array-like[2], or CrossSection
        If None, the route linearly tapers between the widths of the ports
If set to a single number (e.g. `width=1.7`): makes a fixed-width route
If set to a 2-element array (e.g. `width=[1.8,2.5]`): makes a route
whose width varies linearly from width[0] to width[1]
If set to a CrossSection: uses the CrossSection parameters for the route
path_type : {'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}
Method of waypoint path creation. Should be one of
- 'manhattan' - automatic manhattan routing
(see path_manhattan() ).
- 'L' - L-shaped path for orthogonal ports that can be directly
connected (see path_L() ).
            - 'U' - U-shaped path for parallel or facing ports
(see path_U() ).
- 'J' - J-shaped path for orthogonal ports that cannot be
directly connected (see path_J() ).
- 'C' - C-shaped path for ports that face away from each
other (see path_C() ).
- 'Z' - Z-shaped path with three segments for ports at any
angles (see path_Z() ).
- 'V' - V-shaped path with two segments for ports at any
angles (see path_V() ).
- 'straight' - straight path for ports that face each other
                    (see path_straight() ).
- 'manual' - use an explicit waypoint path provided
in manual_path.
manual_path : array-like[N][2] or Path
Waypoint path for creating a manual route
layer : int or array-like[2]
Layer to put route on. `layer=0` is used by default.
**kwargs :
Keyword arguments passed to the waypoint path function.
Returns
----------
D : Device
A Device containing the route and two ports (`1` and `2`) on either end.
"""
if path_type == 'straight':
P = path_straight(port1, port2)
elif path_type == 'manual':
if not isinstance(manual_path, Path):
P = Path(manual_path)
else:
P = manual_path
elif path_type == 'L':
P = path_L(port1, port2)
elif path_type == 'U':
P = path_U(port1, port2, **kwargs)
elif path_type == 'J':
P = path_J(port1, port2, **kwargs)
elif path_type == 'C':
P = path_C(port1, port2, **kwargs)
elif path_type == 'manhattan':
radius = max(port1.width, port2.width)
P = path_manhattan(port1, port2, radius=radius)
elif path_type == 'Z':
P = path_Z(port1, port2, **kwargs)
elif path_type == 'V':
P = path_V(port1, port2)
else:
raise ValueError("""[PHIDL] route_sharp() received an invalid path_type. Must be one of
{'manhattan', 'L', 'U', 'J', 'C', 'V', 'Z', 'straight', 'manual'}""")
if width is None:
X1 = CrossSection().add(width=port1.width, ports=(1, 2), layer=layer, name='a')
X2 = CrossSection().add(width=port2.width, ports=(1, 2), layer=layer, name='a')
cross_section = pp.transition(cross_section1=X1, cross_section2=X2, width_type='linear')
D = P.extrude(width=cross_section)
else:
D = P.extrude(width=width, layer=layer)
if not isinstance(width, CrossSection):
newport1 = D.add_port(port = port1, name = 1).rotate(180)
newport2 = D.add_port(port = port2, name = 2).rotate(180)
if np.size(width) == 1:
newport1.width = width
newport2.width = width
if np.size(width) == 2:
newport1.width = width[0]
newport2.width = width[1]
return D
def path_straight(port1, port2):
"""Return waypoint path between port1 and port2 in a straight line.
Useful when ports point directly at each other.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
Returns
----------
points : array[2][2]
Waypoints for the route path to follow.
"""
delta_orientation = np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3)
e1, e2 = _get_rotated_basis(port1.orientation)
displacement = port2.midpoint - port1.midpoint
xrel = np.round(np.dot(displacement, e1), 3) # relative position of port 2, forward/backward
yrel = np.round(np.dot(displacement, e2), 3) # relative position of port 2, left/right
if (delta_orientation not in (0, 180, 360)) or (yrel != 0) or (xrel <= 0):
raise ValueError('[PHIDL] path_straight(): ports must point directly at each other.')
return Path(np.array([port1.midpoint, port2.midpoint]))
def path_L(port1, port2):
"""Return waypoint path between port1 and port2 in an L shape. Useful
when orthogonal ports can be directly connected with one turn.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
delta_orientation = np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3)
if delta_orientation not in (90, 270):
raise ValueError('[PHIDL] path_L(): ports must be orthogonal.')
e1, e2 = _get_rotated_basis(port1.orientation)
# assemble waypoints
pt1 = port1.midpoint
pt3 = port2.midpoint
delta_vec = pt3-pt1
pt2 = pt1 + np.dot(delta_vec, e1)*e1
return Path(np.array([pt1, pt2, pt3]))
def path_U(port1, port2, length1=200):
"""Return waypoint path between port1 and port2 in a U shape. Useful
when ports face the same direction or toward each other.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
length1 : int or float
Length of segment exiting port1. Should be larger than bend radius.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
delta_orientation = np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3)
if delta_orientation not in (0, 180, 360):
        raise ValueError('[PHIDL] path_U(): ports must be parallel.')
theta = np.radians(port1.orientation)
e1 = np.array([np.cos(theta), np.sin(theta)])
e2 = np.array([-1*np.sin(theta), np.cos(theta)])
# assemble waypoints
pt1 = port1.midpoint
pt4 = port2.midpoint
pt2 = pt1 + length1*e1 # outward by length1 distance
delta_vec = pt4-pt2
pt3 = pt2 + np.dot(delta_vec, e2)*e2
return Path(np.array([pt1, pt2, pt3, pt4]))
def path_J(port1, port2, length1=200, length2=200):
"""Return waypoint path between port1 and port2 in a J shape. Useful
when orthogonal ports cannot be connected directly with an L shape.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
length1 : int or float
Length of segment exiting port1. Should be larger than bend radius.
length2 : int or float
Length of segment exiting port2. Should be larger than bend radius.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
delta_orientation = np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3)
if delta_orientation not in (90, 270):
raise ValueError('[PHIDL] path_J(): ports must be orthogonal.')
e1, _ = _get_rotated_basis(port1.orientation)
e2, _ = _get_rotated_basis(port2.orientation)
# assemble waypoints
pt1 = port1.midpoint
pt2 = pt1 + length1*e1 # outward from port1 by length1
pt5 = port2.midpoint
pt4 = pt5 + length2*e2 # outward from port2 by length2
delta_vec = pt4-pt2
pt3 = pt2 + np.dot(delta_vec, e2)*e2 # move orthogonally in e2 direction
return Path(np.array([pt1, pt2, pt3, pt4, pt5]))
def path_C(port1, port2, length1=100, left1=100, length2=100):
"""Return waypoint path between port1 and port2 in a C shape. Useful
    when ports are parallel and face away from each other.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
length1 : int or float
        Length of route segment coming out of port1. Should be larger
        than the bend radius.
left1 : int or float
Length of route segment that turns left (or right if negative)
from port1. Should be larger than twice the bend radius.
length2 : int or float
Length of route segment coming out of port2. Should be larger
than bend radius.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
delta_orientation = np.round(np.abs(np.mod(port1.orientation - port2.orientation, 360)), 3)
if delta_orientation not in (0, 180, 360):
        raise ValueError('[PHIDL] path_C(): ports must be parallel.')
e1, e_left = _get_rotated_basis(port1.orientation)
e2, _ = _get_rotated_basis(port2.orientation)
# assemble route points
pt1 = port1.midpoint
pt2 = pt1 + length1*e1 # outward from port1 by length1
pt3 = pt2 + left1*e_left # leftward by left1
pt6 = port2.midpoint
pt5 = pt6 + length2*e2 # outward from port2 by length2
delta_vec = pt5-pt3
pt4 = pt3 + np.dot(delta_vec, e1)*e1 # move orthogonally in e1 direction
return Path(np.array([pt1, pt2, pt3, pt4, pt5, pt6]))
def path_manhattan(port1, port2, radius):
"""Return waypoint path between port1 and port2 using manhattan routing.
Routing is performed using straight, L, U, J, or C waypoint path
as needed. Ports must face orthogonal or parallel directions.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
radius : float or int
Bend radius for 90 degree bend.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
radius = radius + 0.1 # ensure space for bend radius
e1, e2 = _get_rotated_basis(port1.orientation)
displacement = port2.midpoint - port1.midpoint
xrel = np.round(np.dot(displacement, e1), 3) # port2 position, forward(+)/backward(-) from port 1
yrel = np.round(np.dot(displacement, e2), 3) # port2 position, left(+)/right(-) from port1
orel = np.round(np.abs(np.mod(port2.orientation - port1.orientation, 360)), 3) # relative orientation
if orel not in (0, 90, 180, 270, 360):
        raise ValueError('[PHIDL] path_manhattan(): ports must face parallel or orthogonal directions.')
if orel in (90, 270):
# Orthogonal case
if ((orel == 90 and yrel < -1*radius) or (orel == 270 and yrel > radius)) and xrel > radius:
pts = path_L(port1, port2)
else:
# Adjust length1 and length2 to ensure intermediate segments fit bend radius
direction = -1 if (orel == 270) else 1
length2 = 2*radius-direction*yrel if (np.abs(radius+direction*yrel) < 2*radius) else radius
length1 = 2*radius+xrel if (np.abs(radius-xrel) < 2*radius) else radius
pts = path_J(port1, port2, length1=length1, length2=length2)
else:
        # Parallel case
if orel == 180 and yrel == 0 and xrel > 0:
pts = path_straight(port1, port2)
elif (orel == 180 and xrel <= 2*radius) or (np.abs(yrel) < 2*radius):
# Adjust length1 and left1 to ensure intermediate segments fit bend radius
left1 = np.abs(yrel)+2*radius if (np.abs(yrel) < 4*radius) else 2*radius
y_direction = -1 if (yrel < 0) else 1
left1 = y_direction*left1
length2 = radius
x_direction = -1 if (orel == 180) else 1
segmentx_length = np.abs(xrel+x_direction*length2-radius)
if segmentx_length < 2*radius:
length1 = xrel+x_direction*length2+2*radius
else:
length1 = radius
pts = path_C(port1, port2, length1=length1, length2=length2, left1=left1)
else:
# Adjust length1 to ensure segment comes out of port2
length1 = radius+xrel if (orel == 0 and xrel > 0) else radius
pts = path_U(port1, port2, length1=length1)
return pts
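# Illustrative usage sketch (hypothetical ports): build the waypoints, then round and
# extrude them; this is essentially what route_smooth() does internally:
#   pts = path_manhattan(devA.ports[1], devB.ports[2], radius = 5)
#   D = pp.smooth(pts, radius = 5, corner_fun = pp.euler, use_eff = True).extrude(width = 1)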
def path_Z(port1, port2, length1=100, length2=100):
"""Return waypoint path between port1 and port2 in a Z shape. Ports can have any relative
orientation.
Parameters
----------
port1, port2 : Port objects
Ports to route between.
length1 : int or float
Length of route segment coming out of port1.
length2 : int or float
Length of route segment coming out of port2.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
# get basis vectors in port directions
e1, _ = _get_rotated_basis(port1.orientation)
e2, _ = _get_rotated_basis(port2.orientation)
# assemble route points
pt1 = port1.midpoint
pt2 = pt1 + length1*e1 # outward from port1 by length1
pt4 = port2.midpoint
pt3 = pt4 + length2*e2 # outward from port2 by length2
return Path(np.array([pt1, pt2, pt3, pt4]))
def path_V(port1, port2):
"""Return waypoint path between port1 and port2 in a V shape. Useful when
ports point to a single connecting point
Parameters
----------
port1, port2 : Port objects
Ports to route between.
Returns
----------
points : Path
Waypoints for the route path to follow.
"""
# get basis vectors in port directions
e1, _ = _get_rotated_basis(port1.orientation)
e2, _ = _get_rotated_basis(port2.orientation)
# assemble route points
pt1 = port1.midpoint
pt3 = port2.midpoint
# solve for intersection
E = np.column_stack((e1, -1*e2))
pt2 = np.matmul(np.linalg.inv(E), pt3-pt1)[0]*e1 + pt1
return Path(np.array([pt1, pt2, pt3]))
def path_xy(port1, port2, directions = 'xxyx'):
""" Creates a Path that travels only in x and y directions (manhattan) from
one point (or Port) to another. The `directions` string determines the order
of the x/y steps. Example: `directions = 'xyx'` will travel
1/2 the distance in x from p1 to p2
The whole distance in y from p1 to p2
1/2 of the distance in x from p1 to p2
Parameters
----------
    port1, port2 : array-like[2] points or Port objects
Points to route between.
directions : string of {'x','y'} characters
Directions the Path will be routed along
Returns
----------
Path
Waypoints for the route path to follow.
"""
if isinstance(port1, Port): p1 = port1.midpoint
elif np.size(port1) == 2: p1 = port1
if isinstance(port2, Port): p2 = port2.midpoint
elif np.size(port2) == 2: p2 = port2
directions = directions.lower()
num_x = sum([xy == 'x' for xy in directions])
num_y = sum([xy == 'y' for xy in directions])
distance = np.asarray(p2)-p1
points = [p1]
for xy in directions:
if xy == 'x':
travel = np.array([distance[0]/num_x, 0])
new_point = points[-1] + travel
elif xy == 'y':
travel = np.array([0, distance[1]/num_y])
new_point = points[-1] + travel
else:
raise ValueError('[PHIDL] path_xy() directions argument must be string with only "x" or "y" characters')
if np.abs(np.sum(travel)) > 1e-6: # Only add point if traveling some significant distance
points.append(new_point)
return Path(np.array(points))
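# Illustrative example (concrete points): directions = 'xyx' splits the x travel into
# two equal halves around a single y segment:
#   path_xy((0, 0), (10, 4), directions = 'xyx').points
#   -> [[0, 0], [5, 0], [5, 4], [10, 4]] (approximately)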
def route_xy(port1, port2, directions = 'xxyx', width = None, layer = np.nan):
""" Routes a path in x and y directions (manhattan) from one point (or Port)
to another. The `directions` string determines the order of the x/y steps.
Example: `directions = 'xyx'` will travel
1/2 the distance in x from p1 to p2
The whole distance in y from p1 to p2
1/2 of the distance in x from p1 to p2
Parameters
----------
    port1, port2 : array-like[2] points or Port objects
Points to route between.
directions : string of {'x','y'} characters
Directions the Path will be routed along
Returns
----------
D : Device
A Device containing the route and two ports (`1` and `2`) on either end.
"""
P = path_xy(port1, port2, directions = directions)
if width is None:
X1 = CrossSection().add(width=port1.width, ports=(1, 2), layer=layer, name='a')
X2 = CrossSection().add(width=port2.width, ports=(1, 2), layer=layer, name='a')
cross_section = pp.transition(cross_section1=X1, cross_section2=X2, width_type='linear')
D = P.extrude(width=cross_section)
else:
D = P.extrude(width=width, layer=layer)
if not isinstance(width, CrossSection):
newport1 = D.add_port(port = port1, name = 1).rotate(180)
newport2 = D.add_port(port = port2, name = 2).rotate(180)
if np.size(width) == 1:
newport1.width = width
newport2.width = width
if np.size(width) == 2:
newport1.width = width[0]
newport2.width = width[1]
# D = P.extrude(width, layer = layer)
return D
# ################
# gradual_bend() - variable radius-of-curvature bends for low-loss routing
# note - these are not bezier spline curves, but are instead constructed by
# subdividing the coverage angle into equal segments and implementing a gradual
# decrease in bending radius until the minimum is reached.
# ################
# route_manhattan() - routing between any two ports rotated in 90 degree increments
# note - ports must be located diagonally from each other and oriented along
# cardinal directions. Routing can be done with circular or gradual bends.
# Waveguide width is set by the width parameter of the first port.
# ################
# route_manhattan_auto() - handy routine which performs route_manhattan() on a
# vector of ports provided to it, allowing easy connecting of many objects.
class RoutingError(ValueError):
pass
def route_manhattan(
port1,
port2,
bendType='circular',
layer=0,
radius=20
):
"""
.. deprecated:: 1.6.0
`route_manhattan` will be removed in August 2022, please replace with
`route_smooth()`.
"""
warnings.warn("""[PHIDL] Warning: route_manhattan() will be deprecated
in August 2022, please replace with `route_smooth()`""")
#route along cardinal directions between any two ports placed diagonally
#from each other
valid_bend_types = ["circular", "gradual"]
if bendType not in valid_bend_types:
        raise ValueError("bendType=%s not in %s" % (bendType, valid_bend_types))
if bendType == "gradual":
b = _gradual_bend(radius=radius)
radius_eff = b.xsize
else:
radius_eff = radius
if (
abs(port1.midpoint[0] - port2.midpoint[0]) < 2 * radius_eff
or abs(port1.midpoint[1] - port2.midpoint[1]) < 2 * radius_eff
):
raise RoutingError(
"bend does not fit (radius = %s) you need radius <" % radius_eff,
min(
[
abs(port1.midpoint[0] - port2.midpoint[0]) / 2,
abs(port1.midpoint[1] - port2.midpoint[1]) / 2,
]
),
)
Total = Device()
width=port1.width
#first map into uniform plane with normal x,y coords
#allows each situation to be put into uniform cases of quadrants for routing.
#this is because bends change direction and positioning.
if port1.orientation==0:
p2=[port2.midpoint[0],port2.midpoint[1]]
p1=[port1.midpoint[0],port1.midpoint[1]]
if port1.orientation==90:
p2=[port2.midpoint[1],-port2.midpoint[0]]
p1=[port1.midpoint[1],-port1.midpoint[0]]
if port1.orientation==180:
p2=[-port2.midpoint[0],-port2.midpoint[1]]
p1=[-port1.midpoint[0],-port1.midpoint[1]]
if port1.orientation==270:
p2=[-port2.midpoint[1],port2.midpoint[0]]
p1=[-port1.midpoint[1],port1.midpoint[0]]
Total.add_port(name=1,port=port1)
Total.add_port(name=2,port=port2)
if p2[1] == p1[1] or p2[0] == p1[0]:
raise ValueError('Error - ports must be at different x AND y values.')
#if it is parallel or anti-parallel, route with 180 option
if (np.round(np.abs(np.mod(port1.orientation - port2.orientation,360)),3) == 180) or (np.round(np.abs(np.mod(port1.orientation - port2.orientation,360)),3) == 0):
R1 = _route_manhattan180(port1=port1,port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
else:
#first quadrant case
if (p2[1] > p1[1]) & (p2[0] > p1[0]):
#simple 90 degree single-bend case
if port2.orientation == port1.orientation-90 or port2.orientation == port1.orientation+270:
R1 = _route_manhattan90(port1=port1,port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
elif port2.orientation == port1.orientation+90 or port2.orientation == port1.orientation-270:
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=port1.orientation,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=port1.orientation,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=1,destination=port1)
R1 = _route_manhattan180(port1=b1.ports[2],port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
#second quadrant case
if (p2[1] > p1[1]) & (p2[0] < p1[0]):
if np.abs(port1.orientation-port2.orientation) == 90 or np.abs(port1.orientation-port2.orientation) == 270:
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=port1.orientation,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=port1.orientation,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=1,destination=port1)
R1 = _route_manhattan180(port1=b1.ports[2],port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
#third quadrant case
if (p2[1] < p1[1]) & (p2[0] < p1[0]):
if np.abs(port1.orientation-port2.orientation) == 90 or np.abs(port1.orientation-port2.orientation) == 270:
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=port1.orientation,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=port1.orientation,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=1,destination=port1)
R1 = _route_manhattan180(port1=b1.ports[2],port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
#fourth quadrant case
if (p2[1] < p1[1]) & (p2[0] > p1[0]):
#simple 90 degree single-bend case
if port2.orientation == port1.orientation+90 or port2.orientation == port1.orientation-270:
R1 = _route_manhattan90(port1=port1,port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
elif port2.orientation == port1.orientation-90 or port2.orientation == port1.orientation+270:
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=port1.orientation,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=port1.orientation,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=1,destination=port1)
R1 = _route_manhattan180(port1=b1.ports[2],port2=port2,bendType=bendType,layer=layer,radius=radius)
r1 = Total.add_ref(R1)
return Total
def _route_manhattan180(
port1,
port2,
bendType='circular',
layer=0,
radius=20
):
#this is a subroutine of route_manhattan() and should not be used by itself.
Total = Device()
width=port1.width
#first map into uniform plane with normal x,y coords
#allows each situation to be put into uniform cases of quadrants for routing.
#this is because bends change direction and positioning.
if port1.orientation==0:
p2=[port2.midpoint[0],port2.midpoint[1]]
p1=[port1.midpoint[0],port1.midpoint[1]]
if port1.orientation==90:
p2=[port2.midpoint[1],-port2.midpoint[0]]
p1=[port1.midpoint[1],-port1.midpoint[0]]
if port1.orientation==180:
p2=[-port2.midpoint[0],-port2.midpoint[1]]
p1=[-port1.midpoint[0],-port1.midpoint[1]]
if port1.orientation==270:
p2=[-port2.midpoint[1],port2.midpoint[0]]
p1=[-port1.midpoint[1],port1.midpoint[0]]
#create placeholder ports based on the imaginary coordinates we created
Total.add_port(name='t1',midpoint=[0,0],orientation=0,width=width)
if(port1.orientation!=port2.orientation):
Total.add_port(name='t2',midpoint=list(np.subtract(p2,p1)),orientation=180,width=width)
else:
Total.add_port(name='t2',midpoint=list(np.subtract(p2,p1)),orientation=0,width=width)
if port1.orientation==port2.orientation:
#first quadrant target
if (p2[1] > p1[1]) & (p2[0] > p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=90,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='ccw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=90,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0],0])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]-radiusEff*2])
R1=route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b2.ports[2])
#second quadrant target
if (p2[1] > p1[1]) & (p2[0] < p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=90,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='ccw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=90,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]-radiusEff*2])
R1=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b2.ports[2],port2=Total.ports['t2'],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=b1.ports[1])
Total.add_port(name=2,port=r2.ports[2])
#third quadrant target
if (p2[1] < p1[1]) & (p2[0] < p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=-90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-90,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='cw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-90,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]+radiusEff*2])
R1=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b2.ports[2],port2=Total.ports['t2'],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=b1.ports[1])
Total.add_port(name=2,port=r2.ports[2])
#fourth quadrant target
if (p2[1] < p1[1]) & (p2[0] > p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=-90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-90,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='cw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-90,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0],0])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]+radiusEff*2])
R1=route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b2.ports[2])
#other port orientations are not supported:
elif np.round(np.abs(np.mod(port1.orientation - port2.orientation,360)),3) != 180:
raise ValueError('[DEVICE] route() error: Ports do not face each other (orientations must be 180 apart)')
#otherwise, they are 180 degrees apart:
else:
#first quadrant target
if (p2[1] > p1[1]) & (p2[0] > p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=90,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='ccw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=90,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0]-radiusEff*2,0])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]-radiusEff*2])
R1=route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b2.ports[2])
#second quadrant target
if (p2[1] > p1[1]) & (p2[0] < p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=90,theta=90)
B3=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=180,theta=-90)
B4=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=90,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='ccw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=90,direction='ccw')
B3=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=180,direction='cw')
B4=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=90,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b3=Total.add_ref(B3)
b4=Total.add_ref(B4)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]-radiusEff*4])
R1=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r1=Total.add_ref(R1)
b3.connect(port=b3.ports[1],destination=b2.ports[2])
b3.move([p2[0]-p1[0],0])
R2=route_basic(port1=b2.ports[2],port2=b3.ports[1],layer=layer)
r2=Total.add_ref(R2)
b4.connect(port=b4.ports[1],destination=b3.ports[2])
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b4.ports[2])
#third quadrant target
if (p2[1] < p1[1]) & (p2[0] < p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=-90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-90,theta=-90)
B3=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-180,theta=90)
B4=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-90,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='cw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-90,direction='cw')
B3=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-180,direction='ccw')
B4=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-90,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b3=Total.add_ref(B3)
b4=Total.add_ref(B4)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]+radiusEff*4])
R1=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r1=Total.add_ref(R1)
b3.connect(port=b3.ports[1],destination=b2.ports[2])
b3.move([p2[0]-p1[0],0])
R2=route_basic(port1=b2.ports[2],port2=b3.ports[1],layer=layer)
r2=Total.add_ref(R2)
b4.connect(port=b4.ports[1],destination=b3.ports[2])
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b4.ports[2])
#fourth quadrant target
if (p2[1] < p1[1]) & (p2[0] > p1[0]):
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=-90)
B2=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=-90,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='cw')
B2=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=-90,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b2=Total.add_ref(B2)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0]-radiusEff*2,0])
b2.connect(port=b2.ports[1],destination=b1.ports[2])
b2.move([0,p2[1]-p1[1]+radiusEff*2])
R1=route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
r1=Total.add_ref(R1)
R2=route_basic(port1=b1.ports[2],port2=b2.ports[1],layer=layer)
r2=Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=b2.ports[2])
Total.rotate(angle = port1.orientation, center = p1)
Total.move(origin = Total.ports['t1'], destination = port1)
return Total
def _route_manhattan90(
port1,
port2,
bendType='circular',
layer=0,
radius=20
):
#this is a subroutine of route_manhattan() and should not be used by itself.
Total = Device()
width=port1.width
#first map into uniform plane with normal x,y coords
#allows each situation to be put into uniform cases of quadrants for routing.
#this is because bends change direction and positioning.
if port1.orientation==0:
p2=[port2.midpoint[0],port2.midpoint[1]]
p1=[port1.midpoint[0],port1.midpoint[1]]
if port1.orientation==90:
p2=[port2.midpoint[1],-port2.midpoint[0]]
p1=[port1.midpoint[1],-port1.midpoint[0]]
if port1.orientation==180:
p2=[-port2.midpoint[0],-port2.midpoint[1]]
p1=[-port1.midpoint[0],-port1.midpoint[1]]
if port1.orientation==270:
p2=[-port2.midpoint[1],port2.midpoint[0]]
p1=[-port1.midpoint[1],port1.midpoint[0]]
#create placeholder ports based on the imaginary coordinates we created
Total.add_port(name='t1',midpoint=[0,0],orientation=0,width=width)
#CHECK THIS
#first quadrant target, route upward
if (p2[1] > p1[1]) & (p2[0] > p1[0]):
Total.add_port(name='t2',midpoint=list(np.subtract(p2,p1)),orientation=-90,width=width)
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='ccw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0]-radiusEff,0])
R1 = route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
R2 = route_basic(port1=b1.ports[2],port2=Total.ports['t2'],layer=layer)
r1 = Total.add_ref(R1)
r2 = Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=r2.ports[2])
#fourth quadrant target, route downward
if (p2[1] < p1[1]) & (p2[0] > p1[0]):
Total.add_port(name='t2',midpoint=list(np.subtract(p2,p1)),orientation=90,width=width)
if bendType == 'circular':
B1=_arc(radius=radius,width=width,layer=layer,angle_resolution=1,start_angle=0,theta=-90)
radiusEff=radius
if bendType == 'gradual':
B1=_gradual_bend(radius=radius,width=width,layer=layer,start_angle=0,direction='cw')
radiusEff=B1.xsize-width/2
b1=Total.add_ref(B1)
b1.connect(port=b1.ports[1],destination=Total.ports['t1'])
b1.move([p2[0]-p1[0]-radiusEff,0])
R1 = route_basic(port1=Total.ports['t1'],port2=b1.ports[1],layer=layer)
R2 = route_basic(port1=b1.ports[2],port2=Total.ports['t2'],layer=layer)
r1 = Total.add_ref(R1)
r2 = Total.add_ref(R2)
Total.add_port(name=1,port=r1.ports[1])
Total.add_port(name=2,port=r2.ports[2])
Total.rotate(angle = port1.orientation, center = p1)
Total.move(origin = Total.ports['t1'], destination = port1)
return Total
def route_manhattan_auto(
ports,
bendType='circular',
layer=0,
radius=20
):
""" routes a one-dimensional array of ports using manhattan algorithm
    by connecting each port in the list to the next one in sequence.
    Accepts the same parameters as ordinary route_manhattan() to determine bending. """
Total=Device()
    for x in range(len(ports) - 1):
R = route_manhattan(port1=ports[x],port2=ports[x+1],bendType=bendType,layer=layer,radius=radius)
r = Total.add_ref(R)
return Total
def route_turn_manhattan(
port1,
port2,
layer=0,
radius=20
):
"""
    Manhattan routing between two ports. If the port orientations are not cardinal,
    adds a turn to make them cardinal and then routes.
Parameters
----------
port1, port2: Port objects
Ports to route to and from
layer: int (default: 0)
Layer to use for the routes
radius: float (default: 20)
Curve radius for bends
Returns
----------
Device object
Notes
----------
If direction is not cardinal, will route to nearest cardinal, then call
route_manhattan.
"""
D = Device()
new_ports = []
for port in (port1, port2):
if port.orientation % 90 == 0:
new_ports.append(port)
else:
turn_angle = _get_turn_angle(port.orientation, _to_cardinal(port.orientation))
turn_route = turn(port, radius=radius, angle=turn_angle, layer=layer)
D.add_ref(turn_route)
new_ports.append(turn_route.ports[2])
#Manhattan on new ports
route = route_manhattan(new_ports[0], new_ports[1], bendType='circular', layer=layer,
radius=radius)
D.add_ref(route)
return D
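#==============================================================================
# Example usage sketch for route_turn_manhattan() (commented out). The port
# positions and the 10-degree off-cardinal orientation are assumptions
#==============================================================================
# D = Device()
# pT1 = D.add_port(name = 'T1', midpoint = (0, 0), width = 4, orientation = 10)
# pT2 = D.add_port(name = 'T2', midpoint = (150, 100), width = 4, orientation = 270)
# D.add_ref(route_turn_manhattan(pT1, pT2, layer = 0, radius = 20))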
def _to_cardinal(angle):
"""
Determines which cardinal direction is closest to input angle
Parameters
----------
angle : float
Returns
-------
angle : [-180, -90, 0, 90]
Which cardinal direction is closest to the input angle
"""
angle = _map_to_pm180(angle)
cardinals = np.array([-180, -90, 0, 90])
arg = np.argmin(np.abs(angle - cardinals))
return cardinals[arg]
def _map_to_pm180(angle):
"""converts an angle to an angle between -180 (inclusive) to +180 (exclusive)"""
return np.mod(angle + 180,360)-180
def _get_turn_angle(start_angle, target_angle):
"""
Difference in angle in the range -180 to +180 (where negative is counter clockwise)
Parameters
----------
start_angle, target_angle : float
Returns
-------
float
difference in angle.
"""
return _map_to_pm180(target_angle - start_angle)
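#==============================================================================
# Illustrative worked examples for the angle helpers above:
# _map_to_pm180(190) -> -170, _to_cardinal(10) -> 0, _get_turn_angle(10, 0) -> -10
#==============================================================================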
#==============================================================================
# Test code for route_manhattan
#==============================================================================
# D=Device()
# A=pg.compass()
# A.add_port(name=1,port=A.ports['N'])
# A.add_port(name=2,port=A.ports['E'])
# A.add_port(name=3,port=A.ports['S'])
# A.add_port(name=4,port=A.ports['W'])
# points=[]
# points.append((300,300))
# points.append((-300,300))
# points.append((-300,-300))
# points.append((300,-300))
# xoff=0
# yoff=0
# for x in range(4):
# for y in range(4):
# for z in range(4):
# a = D.add_ref(A)
# b = D.add_ref(A)
# a.center=(xoff,yoff)
# b.center=(xoff+(points[y])[0],yoff+(points[y])[1])
# C = route_manhattan(bendType='gradual',port1=a.ports[z+1],port2=b.ports[x+1])
# c=D.add_ref(C)
# yoff+=600
# yoff+=600
# xoff+=600
# yoff=0
# quickplot(D)
```
#### File: phidl/tests/test_path.py
```python
import pytest
import numpy as np
from phidl import Device, Group, Path, CrossSection
import phidl.geometry as pg
import phidl.path as pp
# import phidl.utilities as pu
def test_path_extrude_width1_constant():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
D = P.extrude(width = 1, layer = np.nan, simplify = None)
h = D.hash_geometry(precision = 1e-4)
assert(h == '048ae3e85ff393362d2283c0046db1f0a53b5fe7')
def test_path_extrude_layer():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
D = P.extrude(width = 1, layer = 3, simplify = None)
h = D.hash_geometry(precision = 1e-4)
assert(h == '3ec778d98ce1760c4ba067bd1bdc0baf80dcbaf7')
def test_path_extrude_simplify():
P = pp.arc(radius = 10, angle = 90, num_pts = 300)
D = P.extrude(width = 1, layer = 3, simplify = 1e-1)
h = D.hash_geometry(precision = 1e-4)
assert(h == '92a41fb5afa37cde3b06b521ff14d8445d962069')
def test_path_extrude_width2_linearly_varying():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
D = P.extrude(width = [2.5,3], layer = np.nan, simplify = None)
h = D.hash_geometry(precision = 1e-4)
assert(h == '2768e7f7ce7503a512089ddb80cd52ed0b43fd8d')
def test_path_extrude_width3_crossection():
# Create a blank CrossSection
X = CrossSection()
# Add a a few "sections" to the cross-section
X.add(width = 1, offset = 0, layer = 0, ports = ('in','out'))
X.add(width = 3, offset = 2, layer = 2)
X.add(width = 3, offset = -2, layer = 2)
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
D = P.extrude(width = X, layer = np.nan, simplify = None)
h = D.hash_geometry(precision = 1e-4)
assert(h == 'e17c4ed7a7467412728dc893610006fa2d9982b0')
def test_blank_path():
P = Path()
h = P.hash_geometry(precision = 1e-4)
assert(h == 'de8a847bff8c343d69b853a215e6ee775ef2ef96')
def test_path_straight():
P = pp.straight(length = 15, num_pts = 100)
h = P.hash_geometry(precision = 1e-4)
assert(h == '3672036ccf29992546436e1cc5e62e667cde4af0')
def test_path_euler1():
P = pp.euler(radius = 3, angle = 90, p = 1.0, use_eff = False, num_pts = 720)
h = P.hash_geometry(precision = 1e-4)
assert(h == 'd6134ba90167caf551de6d2f8c7230f7f072c562')
def test_path_euler2():
P = pp.euler(radius = 3, angle = 90, p = 0.5, use_eff = False, num_pts = 720)
h = P.hash_geometry(precision = 1e-4)
assert(h == '78b325576400e3f4f9a04c27243432102e84cfe7')
def test_path_euler3():
P = pp.euler(radius = 3, angle = 90, p = 0.5, use_eff = True, num_pts = 720)
h = P.hash_geometry(precision = 1e-4)
assert(h == '0c5fc2a47b910522b4bd37176dbb096f99ea1083')
def test_path_arc():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
h = P.hash_geometry(precision = 1e-4)
assert(h == '53eaa037b8872155a1d1cb81e2daa02a0da2eb91')
def test_path_spiral():
P = pp.spiral(num_turns = 5, gap = 1, inner_gap = 2, num_pts = 10000)
h = P.hash_geometry(precision = 1e-4)
assert(h == '46c5da60268d4fb308b81e0a6de416af332b91fc')
def test_path_smooth1():
P = pp.smooth(
points = [(20,0), (40,0), (80,40), (80,10), (100,10),],
radius = 2,
corner_fun = pp.euler,
use_eff = True,
)
h = P.hash_geometry(precision = 1e-4)
assert(h == '3a2526edee6cf4577cf7fb700ee18a28048e4967')
def test_path_smooth2():
P = pp.smooth(
points = [(20,0), (40,0), (80,40), (80,10), (100,10),],
radius = 4,
corner_fun = pp.arc,
)
h = P.hash_geometry(precision = 1e-4)
assert(h == 'f075520ab38933b35f3c8af110627de29e34b695')
def test_path_rotate1():
P = Path()
P.rotate(47)
h = P.hash_geometry(precision = 1e-4)
assert(h == 'f30d4e52e113a2954e7facee8144ac8bf4ea66c9')
def test_path_rotate2():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
P.rotate(47)
h = P.hash_geometry(precision = 1e-4)
assert(h == 'd3bc47a30787fb6d9514fdd91f45d619c9afff02')
def test_path_move1():
P = Path()
P.move((16,17.7))
h = P.hash_geometry(precision = 1e-4)
assert(h == '6be1a6e602c7ce3d5560176aa7c9ebbf1b6788d5')
def test_path_move2():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
P.move((16,17.7))
h = P.hash_geometry(precision = 1e-4)
assert(h == '200e897720aa4d4a7f54afeaa2ca1faaa0e6b3de')
def test_path_bbox():
P = pp.arc(radius = 10, angle = 90, num_pts = 720)
assert(np.allclose(P.bbox, np.array([[6.123234e-16, 0.000000e+00],
[1.000000e+01, 1.000000e+01]])))
def test_path_append1():
P1 = pp.arc(radius = 12, angle = 47, num_pts = 720)
P2 = pp.arc(radius = 12, angle = 63, num_pts = 720)
P = Path()
P.append(P1)
P.append(P2)
h = P.hash_geometry(precision = 1e-4)
assert(h == '57a2cec018e695db668437a9b3dcdd8c595e300f')
def test_path_append2():
P1 = pp.arc(radius = 12, angle = 47, num_pts = 720)
P2 = pp.arc(radius = 12, angle = 63, num_pts = 720)
P = Path([P1,P2])
h = P.hash_geometry(precision = 1e-4)
assert(h == '57a2cec018e695db668437a9b3dcdd8c595e300f')
def test_path_append3():
P1 = pp.arc(radius = 12, angle = 47, num_pts = 720)
P2 = pp.arc(radius = 12, angle = -63, num_pts = 720)
P = Path([[P1,P2],[P2,P1,P2]])
h = P.hash_geometry(precision = 1e-4)
assert(h == '85646fba990cd2e72334df5d1e31678f090f6ce2')
```
#### File: phidl/tests/test_routing.py
```python
import pytest
import numpy as np
from phidl import Device, Group, Path
import phidl.geometry as pg
import phidl.routing as pr
import phidl.path as pp
# import phidl.utilities as pu
def test_route_quad():
D = Device()
port1 = D.add_port(name = 1, midpoint = (30, 30), width = 20, orientation = 270)
port2 = D.add_port(name = 2, midpoint = (0, 0), width = 20, orientation = 60)
R = pr.route_quad(port1, port2, width1 = None, width2 = None) # width = None means use Port width
h = R.hash_geometry(precision = 1e-4)
assert(h == 'c42b130a3822aaefd368464d9e6558c750928b52')
def test_route_smooth1():
D = Device()
port1 = D.add_port(name='smooth1', midpoint=(40, 0), width=5, orientation=180)
port2 = D.add_port(name='smooth2', midpoint=(0, -40), width=5, orientation=270)
R = pr.route_smooth(
port1,
port2,
radius=5,
width=None,
path_type='manhattan',
manual_path=None,
smooth_options={'corner_fun': pp.euler, 'use_eff': True},
layer=np.nan,
)
h = R.hash_geometry(precision = 1e-4)
assert(h == 'f7865e2b7f865a214b7847fdaa704d729842fde6')
def test_route_smooth2():
D = Device()
port1 = D.add_port(name='smooth1', midpoint=(40, 0), width=5, orientation=180)
port2 = D.add_port(name='smooth2', midpoint=(0, -40), width=5, orientation=270)
R = pr.route_smooth(
port1,
port2,
radius=1.7,
width=2.5,
path_type='manhattan',
manual_path=None,
smooth_options={'corner_fun': pp.arc},
layer=3,
)
h = R.hash_geometry(precision = 1e-4)
assert(h == 'e47d0573b1c1adba357d7ea8f523f84425e474e4')
def test_route_straight():
#straight path
D = Device()
port1 = D.add_port(name='S1', midpoint=(-50, 0), width=4, orientation=90)
port2 = D.add_port(name='S2', midpoint=(-50, 50), width=4, orientation=270)
D = pr.route_smooth(port1, port2, path_type='straight')
h = D.hash_geometry(precision = 1e-4)
assert(h == '051c27aed3cbf01700aadd83cf2dc7304d0236cf')
def test_route_L():
D = Device() #L path
port1 = D.add_port(name='L1', midpoint=(30,0), width=4, orientation=180)
port2 = D.add_port(name='L2', midpoint=(0, 50), width=4, orientation=270)
D = pr.route_smooth(port1, port2, path_type='L')
h = D.hash_geometry(precision = 1e-4)
assert(h == 'f3b8ee6096184a6cae2f6dab7cda038c7f80ba4b')
def test_route_U1():
D = Device()
#U path
port1 = D.add_port(name='U1', midpoint=(50, 50), width=2, orientation=270)
port2 = D.add_port(name='U2', midpoint=(80,50), width=4, orientation=270)
D = pr.route_smooth(port1, port2, radius=10, path_type='U', length1=50)
h = D.hash_geometry(precision = 1e-4)
assert(h == 'abfd2a4c8d3d1d388675dc613efacf369bbd7e4a')
def test_route_U2():
D = Device()
#U path
port1 = D.add_port(name='U3', midpoint=(50, 80), width=4, orientation=10)
port2 = D.add_port(name='U4', midpoint=(80, 130), width=4, orientation=190)
D = pr.route_smooth(port1, port2, path_type='U', length1=20)
h = D.hash_geometry(precision = 1e-4)
assert(h == '21f43cfcbede169789b4808771286e368bb363ca')
def test_route_J1():
D = Device()
#J path
port1 = D.add_port(name='J1', midpoint=(100, 25), width=4, orientation=270)
port2 = D.add_port(name='J2', midpoint=(130, 50), width=4, orientation=180)
D = pr.route_smooth(port1, port2, path_type='J', length1=25, length2=10)
h = D.hash_geometry(precision = 1e-4)
assert(h == '1b17edad12788318cc113a8e1e76893ae301e6ca')
def test_route_J2():
D = Device()
port1 = D.add_port(name='J3', midpoint=(115, 105), width=5, orientation=270)
port2 = D.add_port(name='J4', midpoint=(131, 130), width=5, orientation=180)
D = pr.route_smooth(port1, port2, path_type='J', length1=25, length2=30)
h = D.hash_geometry(precision = 1e-4)
assert(h == '6872cc085a7f8829a8962cfe6b3ed53e253ac075')
def test_route_C1():
D = Device()
#C path
port1 = D.add_port(name='C1', midpoint=(180, 35), width=4, orientation=90)
port2 = D.add_port(name='C2', midpoint=(178, 15), width=4, orientation=270)
D = pr.route_smooth(port1, port2, path_type='C', length1=15, left1=30, length2=15)
h = D.hash_geometry(precision = 1e-4)
assert(h == 'd9f1f76304ce01a775fe92b3ef662f26373fc2a6')
def test_route_C2():
D = Device()
port1 = D.add_port(name='C3', midpoint=(150, 105), width=4, orientation=90)
port2 = D.add_port(name='C4', midpoint=(180, 105), width=4, orientation=270)
D = pr.route_smooth(port1, port2, path_type='C', length1=25, left1=-15, length2=25)
h = D.hash_geometry(precision = 1e-4)
assert(h == 'b894692aa396ccda7f28fdcb3727d0e814e97935')
def test_route_C3():
D = Device()
port1 = D.add_port(name='C5', midpoint=(150, 170), width=4, orientation=0)
port2 = D.add_port(name='C6', midpoint=(175, 170), width=4, orientation=0)
D = pr.route_smooth(port1, port2, path_type='C', length1=10, left1=10, length2=10, radius=4)
h = D.hash_geometry(precision = 1e-4)
assert(h == '098cfe5cac505408cd4c78a1a239595d801904f2')
def test_route_V():
D = Device()
port1 = D.add_port(name='V1', midpoint=(200,50), width=5, orientation=284)
port2 = D.add_port(name='V2', midpoint=(230, 50), width=5, orientation=270-14)
D = pr.route_smooth(port1, port2, path_type='V')
h = D.hash_geometry(precision = 1e-4)
assert(h == 'b8a5cb77ad0e35e367530168f3aa9c72b3538117')
def test_route_Z():
D = Device()
port1 = D.add_port(name='Z1', midpoint=(280,0), width=4, orientation=190)
port2 = D.add_port(name='Z2', midpoint=(250, 50), width=3, orientation=-10)
D = pr.route_smooth(port1, port2, path_type='Z', length1=30, length2=40)
h = D.hash_geometry(precision = 1e-4)
assert(h == '1332b33414a4362b9e83d11649b8ecaffe8604b0')
``` |
{
"source": "JoMaAlves/Graph-Craftsman",
"score": 4
} |
#### File: src/components/vertex.py
```python
from components.prints import *
class vertex:
def __init__(self, value, paths):
self.value = value
self.edges = []
self.prevEdges = []
self.nextEdges = []
self.paths = paths
self.paths[value] = [0,[value]]
# Adds an edge to the edges list if the graph is not directed
def addEdge(self, edge):
self.edges.append(edge)
# Adds an edge to the nextEdges list if it is a directed graph
def addNext(self, next):
self.nextEdges.append(next)
# Adds an edge to the prevEdges list if it is a directed graph
def addPrevious(self, prev):
self.prevEdges.append(prev)
# Add new paths
def addNewPaths(self, paths):
for i in paths:
self.paths[i] = [0,[]]
# Checks if there is a connection between the vertex already
def checkEdges(self, vertex, direc, weight):
if(direc):
for i in self.nextEdges:
if(weight):
if(i[0] == vertex):
return True
else:
if(i == vertex):
return True
for i in self.prevEdges:
if(weight):
if(i[0] == vertex):
return True
else:
if(i == vertex):
return True
else:
for i in self.edges:
if(weight):
if(i[0] == vertex):
return True
else:
if(i == vertex):
return True
return False
# Gets the list of Adjacent Edges and place it into a string
def listAdjacents(self, direc, weight):
if(direc):
next = ""
prev = ""
if(weight):
for i in self.nextEdges:
next += str(i[0].value) + " "
for i in self.prevEdges:
prev+=str(i[0].value) + " "
else:
for i in self.nextEdges:
next += str(i.value) + " "
for i in self.prevEdges:
prev+=str(i.value) + " "
printAdjList(direc, next.strip(), prev.strip())
else:
all = ""
if(weight):
for i in self.edges:
all +=str(i[0].value) + " "
else:
for i in self.edges:
all +=str(i.value) + " "
printAdjList(direc, all.strip())
# Check if two vertex are adjacent
def adjacencyCheck(self, node, direc, weight):
check = False
if(direc):
if(weight):
for i in self.nextEdges:
if(node == i[0].value):
check = True
for i in self.prevEdges:
if(node == i[0].value):
check = True
else:
for i in self.nextEdges:
if(node == i.value):
check = True
for i in self.prevEdges:
if(node == i.value):
check = True
else:
if(weight):
for i in self.edges:
if(node == i[0].value):
check = True
else:
for i in self.edges:
if(node == i.value):
check = True
return check
# Gets the degree of the vertex
def getDegreeEdges(self,direc):
if(direc):
return len(self.nextEdges), len(self.prevEdges)
else:
return len(self.edges), None
def getValuesList(self,direc, valor):
lista_edges = []
if(direc):
for i in self.nextEdges:
if(valor):
lista_edges.append(i[0].value)
else:
lista_edges.append(i.value)
else:
for i in self.edges:
if(valor):
lista_edges.append(i[0].value)
else:
lista_edges.append(i.value)
return lista_edges, len(lista_edges)
def getMatrixAdj(self, direc, valor,list_elements):
lista_aux_edges = []
lista_final_edges = []
if(direc):
for i in self.nextEdges:
if(valor):
lista_aux_edges.append(i[0])
else:
lista_aux_edges.append(i)
else:
for i in self.edges:
if(valor):
lista_aux_edges.append(i[0])
else:
lista_aux_edges.append(i)
for i in list_elements:
if i in lista_aux_edges:
lista_final_edges.append(1)
else:
lista_final_edges.append(0)
return lista_final_edges
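# Example usage sketch (commented out; assumes an undirected, unweighted graph
# and that the components package is importable):
# paths = {}
# a = vertex("A", paths)
# b = vertex("B", paths)
# a.addEdge(b)
# b.addEdge(a)
# a.adjacencyCheck("B", False, False)   # -> True
# a.getDegreeEdges(False)               # -> (1, None)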
``` |
{
"source": "jomalsan/cloud-custodian",
"score": 2
} |
#### File: c7n_azure/tests_azure/test_filters_parent.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
from .azure_common import BaseTest
from c7n_azure.filters import ParentFilter
from c7n_azure.resources.key_vault import KeyVault
from c7n_azure.resources.key_vault_keys import KeyVaultKeys
from c7n.config import Config, Bag
from c7n.ctx import ExecutionContext
from c7n.filters.core import ValueFilter
class ParentFilterTest(BaseTest):
def test_schema(self):
self.assertTrue(self.load_policy({
'name': 'test-policy',
'resource': 'azure.keyvault-keys',
'filters': [
{'type': 'parent',
'filter': {
'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cctestkv*'
}}]
}, validate=True))
self.assertTrue(self.load_policy({
'name': 'test-policy',
'resource': 'azure.cosmosdb-collection',
'filters': [
{'type': 'parent',
'filter': {
'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cctestkv*'
}}]
}, validate=True))
def test_verify_parent_filter(self):
manager = KeyVaultKeys(
ExecutionContext(
None,
Bag(name="xyz", provider_name='azure'),
Config.empty()
),
{
'name': 'test-policy',
'resource': 'azure.keyvault-keys',
'filters': [
{'type': 'parent',
'filter': {
'type': 'value',
'key': 'name',
'op': 'glob',
'value': 'cctestkv*'
}}]}
)
self.assertEqual(len(manager.filters), 1)
filter = manager.filters[0]
self.assertTrue(isinstance(filter, ParentFilter))
self.assertTrue(isinstance(filter.parent_manager, KeyVault))
self.assertTrue(isinstance(filter.parent_filter, ValueFilter))
``` |
{
"source": "JomaMinoza/COVID19-mesa",
"score": 3
} |
#### File: JomaMinoza/COVID19-mesa/batchrunner_local.py
```python
import copy
from itertools import product, count
import pandas as pd
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
import os
import random
class ParameterError(TypeError):
MESSAGE = (
"parameters must map a name to a value. "
"These names did not match paramerets: {}"
)
def __init__(self, bad_names):
self.bad_names = bad_names
def __str__(self):
return self.MESSAGE.format(self.bad_names)
class VariableParameterError(ParameterError):
MESSAGE = (
"variable_parameters must map a name to a sequence of values. "
"These parameters were given with non-sequence values: {}"
)
def __init__(self, bad_names):
super().__init__(bad_names)
class FixedBatchRunner:
""" This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model and
agent-level reporters, dictionaries mapping a variable name to a function
which collects some data from the model or its agents at the end of the run
and stores it.
Note that by default, the reporters only collect data at the *end* of the
run. To get step by step data, simply have a reporter store the model's
entire DataCollector object.
"""
def __init__(
self,
model_cls,
parameters_list=None,
fixed_parameters=None,
iterations=1,
max_steps=1000,
model_reporters=None,
agent_reporters=None,
display_progress=True,
):
""" Create a new BatchRunner for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
parameters_list: A list of dictionaries of parameter sets.
                The model will be run once with each dictionary of parameters.
For example, given parameters_list of
[{"homophily": 3, "density": 0.8, "minority_pc": 0.2},
{"homophily": 2, "density": 0.9, "minority_pc": 0.1},
{"homophily": 4, "density": 0.6, "minority_pc": 0.5}]
3 models will be run, one for each provided set of parameters.
fixed_parameters: Dictionary of parameters that stay same through
all batch runs. For example, given fixed_parameters of
{"constant_parameter": 3},
every instantiated model will be passed constant_parameter=3
as a kwarg.
iterations: The total number of times to run the model for each set
of parameters.
max_steps: Upper limit of steps above which each run will be halted
if it hasn't halted on its own.
model_reporters: The dictionary of variables to collect on each run
at the end, with variable names mapped to a function to collect
them. For example:
{"agent_count": lambda m: m.schedule.get_agent_count()}
agent_reporters: Like model_reporters, but each variable is now
collected at the level of each agent present in the model at
the end of the run.
            display_progress: Display progress bar with time estimation?
"""
self.model_cls = model_cls
if parameters_list is None:
parameters_list = []
self.parameters_list = list(parameters_list)
self.fixed_parameters = fixed_parameters or {}
self._include_fixed = len(self.fixed_parameters.keys()) > 0
self.iterations = iterations
self.max_steps = max_steps
self.model_reporters = model_reporters
self.agent_reporters = agent_reporters
if self.model_reporters:
self.model_vars = {}
if self.agent_reporters:
self.agent_vars = {}
self.display_progress = display_progress
def _make_model_args(self):
"""Prepare all combinations of parameter values for `run_all`
Returns:
Tuple with the form:
            (all_kwargs, total_iterations)
"""
total_iterations = self.iterations
all_kwargs = []
count = len(self.parameters_list)
if count:
for params in self.parameters_list:
kwargs = params.copy()
kwargs.update(self.fixed_parameters)
                # run each parameter set the specified number of iterations
for iter in range(self.iterations):
kwargs_repeated = kwargs.copy()
all_kwargs.append([self.model_cls, kwargs_repeated, self.max_steps, iter])
elif len(self.fixed_parameters):
count = 1
kwargs = self.fixed_parameters.copy()
all_kwargs.append(kwargs)
total_iterations *= count
return all_kwargs, total_iterations
#return (total_iterations, all_kwargs, all_param_values)
    def run_all(self):
        """ Run the model at all parameter combinations and store results. """
        # _make_model_args() already expands the requested iterations, so each
        # prepared argument set is run exactly once through run_wrapper()
        run_iter_args, total_iterations = self._make_model_args()
        results = {}
        with tqdm(total=total_iterations, disable=not self.display_progress) as pbar:
            for iter_args in run_iter_args:
                params, model_data = self.run_wrapper(iter_args)
                results[str(params)] = model_data
                pbar.update()
        return results
@staticmethod
def run_wrapper(iter_args):
model_i = iter_args[0]
kwargs = iter_args[1]
max_steps = iter_args[2]
iteration = iter_args[3]
def run_iteration(model_i, kwargs, max_steps, iteration):
#instantiate version of model with correct parameters
model = model_i(**kwargs)
while model.running and model.schedule.steps < max_steps:
model.step()
#add iteration number to dictionary to make unique_key
kwargs["iteration"] = iteration
if model.datacollector:
return kwargs, model.datacollector.get_model_vars_dataframe()
else:
return kwargs, "no datacollector in model"
return run_iteration(model_i, kwargs, max_steps, iteration)
def run_model(self, model):
""" Run a model object to completion, or until reaching max steps.
If your model runs in a non-standard way, this is the method to modify
in your subclass.
"""
while model.running and model.schedule.steps < self.max_steps:
model.step()
def collect_model_vars(self, model):
""" Run reporters and collect model-level variables. """
model_vars = {}
for var, reporter in self.model_reporters.items():
model_vars[var] = reporter(model)
return model_vars
def collect_agent_vars(self, model):
""" Run reporters and collect agent-level variables. """
agent_vars = {}
for agent in model.schedule._agents.values():
agent_record = {}
for var, reporter in self.agent_reporters.items():
agent_record[var] = getattr(agent, reporter)
agent_vars[agent.unique_id] = agent_record
return agent_vars
'''
def get_model_vars_dataframe(self):
""" Generate a pandas DataFrame from the model-level variables
collected.
"""
return self._prepare_report_table(self.model_vars)
def get_agent_vars_dataframe(self):
""" Generate a pandas DataFrame from the agent-level variables
collected.
"""
return self._prepare_report_table(self.agent_vars, extra_cols=["AgentId"])
def _prepare_report_table(self, vars_dict, extra_cols=None):
"""
Creates a dataframe from collected records and sorts it using 'Run'
column as a key.
"""
extra_cols = ["Run"] + (extra_cols or [])
index_cols = set()
for params in self.parameters_list:
index_cols |= params.keys()
index_cols = list(index_cols) + extra_cols
records = []
for param_key, values in vars_dict.items():
record = dict(zip(index_cols, param_key))
record.update(values)
records.append(record)
df = pd.DataFrame(records)
rest_cols = set(df.columns) - set(index_cols)
ordered = df[index_cols + list(sorted(rest_cols))]
ordered.sort_values(by="Run", inplace=True)
if self._include_fixed:
for param in self.fixed_parameters.keys():
val = self.fixed_parameters[param]
# avoid error when val is an iterable
vallist = [val for i in range(ordered.shape[0])]
ordered[param] = vallist
return ordered
'''
# This is kind of a useless class, but it does carry the 'source' parameters with it
class ParameterProduct:
def __init__(self, variable_parameters):
        if variable_parameters is not None:
self.param_names, self.param_lists = zip(
*(copy.deepcopy(variable_parameters)).items()
)
self._product = product(*self.param_lists)
else:
self.param_names = None
self.param_lists = None
def __iter__(self):
return self
def __next__(self):
        if self.param_names is not None:
return dict(zip(self.param_names, next(self._product)))
else:
return []
# Roughly inspired by sklearn.model_selection.ParameterSampler. Does not handle
# distributions, only lists.
class ParameterSampler:
def __init__(self, parameter_lists, n, random_state=None):
self.param_names, self.param_lists = zip(
*(copy.deepcopy(parameter_lists)).items()
)
self.n = n
if random_state is None:
self.random_state = random.Random()
elif isinstance(random_state, int):
self.random_state = random.Random(random_state)
else:
self.random_state = random_state
self.count = 0
def __iter__(self):
return self
def __next__(self):
self.count += 1
if self.count <= self.n:
return dict(
zip(
self.param_names,
[self.random_state.choice(l) for l in self.param_lists],
)
)
raise StopIteration()
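# Example sketch (commented out): drawing 3 random parameter combinations.
# The parameter names and value lists below are illustrative assumptions.
# sampler = ParameterSampler({"beta": [0.1, 0.2, 0.3], "gamma": [1, 2]}, n=3, random_state=42)
# for params in sampler:
#     print(params)   # e.g. {'beta': 0.1, 'gamma': 2}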
class BatchRunner(FixedBatchRunner):
""" This class is instantiated with a model class, and model parameters
associated with one or more values. It is also instantiated with model and
agent-level reporters, dictionaries mapping a variable name to a function
which collects some data from the model or its agents at the end of the run
and stores it.
Note that by default, the reporters only collect data at the *end* of the
run. To get step by step data, simply have a reporter store the model's
entire DataCollector object.
"""
def __init__(
self,
model_cls,
variable_parameters=None,
fixed_parameters=None,
iterations=1,
max_steps=1000,
model_reporters=None,
agent_reporters=None,
display_progress=True,
):
""" Create a new BatchRunner for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
variable_parameters: Dictionary of parameters to lists of values.
                The model will be run with every combination of these parameters.
For example, given variable_parameters of
{"param_1": range(5),
"param_2": [1, 5, 10]}
                models will be run with {param_1=0, param_2=1},
                {param_1=1, param_2=1}, ..., {param_1=4, param_2=10}.
fixed_parameters: Dictionary of parameters that stay same through
all batch runs. For example, given fixed_parameters of
{"constant_parameter": 3},
every instantiated model will be passed constant_parameter=3
as a kwarg.
iterations: The total number of times to run the model for each
combination of parameters.
max_steps: Upper limit of steps above which each run will be halted
if it hasn't halted on its own.
model_reporters: The dictionary of variables to collect on each run
at the end, with variable names mapped to a function to collect
them. For example:
{"agent_count": lambda m: m.schedule.get_agent_count()}
agent_reporters: Like model_reporters, but each variable is now
collected at the level of each agent present in the model at
the end of the run.
            display_progress: Display progress bar with time estimation?
"""
super().__init__(
model_cls,
ParameterProduct(variable_parameters),
fixed_parameters,
iterations,
max_steps,
model_reporters,
agent_reporters,
display_progress,
)
class MPSupport(Exception):
def __str__(self):
return (
"BatchRunnerMP depends on pathos, which is either not "
"installed, or the path can not be found. "
)
class BatchRunnerMP(BatchRunner):
""" Child class of BatchRunner, extended with multiprocessing support. """
def __init__(self, model_cls, nr_processes=None, **kwargs):
""" Create a new BatchRunnerMP for a given model with the given
parameters.
Args:
model_cls: The class of model to batch-run.
nr_processes: the number of separate processes the BatchRunner
should start, all running in parallel.
kwargs: the kwargs required for the parent BatchRunner class
"""
        if nr_processes is None:
            # identify the number of processors available on the user's machine
available_processors = cpu_count()
self.processes = available_processors
print ("Your system has {} available processors.".format(self.processes))
else:
self.processes = nr_processes
super().__init__(model_cls, **kwargs)
self.pool = Pool(self.processes)
def run_all(self):
"""
Run the model at all parameter combinations and store results,
overrides run_all from BatchRunner.
"""
run_count = count()
run_iter_args, total_iterations = self._make_model_args()
        # the process pool was created in __init__; collect results keyed by parameter set
#results = []
results = {}
#with tqdm(total_iterations, disable=not self.display_progress) as pbar:
#for i, kwargs in enumerate(all_kwargs):
# param_values = all_param_values[i]
# for _ in range(self.iterations):
# make a new process and add it to the queue
#with self.pool as p:
if self.processes > 1:
for params, model_data in self.pool.imap_unordered(self.run_wrapper, run_iter_args):
results[str(params)] = model_data
#For debugging model due to difficulty of getting errors during multiprocessing
else:
for run in run_iter_args:
params, model_data = self.run_wrapper(run)
#params, model_data = self.run_wrapper(run)
                # store in the same results dictionary format as the multiprocessing branch
results[str(params)] = model_data
return results
# empty the queue
'''
results = []
for task in job_queue:
for model_vars, agent_vars in list(task):
results.append((model_vars, agent_vars))
pbar.update()
# store the results
for model_vars, agent_vars in results:
if self.model_reporters:
for model_key, model_val in model_vars.items():
self.model_vars[model_key] = model_val
if self.agent_reporters:
for agent_key, reports in agent_vars.items():
self.agent_vars[agent_key] = reports
with tqdm(total_iterations, disable=not self.display_progress) as pbar:
for i, kwargs in enumerate(all_kwargs):
param_values = all_param_values[i]
for _ in range(self.iterations):
# make a list of parameters for each model run
job_queue.append((kwargs, param_values, next(run_count)))
#start dictionary to store results
#results[next(run_count)] =[param_values]
# empty the queue
results = []
print (len(job_queue))
with self.pool as p:
results.append(p.imap_unordered(self.run_iteration, job_queue))
pbar.update()
'''
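# Example usage sketch (commented out). The model class and parameter names are
# placeholders/assumptions, not part of this module:
# from covid19_model import Covid19Model
# batch = BatchRunnerMP(Covid19Model,
#                       nr_processes=4,
#                       variable_parameters={"infection_rate": [0.1, 0.2]},
#                       fixed_parameters={"population": 100},
#                       iterations=2,
#                       max_steps=500)
# results = batch.run_all()   # dict: str(params) -> model DataCollector dataframe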
``` |
{
"source": "jomar83/airflow",
"score": 2
} |
#### File: cloud/hooks/test_oss.py
```python
import os
import unittest
import oss2
from airflow.exceptions import AirflowException
from airflow.providers.alibaba.cloud.hooks.oss import OSSHook
from tests.providers.alibaba.cloud.utils.test_utils import skip_test_if_no_valid_conn_id
TEST_CONN_ID = os.environ.get('TEST_OSS_CONN_ID', 'oss_default')
TEST_REGION = os.environ.get('TEST_OSS_REGION', 'cn-hangzhou')
TEST_BUCKET = os.environ.get('TEST_OSS_BUCKET', 'test-bucket')
class TestOSSHook(unittest.TestCase):
def setUp(self):
try:
self.hook = OSSHook(region=TEST_REGION, oss_conn_id=TEST_CONN_ID)
self.hook.object_exists(key='test-obj', bucket_name=TEST_BUCKET)
except AirflowException:
self.hook = None
except oss2.exceptions.ServerError as e:
if e.status == 403:
self.hook = None
@skip_test_if_no_valid_conn_id
def test_init(self):
assert self.hook.oss_conn_id == TEST_CONN_ID
@skip_test_if_no_valid_conn_id
def test_get_conn(self):
assert self.hook.get_conn() is not None
@skip_test_if_no_valid_conn_id
def test_parse_oss_url(self):
parsed = self.hook.parse_oss_url(f"oss://{TEST_BUCKET}/this/is/not/a-real-key.txt")
print(parsed)
assert parsed == (TEST_BUCKET, "this/is/not/a-real-key.txt"), "Incorrect parsing of the oss url"
@skip_test_if_no_valid_conn_id
def test_parse_oss_object_directory(self):
parsed = self.hook.parse_oss_url(f"oss://{TEST_BUCKET}/this/is/not/a-real-oss-directory/")
assert parsed == (
TEST_BUCKET,
"this/is/not/a-real-oss-directory/",
), "Incorrect parsing of the oss url"
@skip_test_if_no_valid_conn_id
def test_get_bucket(self):
assert self.hook.get_bucket(TEST_BUCKET) is not None
``` |
{
"source": "JOmarCuenca/Unsplash-WebScrapper",
"score": 3
} |
#### File: JOmarCuenca/Unsplash-WebScrapper/imgScrapperUnsplash.py
```python
import requests
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.webdriver import WebDriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.support import expected_conditions as EC
import time
from fileManager import createFolder,deleteFolder
import programArgs as pargs
from progressbar import progressbar, streams
import logging
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# Setup Progressbar wrapper function
streams.wrap_stderr()
_DEFAULT_SEPARATOR_LENGTH = 50
_WEBSITE_URL = "https://unsplash.com"
_imgURL = []
def printSeparator(title : str, length = _DEFAULT_SEPARATOR_LENGTH):
"""
Prints a separator in console to visualize better the sections of the code.
Args:
title (str): Title of the separator
        length (int, optional): Length of the separator. Defaults to _DEFAULT_SEPARATOR_LENGTH.
"""
logger.info(f" {title.upper()} ".center(length,'='))
def getURLs(images):
"""
    Gets the URLs of the images to be downloaded from the website, based on the website's layout.
    Args:
        images (list): Image WebElements whose srcset attribute contains the target http links.
"""
printSeparator("Getting imgs URL's current screen")
for img in images:
srcset = img.get_attribute("srcset")
if(srcset == None):
continue
sources = srcset.split(" ")
for src in sources:
if("http" in src and "fit=crop&w=700" in src):
_imgURL.append(src)
def cleanDuplicates():
"""
Cleans possible duplicate urls.
"""
global _imgURL
_imgURL = list(dict.fromkeys(_imgURL))
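# Minimal sketch of the order-preserving dedup idiom used above:
# list(dict.fromkeys(["a", "b", "a"]))  ->  ["a", "b"]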
def writeImgs(urls : list, trainPath : str, testPath : str = None, proportion : float = None):
"""
    Saves the PNG files in the targeted dirs from the collected URLs.
    Args:
        urls (list): Target URLs of the image files
        trainPath (str): Training dir path
        testPath (str, optional): Testing dir path
        proportion (float, optional): Fraction of the images written to the training dir before switching to the testing dir
"""
targetFolder = trainPath
switch = False
printSeparator("Saving Imgs")
for x in progressbar(range(len(urls))):
if(not switch and testPath != None and proportion != None and x > len(urls) * proportion):
switch = True
targetFolder = testPath
webImg = requests.get(urls[x])
f = open(f"./{targetFolder}img_{x + 1}.png","wb")
f.write(webImg.content)
f.close()
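# Example sketch (commented out; the folder names are placeholder assumptions).
# With proportion=0.8 roughly the first 80% of the URLs are saved to the train
# folder and the rest to the test folder:
# writeImgs(_imgURL, "data/cats/train/", "data/cats/test/", proportion=0.8)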
def mainActivity(driver, currentHeight, targetNumImgs):
global _imgURL
# Get scroll height
last_height = currentHeight
new_height = -1
iters = 0
toBottom = False
while len(_imgURL) < targetNumImgs and iters < 20:
# Wait to load page
time.sleep(1)
# Scroll down to bottom
driver.execute_script("window.scrollTo(0, document.body.scrollHeight*31/33);" if toBottom else "window.scrollTo(0, document.body.scrollHeight*9/11);")
# Wait to load page
time.sleep(2)
# Calculate new scroll height and compare with last scroll height
new_height = driver.execute_script("return document.body.scrollHeight")
images = driver.find_elements_by_class_name("oCCRx")
getURLs(images)
cleanDuplicates()
iters += 1
if new_height == last_height:
logger.warning("Height didn't update")
if(toBottom):
break
else:
toBottom = True
last_height = new_height
if __name__ == "__main__":
args = pargs.getArgs()
openBrowser = True
path = args.path
driver : WebDriver = None
if(path is None):
try:
with open("pathFile.txt","r") as pathFile:
path = pathFile.readline().rstrip()
pathFile.close()
except Exception:
openBrowser = False
driver = Chrome(path)
driver.get(_WEBSITE_URL)
searchBar = driver.find_element_by_name("searchKeyword")
try:
if not openBrowser:
raise Exception("No path to the selenium web driver")
searchBar.send_keys(args.keyword)
searchBar.send_keys(Keys.ENTER)
except IndexError:
openBrowser = False
driver.close()
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
if(openBrowser):
mainFolder = f"data/{args.keyword}/"
trainFolder = None
testFolder = None
if(args.divide != None):
trainFolder = f"{mainFolder}train/"
testFolder = f"{mainFolder}test/"
## Delete existing folders, if any
deleteFolder(trainFolder)
deleteFolder(testFolder)
## Create Folders
createFolder(trainFolder)
createFolder(testFolder)
else:
# Delete folder if exists
deleteFolder(mainFolder)
# Create Folder
createFolder(mainFolder)
mainActivity(driver, last_height, args.targetNum)
driver.close()
if(args.divide != None):
writeImgs(_imgURL,trainFolder,testFolder, proportion=args.divide)
else:
writeImgs(_imgURL,mainFolder)
``` |
{
"source": "jomarin38/piCAMTracker",
"score": 2
} |
#### File: jomarin38/piCAMTracker/runtracker.py
```python
import picamera
import picamera.array
import numpy as np
import datetime as dt
import os
import io
import re
from time import sleep,time
import sys
import subprocess
from argparse import ArgumentParser
import picamtracker
import prctl
max_temp = 75.0
temp = 20.0
#-- start an arbitrary shell (Python-2.7 subprocess.Popen)
def shell(cmd, *argv):
err = "Error for command: %s" % (cmd)
command = [cmd]
for item in argv:
command.append(item)
try:
p = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out,err = p.communicate()
if(len(err)):
print >> sys.stderr, err
except:
print >> sys.stderr, err
out = ''
return out.strip()
def get_raspi_revision():
rev_file = '/sys/firmware/devicetree/base/model'
info = { 'pi': '', 'model': '', 'rev': ''}
try:
fd = os.open(rev_file, os.O_RDONLY)
line = os.read(fd,256)
os.close(fd)
m = re.match('Raspberry Pi (\d+) Model (\w(?: Plus)?) Rev ([\d\.]+)', line)
if m:
info['pi'] = m.group(1)
info['model'] = m.group(2)
info['rev'] = m.group(3)
except:
pass
return info
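# Illustrative parse result (values are made up), e.g. on a Pi 3 Model B Plus:
#   {'pi': '3', 'model': 'B Plus', 'rev': '1.3'}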
def get_temp():
temp_file='/run/picamtracker/temp'
temp = 20.0
try:
fd = os.open(temp_file, os.O_RDONLY)
temp_string = os.read(fd,256)
os.close(fd)
temp = float(temp_string)
except:
pass
return temp
def main(ashow=True, debug=False):
global config
preview = True
show = 1 if ashow else 0
try:
preview = config.conf['preview']
except:
raise
if debug:
config.conf['debug'] = True
print(get_raspi_revision())
#- open picamera device
with picamera.PiCamera() as camera:
#- determine camera module
revision = camera._revision.upper()
print("camera chip: %s" % revision)
if revision == 'OV5647':
# V1 module
# 1280x720 has a bug. (wrong center value)
resx = 1280
resy = 960
fps = 42
mode = 4
elif revision == 'IMX219':
# V2 module
resx = 1632
resy = 896
fps = 38
mode = 5
else:
raise ValueError('Unknown camera device')
#- check if the crossing line is in the center (this is not needed.)
if config.conf['yCross'] > 0 and config.conf['yCross'] != (resy/32):
print("WARNING: Y crossing %d expected but %d given!" % (resy/32, config.conf['yCross']))
if config.conf['xCross'] > 0 and config.conf['xCross'] != (resx/32):
print("WARNING: X crossing bar is not in the center of the screen!")
camera.resolution = (resx,resy)
if show:
preview = True
camera.framerate = 25
x_disp = config.conf['offsetX']
y_disp = config.conf['offsetY']
display = picamtracker.Display(caption='piCAMTracker',x=x_disp,y=y_disp,w=resy/2,h=resx/2)
else:
display = None
camera.sensor_mode = mode
camera.framerate = fps
print("warm-up 2 seconds...")
#serialPort = picamtracker.SerialIO.SerialCommunication(port=config.conf['serialPort'],options=config.conf['serialConf'])
greenLED = picamtracker.GPIOPort.gpioPort(config.conf['greenLEDPort'],
is_active_low=config.conf['ledActiveLow'],
duration=config.conf['signalLength'],
start_blinks=3)
redLED = picamtracker.GPIOPort.gpioPort(config.conf['redLEDPort'],
duration=config.conf['signalLength'],
is_active_low=config.conf['ledActiveLow'])
sleep(1.0)
print("...start")
picamtracker.GPIOPort.statusLED(config.conf['statusLEDPort'], on=True)
if preview:
cl = np.zeros((resy,resx,3), np.uint8)
ycross = config.conf['yCross']
if ycross > 0:
ym = 16 * ycross
cl[ym,:,:] = 0xff #horizontal line
xcross = config.conf['xCross']
if xcross > 0:
xm = 16 * xcross
cl[:,xm,:] = 0xff #vertical line
#- preview settings
px = config.conf['previewX']
py = config.conf['previewY']
camera.start_preview()
camera.preview.fullscreen = False
if show:
camera.preview.alpha = 192
else:
camera.preview.alpha = 255
rotation = config.conf['viewAngle']
camera.preview.window = (px,py,resy/2,resx/2)
camera.preview.rotation = rotation
#- overlay settings
overlay = camera.add_overlay(source=np.getbuffer(cl),
size=(resx,resy),format='rgb')
overlay.fullscreen = False
overlay.alpha = 32
overlay.layer = 3
overlay.window = (px,py,resy/2,resx/2)
overlay.rotation= rotation
#- disable auto (exposure + white balance)
#camera.shutter_speed = camera.exposure_speed
#camera.exposure_mode = 'off'
#g = camera.awb_gains
#camera.awb_mode = 'off'
#camera.awb_gains = g
vstream = picamera.PiCameraCircularIO(camera, seconds=config.conf['videoLength'])
tracker = picamtracker.Tracker(camera, greenLed=greenLED, redLed=redLED, config=config)
writer = picamtracker.Writer(camera, stream=vstream, config=config)
cmds = picamtracker.CommandInterface(config=config)
cmds.subscribe(tracker.set_maxDist, 'maxDist')
cmds.subscribe(tracker.set_trackMaturity, 'trackMaturity')
cmds.subscribe(tracker.testCrossing, 'testBeep')
cmds.subscribe(config.set_storeParams, 'storeParams')
#cmds.subscribe(greenLED.check, 'testBeep')
with picamtracker.MotionAnalyser(camera, tracker, display, show, config) as output:
loop = 0
t_wait = 0.5
old_frames = 0
camera.annotate_text_size = 24
camera.start_recording(output=vstream, format='h264', level='4.2', motion_output=output)
cmds.subscribe(output.set_vMax, 'vMax')
cmds.subscribe(output.set_vMin, 'vMin')
cmds.subscribe(output.set_maxArea, 'maxArea')
cmds.subscribe(output.set_minArea, 'minArea')
cmds.subscribe(output.set_sadThreshold, 'sadThreshold')
cmds.subscribe(output.set_debug, 'debug')
cmds.subscribe(output.set_baseB, 'baseB')
if config.conf['debugInputPort']:
picamtracker.GPIOPort.addCallback(config.conf['debugInputPort'], output.debug_button)
prctl.set_name('python')
try:
while True:
global temp
loop += 1
# check temperature every minute
if loop % 120 == 0:
temp = get_temp()
# update statistics every second
if loop & 1:
add_text = ""
sep = ""
if tracker.noise > 0.8:
add_text += " NOISY"
sep = " +"
if camera.analog_gain > 7:
add_text = add_text + sep + " DARK"
if temp > max_temp:
add_text = add_text + sep + " HOT (%4.1f)" % temp
if len(add_text):
add_text += " !"
frames = output.processed_frames
fs = (frames - old_frames) / (2 * t_wait)
old_frames = frames
camera.annotate_text = "%s (%3.1f f/s) %s" % (dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), fs, add_text)
delay,frame,motion = tracker.getStatus()
if frame != 0:
#t0 = time()
#camera.split_recording('after.h264')
#vstream.copy_to('before.h264',size=2147483648)
#vstream.copy_to('before.h264',size=1073741824)
#vstream.clear()
#camera.split_recording(vstream)
#name = "AAA-%d.jpg" % loop
#camera.capture(reader, format='rgb', use_video_port=True)
writer.takeSnapshot(delay, frame, motion)
tracker.releaseLock()
#print("capture: %4.2fms" % (1000.0 * (time() - t0)))
# check for USB stick every 60 seconds
camera.wait_recording(t_wait)
except KeyboardInterrupt:
pass
finally:
# stop camera and preview
#serialPort.terminated = True
greenLED.terminated = True
redLED.terminated = True
camera.stop_recording()
if preview:
camera.stop_preview()
camera.remove_overlay(overlay)
# stop all threads
if display is not None:
display.terminated = True
cmds.stop()
tracker.stop()
writer.stop()
# wait and join threads
sleep(0.5)
if display is not None:
display.join()
#serialPort.join()
greenLED.join()
redLED.join()
cmds.join()
tracker.join()
writer.join()
picamtracker.GPIOPort.statusLED(config.conf['statusLEDPort'], on=False)
#config.write()
if __name__ == '__main__':
parser = ArgumentParser(prog='piCAMTracker')
parser.add_argument('-s', '--show', action='store_true',
help = 'show internal graphical information (slow!)')
parser.add_argument( '-d', '--debug', action='store_true',
help = 'write debug information for later investigation')
args = parser.parse_args()
global config
config = picamtracker.Configuration('config.json')
os.system("[ ! -d /run/picamtracker ] && sudo mkdir -p /run/picamtracker && sudo chown pi:www-data /run/picamtracker && sudo chmod 775 /run/picamtracker")
os.system("/home/pi/piCAMTracker/etc/background_service.sh </dev/null&")
out = shell('/usr/bin/vcgencmd', 'measure_temp')
print("Actual core %s" % out)
main(args.show, args.debug)
``` |
{
"source": "jomasim/heroku-devops",
"score": 3
} |
#### File: v2/models/user.py
```python
from app.api.v2.database import DBConnection
from werkzeug.security import generate_password_hash
from psycopg2 import sql,extras
cur = DBConnection.get_connection().cursor(cursor_factory = extras.RealDictCursor)
class User(object):
''' create user '''
@staticmethod
def create(data):
hashed=generate_password_hash(data['password'])
query = "INSERT INTO users (name,username,email,password,role)" \
"VALUES('%s','%s', '%s', '%s', '%s')" % (
data['name'],data['username'],data['email'],hashed,data['role'])
cur.execute(query)
@staticmethod
def create_admin(data):
hashed=generate_password_hash(data['password'])
query = "INSERT INTO users (name,username,email,password,role)" \
"VALUES('%s','%s', '%s', '%s', '%s')" % (
data['name'],data['username'],data['email'],hashed,"admin")
cur.execute(query)
@staticmethod
def exists(data):
if data['email']:
query="SELECT * FROM users WHERE email = '%s';" % data['email']
cur.execute(query)
return cur.fetchone()
return False
@staticmethod
def get_by_email(email):
if email:
query="SELECT * FROM users WHERE email = '%s';" % email
cur.execute(query)
return cur.fetchone()
@staticmethod
def get_by_id(user_id):
if user_id:
query="SELECT * FROM users WHERE id = '%s';" % user_id
cur.execute(query)
return cur.fetchone()
@staticmethod
def get():
query="SELECT * FROM users"
cur.execute(query)
users=cur.fetchall()
for user in users:
user.pop("password")
return users
```
#### File: v2/views/auth_resource.py
```python
from flask import jsonify, make_response, request
from flask_restful import Resource
from app.api.v2.request import Request
from app.api.v2.models.user import User
from werkzeug.security import check_password_hash
from flask_jwt_extended import create_access_token, jwt_required
import datetime
class AuthController(Resource):
def post(self):
data = request.get_json()
request_schema = {'email': 'required|email',
'password': 'required|string|min:6|max:12'}
validator = Request(data, request_schema)
if validator.validate() == None:
email = data['email']
password = data['password']
''' verify password '''
if AuthController.__verify_password(email,password):
user=User.get_by_email(email)
exp = datetime.timedelta(minutes=45)
token=AuthController.__generate_token(user['id'],exp)
user.pop("password")
return make_response(jsonify({"message": "Login successful",
"access_token": token,
"exp":str(exp),
"user":user}), 200)
else:
return make_response(jsonify({"message": "Invalid credentials"}), 401)
else:
return make_response(jsonify(validator.validate()), 422)
@staticmethod
def __generate_token(user_id,exp):
return create_access_token(user_id, exp)
@staticmethod
def __verify_password(email,password):
if User.exists({'email':email}):
user=User.get_by_email(email)
return check_password_hash(user['password'], password)
return False
```
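A minimal client-side sketch of the login flow above (host, port and credentials are made up; the `/api/v2` prefix comes from the blueprint registered in `app/__init__.py` later in this record):
```python
import requests

resp = requests.post(
    "http://localhost:5000/api/v2/login/",
    json={"email": "jane@example.com", "password": "secret123"},
)
if resp.status_code == 200:
    token = resp.json()["access_token"]   # sent as a Bearer token on later requests
```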
#### File: v2/views/product_resource.py
```python
from flask import jsonify, make_response, request
from flask_restful import Resource
from app.api.v2.request import Request
from app.api.v2.models.product import Product
from flask_jwt_extended import jwt_required, get_jwt_identity
from app.api.v2.views.admin import admin_required
''' collect all key errors '''
key_errors = None
class ProductController(Resource):
def __init__(self):
request_schema = {'name': 'required|string',
'category': 'required|string',
'description': 'required',
'price': 'required',
'quantity': 'required'
}
self.request_schema=request_schema
@jwt_required
def get(self, product_id=None):
if not product_id:
return make_response(jsonify({'products': Product.get()}), 200)
else:
''' search for product using product_id '''
if not Product.get_by_id(product_id):
return make_response(jsonify({'message': 'product not found'}), 404)
else:
return make_response(jsonify({'product': Product.get_by_id(product_id)}), 200)
@admin_required
def post(self):
data = request.get_json()
user = get_jwt_identity()
''' append user '''
data['created_by'] = user
all_errors = self.get_validation_errors(data, self.request_schema)
if all_errors == None:
product = Product.get_by_name(data['name'])
''' check if product with the same name exists '''
if product:
message = "product already exists consider updating the quantity"
return make_response(jsonify({'message': message}), 409)
else:
''' create product '''
Product.create(data)
return make_response(jsonify({'message': "product created successfully"}), 201)
else:
return make_response(jsonify(all_errors), 422)
@admin_required
def delete(self, product_id=None):
if not product_id:
return make_response(jsonify({"message": "productid is required"}), 422)
else:
if Product.get_by_id(product_id) != None:
Product.delete_by_Id(product_id)
return make_response(jsonify({"message": "product deleted successfully"}), 200)
else:
return make_response(jsonify({"message": "product not found"}), 404)
@admin_required
def put(self, product_id=None):
if not product_id:
return make_response(jsonify({"message": "productid is required"}), 422)
data = request.get_json()
user = get_jwt_identity()
if data != None and not Product.get_by_id(product_id):
return make_response(jsonify({"message": "product not found"}), 404)
''' update product '''
updated_list=self.get_updated_list(data, self.get_existing(product_id))
''' collect all errors '''
all_errors=self.validate_update(updated_list,self.get_existing(product_id))
if not all_errors:
found=self.get_existing(product_id)
update=all(updated_list[k] == found[k] for k in found)
if update:
return make_response(jsonify({"message": "nothing to update"}), 409)
updated_list['created_by']=user
Product.update(updated_list, product_id)
return make_response(jsonify({'message': "product updated successfully"}), 201)
return make_response(jsonify(all_errors), 422)
def get_validation_errors(self, data, request_schema):
validator = Request(data, request_schema)
all_errors = validator.validate()
if all_errors == None and int(data['price']) <= 0:
all_errors = {}
all_errors['errors'] = {"price": ['price should not be a zero or a negative value']}
return all_errors
def get_updated_list(self,data, existing):
sect=[key for key in set(existing) if key in set(data)]
if sect:
for key in sect:
existing[key]=data[key]
try:
existing['quantity']=int(existing['quantity'])
except ValueError:
existing['quantity']=0
return existing
return None
def validate_update(self,update,existing):
all_errors={}
if update == None:
all_errors['errors']='invalid request'
return all_errors
all_errors = self.get_validation_errors(update,self.request_schema)
return all_errors
def get_existing(self,product_id):
found=Product.get_by_id(product_id)
found.pop("create_at")
found.pop("updated_at")
return found
```
#### File: heroku-devops/app/__init__.py
```python
from flask import Flask, Blueprint,redirect, jsonify
from flask_restful import Resource, Api
from flask_jwt_extended import JWTManager
from utils import env
from instance.api_config import api_config
from app.api.v2.views.user_resource import UserController
from app.api.v2.views.auth_resource import AuthController
from app.api.v2.views.product_resource import ProductController
from app.api.v2.views.sale_resource import SalesController
from app.api.v2.views.logout_resource import Logout
from app.api.v2.models.user import User
from app.api.v2.black_list import get_black_list
from flask_cors import CORS
api_blueprint = Blueprint("store-api", __name__, url_prefix='/api/v2')
jwt = JWTManager()
''' store revoked tokens '''
blacklist = get_black_list()
''' setting api config '''
def create_app(config_setting):
app = Flask(__name__)
app.config.from_object(api_config[config_setting])
app.config['JWT_SECRET_KEY'] = env('JWT_SECRET')
app.config['JWT_BLACKLIST_ENABLED'] = env('JWT_BLACKLIST')
jwt.init_app(app)
''' setting api blueprint '''
api = Api(api_blueprint)
api.add_resource(UserController, '/user/',
strict_slashes=False, endpoint='post_user')
api.add_resource(AuthController, '/login/',
strict_slashes=False, endpoint='login')
api.add_resource(Logout, '/logout/',
strict_slashes=False, endpoint='logout')
api.add_resource(ProductController, '/products/',
strict_slashes=False, endpoint='products')
api.add_resource(ProductController, '/products/<int:product_id>/',
strict_slashes=False, endpoint='delete/put product')
api.add_resource(SalesController, '/sales/',
strict_slashes=False, endpoint='sales')
api.add_resource(SalesController, '/sales/<int:sale_id>/',
strict_slashes=False, endpoint='get_all_sales')
app.register_blueprint(api_blueprint)
# Catch all 404 errors
@app.errorhandler(404)
def not_found_error(error):
return jsonify({"error": "Resource not found on server"}), 404
# Catch all 500 errors
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({"error": "Internal server error has occured"}), 500
# Catch all 400 errors
@app.errorhandler(400)
def bad_request_error(error):
return jsonify({"error": "Bad request sent to the server"}), 400
@app.route('/')
def root():
return redirect('https://storeapiv2.docs.apiary.io/')
# add CORS handler
CORS(app)
return app
@jwt.user_claims_loader
def add_claims_to_access_token(identity):
user_id=identity
user=User.get_by_id(user_id)
if user:
if user['role'] == 'admin':
return {'roles': 'admin'}
else:
return {'roles': 'normal'}
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return jti in blacklist
def add_to_blacklist(jti):
return blacklist.add(jti)
```
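A minimal sketch of using the application factory above; the `'development'` config key and the `JWT_SECRET`/`JWT_BLACKLIST` environment variables are assumptions about the surrounding project:
```python
from app import create_app

app = create_app("development")   # assumed key in instance/api_config.py

if __name__ == "__main__":
    app.run(debug=True)
```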
#### File: tests/v2/auth_test.py
```python
import json
from app.tests.v2.base_test import BaseTestCase
from app.api.v2.models.user import User
from werkzeug.security import generate_password_hash, check_password_hash
class TestAuth(BaseTestCase):
def test_for_get_user_identity(self):
user = User.get_by_email(self.sample_user['email'])
self.assertTrue(user)
self.assertEqual(user['name'], self.sample_user['name'])
def test_match_password(self):
user = User.get_by_email(self.sample_user['email'])
password = "<PASSWORD>"
self.assertTrue(user)
self.assertTrue(check_password_hash(user['password'], password))
def test_login(self):
user={"email": self.sample_user['email'],
"password":<PASSWORD>['password'],
}
response = self.post('/api/v2/login', data=user)
self.assertTrue(response)
data=json.loads(response.data)
self.assertEqual({"message":data['message']}, {
'message': 'Login successful'})
self.assertEqual(response.status_code,200)
def test_login_with_invalid_credentials(self):
user={"email": "<EMAIL>",
"password": "<PASSWORD>"
}
response = self.post('/api/v2/login', data=user)
self.assertTrue(response)
self.assertEqual(json.loads(response.data), {
'message': 'Invalid credentials'})
self.assertEqual(response.status_code,401)
def test_login_with_empty_data(self):
user={"email": "",
"password": ""
}
response = self.post('/api/v2/login', data=user)
self.assertTrue(response)
self.assertEqual(json.loads(response.data),
{'errors': {'email': ['email is required'],
'password': ['<PASSWORD>']}})
self.assertEqual(response.status_code,422)
``` |
{
"source": "jomat/bender",
"score": 3
} |
#### File: bender/tests/test_callbacks.py
```python
import unittest
from unittest.mock import Mock
import nio
from my_project_name.callbacks import Callbacks
from my_project_name.storage import Storage
class CallbacksTestCase(unittest.TestCase):
def setUp(self) -> None:
# Create a Callbacks object and give it some Mock'd objects to use
self.fake_client = Mock(spec=nio.AsyncClient)
self.fake_client.user = "@fake_user:example.com"
self.fake_storage = Mock(spec=Storage)
# We don't spec config, as it doesn't currently have well defined attributes
self.fake_config = Mock()
self.callbacks = Callbacks(
self.fake_client, self.fake_storage, self.fake_config
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jomauricio/abgthe",
"score": 2
} |
#### File: apps/games/models.py
```python
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from model_utils import Choices
from localflavor.br.br_states import STATE_CHOICES
from django.core.urlresolvers import reverse
from autoslug import AutoSlugField
from taggit.managers import TaggableManager
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
class Game(TimeStampedModel):
class Meta():
verbose_name = "Jogo"
verbose_name_plural ="Jogos"
slug = AutoSlugField(populate_from='name', unique=True)
name = models.CharField("Nome", max_length=100, unique=True)
image = ProcessedImageField(upload_to="games/image/%Y/%m/%d", blank=True, null=True,
processors=[ResizeToFill(80, 90)], format='JPEG',options={'quality': 60})
description = models.TextField("Descrição", blank=True)
number_players = models.CharField("Nº de Jogadores", blank=True, max_length=10)
tags = TaggableManager(verbose_name='Tags', blank=True)
def __unicode__(self):
return u'%s' % (self.name)
def get_absolute_url(self):
return reverse('games:game_detail', args=[self.slug])
class Expansion(TimeStampedModel):
class Meta():
verbose_name = "Expansão"
verbose_name_plural ="Expansões"
game = models.ForeignKey(Game, verbose_name="Jogo",related_name="expansions")
slug = AutoSlugField(populate_from='name', unique=True)
name = models.CharField("Nome", max_length=100, unique=True)
image = ProcessedImageField(upload_to="games/image/%Y/%m/%d", blank=True, null=True,
processors=[ResizeToFill(80, 90)], format='JPEG',options={'quality': 60})
description = models.TextField("Descrição", blank=True)
number_players = models.CharField("Nº de Jogadores", blank=True, max_length=10)
tags = TaggableManager(verbose_name='Tags', blank=True)
def __unicode__(self):
return u'%s' % (self.name)
def get_absolute_url(self):
return reverse('games:expansion_detail', args=[self.slug])
```
#### File: apps/polls/admin.py
```python
from django.contrib import admin
#from ajax_select import make_ajax_form
#from ajax_select.admin import AjaxSelectAdmin, AjaxSelectAdminTabularInline, AjaxSelectAdminStackedInline
from .models import Poll, Item, SixListRanking, PurchaseAccept
#from .forms import PollSixListForm, PollPurchaseForm
class ItemInline(admin.StackedInline):
model = Item
extra = 0
def save_model(self, request, obj, form, change):
obj.create_user = request.user
obj.save()
@admin.register(Poll)
class PollAdmin(admin.ModelAdmin):
model = Poll
exclude = ('create_user',)
#inlines = [ItemInline,]
def save_model(self, request, obj, form, change):
if not change:
obj.create_user = request.user
obj.save()
else:
obj.save()
'''def save_formset(self, request, form, formset, change):
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
instance.create_user = request.user
instance.save()
formset.save_m2m()'''
@admin.register(SixListRanking)
class SixListRankingAdmin(admin.ModelAdmin):
model = SixListRanking
def save_model(self, request, obj, form, change):
obj.create_user = request.user
obj.save()
```
#### File: users/signals/handlers.py
```python
from django.db.models.signals import post_save
from django.dispatch import receiver
from abgthe.profiles.models import Profile
from abgthe.users.models import User
from allauth.socialaccount.models import SocialAccount
from allauth.account.signals import user_signed_up
from django.core.files import File
from avatar.models import Avatar
from urllib2 import urlopen
import requests
from django.core.files.temp import NamedTemporaryFile
def download_avatar(self, url):
"""
"""
r = requests.get(url)
img_temp = NamedTemporaryFile(delete=True)
img_temp.write(r.content)
img_temp.flush()
img_temp.seek(0)
return File(img_temp)
@receiver(post_save, sender=User)
def save_in_user(sender, **kwargs):
obj=kwargs['instance']
created=kwargs['created']
if created:
p = Profile(user=obj, username=obj.username)
p.save()
@receiver(user_signed_up)
def user_signed_up_(request, user, sociallogin=None, **kwargs):
'''
When a social account is created successfully and this signal is received,
django-allauth passes in the sociallogin param, giving access to metadata on the remote account, e.g.:
sociallogin.account.provider # e.g. 'twitter'
sociallogin.account.get_avatar_url()
sociallogin.account.get_profile_url()
sociallogin.account.extra_data['screen_name']
See the socialaccount_socialaccount table for more in the 'extra_data' field.
'''
if sociallogin:
# Extract first / last names from social nets and store on Profile record
if sociallogin.account.provider == 'facebook':
user.profile.first_name = sociallogin.account.extra_data['first_name']
user.profile.last_name = sociallogin.account.extra_data['last_name']
if sociallogin.account.provider == 'google':
user.profile.first_name = sociallogin.account.extra_data['given_name']
user.profile.last_name = sociallogin.account.extra_data['family_name']
user.profile.save()
image_avatar = user.profile.download_avatar(sociallogin.account.get_avatar_url())
avatar = Avatar(user=user,primary=True, avatar=image_avatar)
avatar.save()
``` |
{
"source": "Jomcgi/scrubadub",
"score": 2
} |
#### File: scrubadub/detectors/spacy.py
```python
import os
from wasabi import msg
from typing import Generator, Iterable, Optional, Sequence
try:
import spacy
except ImportError as e:
if e.name == "spacy":
raise ImportError(
"Could not find module 'spacy'. If you want to use extras,"
" make sure you install scrubadub with 'pip install scrubadub[spacy]'"
)
from . import register_detector
from .base import Detector
from ..filth import NamedEntityFilth, Filth, NameFilth, OrganizationFilth
from ..utils import CanonicalStringSet
class SpacyEntityDetector(Detector):
"""Use spacy's named entity recognition to clean named entities.
List the specific entities to detect by passing ``named_entities``, e.g.
``{'PERSON'}``.
"""
filth_cls_map = {
'PERSON': NameFilth,
'PER': NameFilth,
'ORG': OrganizationFilth
}
name = 'spacy'
language_to_model = {
"zh": "zh_core_web_trf",
"nl": "nl_core_news_trf",
"en": "en_core_web_trf",
"fr": "fr_dep_news_trf",
"de": "de_dep_news_trf",
"es": "es_dep_news_trf",
}
disallowed_nouns = CanonicalStringSet(["skype"])
def __init__(self, named_entities: Iterable[str] = {'PERSON', 'PER'},
model: Optional[str] = None, **kwargs):
super(SpacyEntityDetector, self).__init__(**kwargs)
# Spacy NER are all upper cased
self.named_entities = {entity.upper() for entity in named_entities}
# Fixes a warning message from transformers that is pulled in via spacy
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
self.check_spacy_version()
if model is None:
if self.language in self.language_to_model:
model = self.language_to_model[self.language]
else:
model = "{}_core_news_lg".format(self.language)
if not self.check_spacy_model(model):
raise ValueError("Unable to find spacy model '{}'. Is your language supported? "
"Check the list of models available here: "
"https://github.com/explosion/spacy-models ".format(model))
self.nlp = spacy.load(model)
# If the model doesn't support named entity recognition
if 'ner' not in [step[0] for step in self.nlp.pipeline]:
raise ValueError(
"The spacy model '{}' doesn't support named entity recognition, "
"please choose another model.".format(model)
)
# Only enable necessary pipes
self.nlp.select_pipes(enable=["transformer", "tagger", "parser", "ner"])
@staticmethod
def check_spacy_version() -> bool:
# spacy_info = spacy.info()
spacy_version = spacy.__version__ # spacy_info.get('spaCy version', spacy_info.get('spacy_version', None))
spacy_major = 0
if spacy_version is None:
raise ImportError('Spacy v3 needs to be installed. Unable to detect spacy version.')
try:
spacy_major = int(spacy_version.split('.')[0])
except Exception:
raise ImportError('Spacy v3 needs to be installed. Spacy version {} is unknown.'.format(spacy_version))
if spacy_major != 3:
raise ImportError('Spacy v3 needs to be installed. Detected version {}.'.format(spacy_version))
return True
@staticmethod
def check_spacy_model(model) -> bool:
spacy_info = spacy.info()
models = list(spacy_info.get('pipelines', spacy_info.get('models', None)).keys())
if models is None:
raise ValueError('Unable to detect spacy models.')
if model not in models:
msg.info("Downloading spacy model {}".format(model))
spacy.cli.download(model)
# spacy.info() doesn't update after a spacy.cli.download, so there's no point checking it
models.append(model)
# Always returns true, if it fails to download, spacy sys.exit()s
return model in models
def iter_filth_documents(self, doc_names: Sequence[Optional[str]],
doc_list: Sequence[str]) -> Generator[Filth, None, None]:
for doc_name, doc in zip(doc_names, self.nlp.pipe(doc_list)):
for ent in doc.ents:
if ent.label_ in self.named_entities:
# If there is no standard 'filth', returns a NamedEntity filth
filth_cls = self.filth_cls_map.get(ent.label_, NamedEntityFilth)
yield filth_cls(beg=ent.start_char,
end=ent.end_char,
text=ent.text,
document_name=(str(doc_name) if doc_name else None), # None if no doc_name provided
detector_name=self.name,
label=ent.label_,
locale=self.locale)
def iter_filth(self, text: str, document_name: Optional[str] = None) -> Generator[Filth, None, None]:
yield from self.iter_filth_documents([document_name], [text])
@classmethod
def supported_locale(cls, locale: str) -> bool:
"""Returns true if this ``Detector`` supports the given locale.
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH".
:type locale: str
:return: ``True`` if the locale is supported, otherwise ``False``
:rtype: bool
"""
return True
register_detector(SpacyEntityDetector, autoload=False)
```
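A minimal usage sketch for the detector above, assuming the scrubadub v2 `Scrubber` API. Because the detector is registered with `autoload=False`, it has to be added explicitly; the sample text and the exact replacement tokens are illustrative:
```python
import scrubadub
from scrubadub.detectors.spacy import SpacyEntityDetector

scrubber = scrubadub.Scrubber(locale="en_US")
scrubber.add_detector(SpacyEntityDetector(named_entities={"PERSON", "ORG"}, locale="en_US"))
print(scrubber.clean("John Smith works at Acme Corp."))
# -> something like "{{NAME}} works at {{ORGANIZATION}}."
```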
#### File: scrubadub/filth/known.py
```python
from .base import Filth
import typing
class KnownFilth(Filth):
type = 'known'
def __init__(self, *args, comparison_type: typing.Optional[str] = None, **kwargs):
super(KnownFilth, self).__init__(*args, **kwargs)
self.comparison_type = comparison_type
# def __repr__(self) -> str:
# return self._to_string(['text', 'document_name', 'beg', 'end', 'comparison_type', 'detector_name', 'locale'])
```
#### File: scrubadub/filth/ssn.py
```python
from faker import Faker
from .base import Filth
class SSNFilth(Filth):
type = 'ssn'
@staticmethod
def generate(faker: Faker) -> str:
"""Generates an example of this ``Filth`` type, usually using the faker python library.
:param faker: The ``Faker`` class from the ``faker`` library
:type faker: Faker
:return: An example of this ``Filth``
:rtype: str
"""
return faker.ssn()
```
#### File: scrubadub/post_processors/base.py
```python
from typing import Optional, Sequence
from ..filth import Filth
class PostProcessor(object):
name = 'post_processor' # type: str
def __init__(self, name: Optional[str] = None):
if name is not None:
self.name = name
def process_filth(self, filth_list: Sequence[Filth]) -> Sequence[Filth]:
raise NotImplementedError('must be overridden by base classes')
```
#### File: scrubadub/post_processors/__init__.py
```python
import sys
from typing import Dict, Type
if sys.version_info >= (3, 8):
from typing import TypedDict # pylint: disable=no-name-in-module
else:
from typing_extensions import TypedDict
from .base import PostProcessor
from .text_replacers.filth_type import FilthTypeReplacer
from .text_replacers.hash import HashReplacer
from .text_replacers.numeric import NumericReplacer
from .text_replacers.prefix_suffix import PrefixSuffixReplacer
PostProcessorConfigurationItem = TypedDict(
'PostProcessorConfigurationItem',
{'post_processor': Type[PostProcessor], 'autoload': bool, 'index': int}
)
post_processor_configuration = {
# PostProcessors that are not automatically loaded by scrubadub
NumericReplacer.name: {'post_processor': NumericReplacer, 'autoload': False, 'index': 0},
PrefixSuffixReplacer.name: {'post_processor': PrefixSuffixReplacer, 'autoload': False, 'index': 1},
FilthTypeReplacer.name: {'post_processor': FilthTypeReplacer, 'autoload': False, 'index': 0},
HashReplacer.name: {'post_processor': HashReplacer, 'autoload': False, 'index': 0},
} # type: Dict[str, PostProcessorConfigurationItem]
def register_post_processor(post_processor: Type[PostProcessor], autoload: bool = False, index: int = 0):
"""Register a PostProcessor for use with the ``Scrubber`` class.
This is used when you don't want to have to import a post-processor by default.
It may be useful for post-processors with large or unusual dependencies, which you may not always want to install.
In this case you can use ``register_post_processor(NewPostProcessor, autoload=True)`` after your post-processor
definition so that it is automatically registered whenever the file is imported.
This means that you don't need to import ``NewPostProcessor`` in this file and so its dependencies won't need
to be installed just to import this package.
The argument ``autoload`` sets whether a new ``Scrubber()`` instance should load this ``PostProcessor`` by default.
:param post_processor: The ``PostProcessor`` to register with the scrubadub post-processor configuration.
:type post_processor: PostProcessor class
:param autoload: Whether to automatically load this ``PostProcessor`` on ``Scrubber`` initialisation.
:type autoload: bool
:param index: The location/index in which this ``PostProcessor`` should be added.
:type index: int
"""
post_processor_configuration[post_processor.name] = {
'post_processor': post_processor,
'autoload': autoload,
'index': index,
}
current_module = __import__(__name__)
setattr(current_module.post_processors, post_processor.__name__, post_processor)
```
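A hypothetical example of the registration hook above. `RedactReplacer` is not part of scrubadub; it only illustrates the pattern, and the `replacement_string` attribute is assumed to be what the bundled text replacers read:
```python
from scrubadub.post_processors import PostProcessor, register_post_processor


class RedactReplacer(PostProcessor):
    """Replace every piece of filth with a fixed placeholder."""
    name = 'redact_replacer'

    def process_filth(self, filth_list):
        for filth in filth_list:
            filth.replacement_string = 'REDACTED'   # assumed attribute, see note above
        return filth_list


register_post_processor(RedactReplacer, autoload=True, index=0)
```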
#### File: scrubadub/scrubadub/utils.py
```python
import re
import locale as locale_module
from typing import Optional, Tuple
try:
unicode # type: ignore # tell mypy to ignore the fact that this doesn't exist in python3
except NameError:
basestring = str # Compatibility for Python 2 and 3
class CanonicalStringSet(set):
"""Just like a set, except it makes sure that all elements are lower case
strings.
"""
def _cast_as_lower(self, x):
if not isinstance(x, basestring):
raise TypeError('CanonicalStringSet only works with strings')
return x.lower()
def __init__(self, *elements):
super(CanonicalStringSet, self).__init__()
if elements:
self.update(*elements)
def __contains__(self, element):
return super(CanonicalStringSet, self).__contains__(
self._cast_as_lower(element)
)
def add(self, element):
return super(CanonicalStringSet, self).add(
self._cast_as_lower(element)
)
def update(self, elements):
for element in elements:
self.add(element)
def remove(self, element):
return super(CanonicalStringSet, self).remove(
self._cast_as_lower(element)
)
def discard(self, element):
return super(CanonicalStringSet, self).discard(
self._cast_as_lower(element)
)
class Lookup(object):
"""The Lookup object is used to create an in-memory reference table to
create unique identifiers for ``Filth`` that is encountered.
"""
def __init__(self):
self.table = {}
def __getitem__(self, key):
try:
return self.table[key]
except KeyError:
self.table[key] = len(self.table)
return self.table[key]
def locale_transform(locale: str) -> str:
"""Normalise the locale string, e.g. 'fr' -> 'fr_FR'.
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH".
:type locale: str
:return: The normalised locale string
:rtype: str
"""
normalised = locale_module.normalize(locale.lower())
if normalised not in locale_module.locale_alias.values():
raise ValueError("Unknown locale '{}', not in locale.locale_alias".format(locale))
return normalised
def locale_split(locale: str) -> Tuple[Optional[str], Optional[str]]:
"""Split the locale string into the language and region.
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH".
:type locale: str
:return: The two-letter language code and the two-letter region code in a tuple.
:rtype: tuple, (str, str)
"""
locale = locale_transform(locale)
regex = r'(?P<language>[0-9a-zA-Z]+)(_(?P<region>[0-9a-zA-Z]+))?' \
r'(\.(?P<charset>[0-9a-zA-Z-]+)(@(?P<charset2>[0-9a-zA-Z]+))?)?'
match = re.match(regex, locale)
if match is None:
raise ValueError('Locale does not match expected format.')
return match.group('language').lower(), match.group('region').upper()
```
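A small behaviour sketch of the helpers above, derived from the code itself:
```python
from scrubadub.utils import CanonicalStringSet, locale_split

assert locale_split('en_gb') == ('en', 'GB')      # normalised via locale.normalize

nouns = CanonicalStringSet(['Skype', 'EMAIL'])
assert 'skype' in nouns and 'email' in nouns      # members are always lower-cased
```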
#### File: scrubadub/tests/run.py
```python
import os
import sys
import subprocess
import yaml
from wasabi import msg
def run_test(command, directory):
"""Execute a command that runs a test"""
wrapped_command = "cd %s && %s" % (directory, command)
pipe = subprocess.Popen(
wrapped_command, shell=True,
)
pipe.wait()
if pipe.returncode == 0:
msg.good("TEST PASSED")
else:
msg.fail("TEST FAILED")
return pipe.returncode
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# load the script tests from the .travis.yml file
with open(os.path.join(root_dir, '.travis.yml')) as stream:
travis_yml = yaml.safe_load(stream.read())
tests = travis_yml['script']
# run the tests
if isinstance(tests, str):
returncode = run_test(tests, root_dir)
elif isinstance(tests, (list, tuple)):
returncode = 0
for test in tests:
returncode += run_test(test, root_dir)
if returncode == 0:
msg.good("ALL TESTS PASSED")
else:
msg.fail("SOME TESTS FAILED, SEE ABOVE")
sys.exit(returncode)
```
#### File: scrubadub/tests/test_api.py
```python
import unittest
import scrubadub
class APITestCase(unittest.TestCase):
def test_clean(self):
"""Test the top level clean api"""
self.assertEqual(
"This is a test message for {{EMAIL}}",
scrubadub.clean("This is a test message for <EMAIL>"),
)
def test_clean_documents(self):
"""Test the top level clean_documents api"""
self.assertEqual(
{
"first.txt": "This is a test message for {{EMAIL}}",
"second.txt": "Hello {{TWITTER}} call me on {{PHONE}}.",
},
scrubadub.clean_documents(
{
"first.txt": "This is a test message for <EMAIL>",
"second.txt": "Hello @Jane call me on +33 4 41 26 62 36.",
},
),
)
def test_list_filth(self):
"""Test the top level list_filth api"""
filths = scrubadub.list_filth("This is a test message for <EMAIL>")
self.assertEqual(
[scrubadub.filth.EmailFilth(text='<EMAIL>', detector_name='email', beg=27, end=46)],
filths,
)
def test_list_filth_documents(self):
"""Test the top level list_filth_documents api"""
filths = scrubadub.list_filth_documents(
{
"first.txt": "This is a test message for <EMAIL>",
"second.txt": "Hello @Jane call me on +33 4 41 26 62 36.",
}
)
self.assertEqual(
scrubadub.Scrubber._sort_filths([
scrubadub.filth.EmailFilth(
text='<EMAIL>', document_name='first.txt', detector_name='email', beg=27, end=46
),
scrubadub.filth.TwitterFilth(
text='@Jane', document_name='second.txt', detector_name='twitter', beg=6, end=11
),
scrubadub.filth.PhoneFilth(
text='+33 4 41 26 62 36', document_name='second.txt', detector_name='phone', beg=23, end=40
),
]),
scrubadub.Scrubber._sort_filths(filths),
)
def test_quickstart(self):
"""Test the example given in the quick start docs"""
text = "My cat can be contacted on <EMAIL>, or 1800 555-5555"
self.assertEqual(
'My cat can be contacted on {{EMAIL}}, or {{PHONE}}',
scrubadub.clean(text),
)
```
#### File: scrubadub/tests/test_exceptions.py
```python
import unittest
from scrubadub import exceptions
class ExceptionsTestCase(unittest.TestCase):
def test_render(self):
exception = exceptions.ScrubadubException()
exception.var = 'there'
self.assertEqual(exception.render('test'), 'test')
self.assertEqual(exception.render('url %(issues_url)s'), 'url ' + exception.issues_url)
self.assertEqual(exception.render('hello %(var)s'), 'hello there')
```
#### File: scrubadub/tests/test_filth.py
```python
import re
import unittest
from scrubadub.filth import Filth, MergedFilth
from scrubadub.exceptions import InvalidReplaceWith, FilthMergeError
class FilthTestCase(unittest.TestCase):
def test_disallowed_replace_with(self):
"""replace_with should fail gracefully"""
filth = Filth()
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('surrogate')
with self.assertRaises(InvalidReplaceWith):
filth.replace_with('something_invalid')
def test_nonoverlapping_filth(self):
"""can't merge non-overlapping filth"""
a_filth = Filth(beg=0, end=3, text="the")
b_filth = Filth(beg=4, end=7, text="end")
with self.assertRaises(FilthMergeError):
a_filth.merge(b_filth)
with self.assertRaises(FilthMergeError):
b_filth.merge(a_filth)
def test_text_merge(self):
"""make sure text length is correct"""
class SomeFilth(Filth):
type = 'something'
text = "the end"
a_filth = SomeFilth(beg=0, end=3, text=text[:3])
b_filth = SomeFilth(beg=1, end=7, text=text[1:])
c_filth = a_filth.merge(b_filth)
self.assertEqual(c_filth.text, text)
c_filth = b_filth.merge(a_filth)
self.assertEqual(c_filth.text, text)
d_filth = c_filth.merge(a_filth)
self.assertEqual(d_filth.text, text)
b_filth.end = 2
with self.assertRaises(FilthMergeError):
b_filth.merge(a_filth)
def test_invalid_merge_documents(self):
"""Ensure Filth in two different documents cant be merged"""
filth_a = Filth(0, 2, text='aa', document_name='one')
filth_b = Filth(1, 2, text='a', document_name='two')
with self.assertRaises(FilthMergeError):
filth_a.merge(filth_b)
with self.assertRaises(FilthMergeError):
filth_b.merge(filth_a)
def test_filth_string(self):
"""Test the Filth to string function"""
filth = Filth(beg=0, end=5)
self.assertEqual(str(filth), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5)
self.assertEqual(filth.__repr__(), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5)
self.assertEqual(filth._to_string(), "<Filth text='' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello')
self.assertEqual(str(filth), "<Filth text='hello' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello', document_name='hello.txt')
self.assertEqual(str(filth), "<Filth text='hello' document_name='hello.txt' beg=0 end=5>")
filth = Filth(beg=0, end=5, text='hello', document_name='hello.txt')
self.assertEqual(filth._to_string(attributes=['text']), "<Filth text='hello'>")
self.assertEqual(filth._to_string(attributes=['beg', 'end', 'text']), "<Filth beg=0 end=5 text='hello'>")
self.assertEqual(
filth._to_string(attributes=['text', 'document_name']),
"<Filth text='hello' document_name='hello.txt'>"
)
def test_merged_to_string(self):
"""Test the MergedFilth to string"""
class TestFilth(Filth):
type = 'test_filth'
merged = MergedFilth(TestFilth(0, 2, 'ab'), Filth(1, 2, 'b'))
self.assertEqual(merged.__repr__(), "<MergedFilth filths=[<TestFilth text='ab' beg=0 end=2>, <Filth text='b' beg=1 end=2>]>")
def test_equality(self):
"""Test the filth equality function"""
self.assertTrue(
Filth(beg=0, end=5, text='hello') ==
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') ==
Filth(beg=0, end=5, text='hello', match=re.match('123', '1234'))
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=1, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=6, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hellou')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') ==
Filth(beg=0, end=5, text='hello', document_name='test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hello', document_name='test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') !=
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test') !=
Filth(beg=0, end=5, text='hello', document_name='another_test')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') ==
Filth(beg=0, end=5, text='hello', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', detector_name='another_tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', detector_name='tester') !=
Filth(beg=0, end=5, text='hello')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello') !=
Filth(beg=0, end=5, text='hello', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') ==
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='another_tester')
)
self.assertTrue(
Filth(beg=0, end=5, text='hello', document_name='test', detector_name='tester') !=
Filth(beg=0, end=5, text='hello', document_name='another_test', detector_name='tester')
)
``` |
{
"source": "jomey/isnobal-local",
"score": 2
} |
#### File: isnobal-local/comparison/common.py
```python
import dask
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as mpatches
import matplotlib.font_manager as font_manager
import holoviews as hv
from holoviews import dim, opts
from pathlib import Path, PurePath
from snobedo.lib.dask_utils import start_cluster, client_ip_and_port
from snobedo.snotel import SnotelLocations
from raster_file import RasterFile
SHARED_STORE = PurePath('/uufs/chpc.utah.edu/common/home/skiles-group1')
DATA_DIR = SHARED_STORE.joinpath('jmeyer')
HRRR_DIR = SHARED_STORE.joinpath('HRRR_water_years')
SNOBAL_DIR = SHARED_STORE.joinpath('erw_isnobal')
ASO_DIR = DATA_DIR.joinpath('ASO-data')
CBRFC_DIR = DATA_DIR.joinpath('CBRFC')
SNOTEL_DIR = DATA_DIR.joinpath( 'Snotel')
FIGURES_DIR = DATA_DIR.joinpath('figures')
# Plot styles
BOKEH_FONT=dict(fontsize={'title': 14, 'labels': 12, 'xticks': 12, 'yticks': 12, 'legend': 12})
# Xarray options
# Used in comparison to SNOTEL site locations
COARSEN_OPTS = dict(x=2, y=2)
RESAMPLE_1_DAY_OPTS = dict(time='1D', base=23)
## CBRFC zones
# Corresponds to values in classification tif
ALEC2HLF = 1
ALEC2HMF = 2
ALEC2HUF = 3
# CBRFC values are deliverd in inches
INCH_TO_MM = 25.4
def aspect_classes():
aspects = RasterFile(DATA_DIR / 'project-data/iSnobal/ERW/aspect_class_ERW.tif')
aspects_data = aspects.band_values()
return aspects_data
def cbrfc_zones():
zones = RasterFile(CBRFC_DIR / 'ERW_CBRFC_zones.tif')
zone_data = zones.band_values()
zone_data[zone_data==241] = 0
return zone_data
# HRRR helpers
def hrrr_pixel_index(hrrr_file, site):
x_index = np.abs(hrrr_file.longitude.values - hrrr_longitude(site.lon))
y_index = np.abs(hrrr_file.latitude.values - site.lat)
return np.unravel_index((x_index + y_index).argmin(), x_index.shape)
def hrrr_longitude(longitude):
return longitude % 360
@dask.delayed
def hrrr_snotel_pixel(file, x_pixel_index, y_pixel_index):
"""
Read GRIB file surface values, remove unused dimensions, and
set the time dimension.
Required to be able to concatenate all GRIB files to a time series
"""
hrrr_file = xr.open_dataset(
file.as_posix(),
engine='cfgrib',
backend_kwargs={
'errors': 'ignore',
'indexpath': '',
'filter_by_keys': {
'level': 0,
'typeOfLevel': 'surface',
}
},
).isel(x=[x_pixel_index], y=[y_pixel_index])
del hrrr_file.coords['valid_time']
del hrrr_file.coords['surface']
del hrrr_file.coords['step']
return hrrr_file.expand_dims(time=[hrrr_file.time.values])
# Plot settings and helpers
plt.rcParams.update(
{
'axes.labelsize': 10
}
)
LEGEND_TEXT = "{0:10} {1:8}"
LEGEND_DATE = "%Y-%m-%d"
def legend_text(label, value, color='none'):
return mpatches.Patch(
color=color, label=LEGEND_TEXT.format(label, value)
)
def add_legend_box(ax, entries):
ax.legend(
handles=entries,
loc='upper left',
prop=font_manager.FontProperties(
family='monospace', style='normal', size=8
),
)
## Use hvplot
def use_hvplot():
import hvplot.xarray
import hvplot.pandas
pd.options.plotting.backend = 'holoviews'
```
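A quick check of the longitude wrapping that `hrrr_pixel_index` relies on; HRRR grids store longitude in the 0-360 range, so western-hemisphere values are shifted before the nearest-pixel search (the test coordinate is arbitrary):
```python
assert abs(hrrr_longitude(-106.95) - 253.05) < 1e-6   # -106.95 % 360
```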
#### File: snobedo/lib/isnobal_helpers.py
```python
import datetime
def day_filter(dataset):
"""
There is an issue with AWSM (#55 on GitHub), where there is an extra
hour in SMRF outputs. This filter removes the additional hour of the day.
:param dataset: XArray dataset
:return:
Xarray dataset with `sel` filter applied
"""
return dataset.sel(time=dataset.time[0].dt.strftime('%Y-%m-%d').values)
def hour_filter(dataset):
"""
iSnobal sometimes has an additional hour in their snow.nc outputs, aside
from the expected end of day value. This filter ensure the datasets are
only reading the last hour of the day value and ignore others.
:param dataset: XArray dataset
:return:
Xarray dataset with `sel` filter applied
"""
return dataset.sel(time=datetime.time(23))
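# Illustrative combined use (the file path is hypothetical):
#   snow = xr.open_dataset('wy2021/run20210401/snow.nc')
#   end_of_day = hour_filter(snow)   # keep only the 23:00 record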
``` |
{
"source": "jomey/raster_compare",
"score": 3
} |
#### File: raster_compare/base/raster_data_difference.py
```python
import numpy as np
from osgeo import gdal
from .median_absolute_deviation import MedianAbsoluteDeviation
from .raster_file import RasterFile
class RasterDataDifference(object):
GDAL_DRIVER = gdal.GetDriverByName('GTiff')
def __init__(self, lidar, sfm, band_number):
self.lidar = RasterFile(lidar, band_number)
self.sfm = RasterFile(sfm, band_number)
self._aspect = None
self.band_values = self.sfm.band_values() - self.lidar.band_values()
self.band_mask = self.band_values.mask
self.mad = MedianAbsoluteDeviation(self.band_values.compressed())
self._slope = None
@property
def band_values(self):
return self._band_values
@band_values.setter
def band_values(self, value):
self._band_values = value
@property
def band_mask(self):
return self._band_mask
@band_mask.setter
def band_mask(self, value):
self._band_mask = np.copy(value)
def band_outlier_max(self):
return self.mad.data_median + self.mad.standard_deviation(2)
def band_outlier_min(self):
return self.mad.data_median - self.mad.standard_deviation(2)
@property
def band_filtered(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_outside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def band_unfiltered(self):
self.band_values.mask = self.band_mask
return self.band_values
@property
def band_outliers(self):
self.band_values.mask = np.ma.mask_or(
self.band_mask,
np.ma.masked_inside(
self.band_unfiltered,
self.band_outlier_min(),
self.band_outlier_max()
).mask
)
return self.band_values
@property
def aspect(self):
if self._aspect is None:
self._aspect = self.sfm.aspect - self.lidar.aspect
return self._aspect
@property
def slope(self):
if self._slope is None:
self._slope = self.sfm.slope - self.lidar.slope
return self._slope
```
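A numeric sketch of the outlier window used by `band_filtered` and `band_outliers`, assuming `MedianAbsoluteDeviation.standard_deviation(n)` returns `n * 1.4826 * MAD` (that class is not shown in this record):
```python
import numpy as np

diffs = np.array([0.10, 0.20, 0.15, 0.30, 5.00])   # hypothetical elevation differences
median = np.median(diffs)                           # 0.20
mad = np.median(np.abs(diffs - median))             # 0.10
lower, upper = median - 2 * 1.4826 * mad, median + 2 * 1.4826 * mad
kept = diffs[(diffs >= lower) & (diffs <= upper)]   # the 5.00 outlier falls outside the window
```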
#### File: raster_compare/plots/area_differences.py
```python
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from matplotlib.gridspec import GridSpec
from palettable.colorbrewer.diverging import RdBu_5 as PlotColor
from .plot_base import PlotBase
# Plot differences between rasters and show histogram of the differences
class AreaDifferences(PlotBase):
TITLE = '{0} differences'
HIST_TEXT = 'Median (abs): {:4.2f}\n' \
'NMAD : {:4.2f}\n' \
'68.3% (abs): {:4.2f}\n' \
'95% (abs): {:4.2f}'
HIST_BIN_WIDTH = 0.01
BOX_PLOT_TEXT = '{0:8}: {1:6.3f}'
BOX_PLOT_WHISKERS = [5, 95]
OUTPUT_FILE_NAME = 'elevation_differences.png'
COLORMAP = PlotColor.mpl_colormap
def add_hist_stats(self, ax):
box_text = self.HIST_TEXT.format(
self.data.mad.percentile(50, absolute=True),
self.data.mad.normalized(),
self.data.mad.standard_deviation(1, absolute=True),
self.data.mad.standard_deviation(2, absolute=True),
)
self.add_to_legend(
ax, box_text,
loc='upper left', handlelength=0, handletextpad=0,
)
def add_box_plot_stats(self, ax, box_plot_data, data):
text = [
self.BOX_PLOT_TEXT.format(
'Median', box_plot_data['medians'][0].get_ydata()[0]
),
self.BOX_PLOT_TEXT.format('Mean', data.mean()),
self.BOX_PLOT_TEXT.format('Nmad', self.data.mad.normalized()),
self.BOX_PLOT_TEXT.format('Std', data.std()),
]
self.add_to_legend(
ax, '\n'.join(text), handlelength=0, handletextpad=0
)
# TODO - Zoom into each graph to only show values within the 95th
# percentile
def plot(self):
self.print_status()
figure = plt.figure(
figsize=(17, 14),
dpi=150,
constrained_layout=False,
)
grid_opts = dict(figure=figure, height_ratios=[3, 1])
difference = self.data.band_values
if self.data_description == 'Elevation':
grid_spec = GridSpec(
nrows=2, ncols=3, width_ratios=[3, 2, 2], **grid_opts
)
bins = np.arange(
difference.min(),
difference.max() + self.HIST_BIN_WIDTH,
self.HIST_BIN_WIDTH
)
bounds = dict(
norm=colors.BoundaryNorm(
boundaries=bins, ncolors=self.COLORMAP.N,
)
)
else:
grid_spec = GridSpec(
nrows=2, ncols=2, width_ratios=[3, 2], **grid_opts
)
bounds = dict()
ax1 = figure.add_subplot(grid_spec[0, :])
diff_plot = ax1.imshow(
difference,
cmap=self.COLORMAP,
alpha=0.8,
extent=self.sfm.extent,
**bounds
)
ax1.set_title(self.TITLE.format(self.data_description))
self.insert_colorbar(
ax1, diff_plot, self.SCALE_BAR_LABEL[self.data_description]
)
difference = difference[np.isfinite(difference)].compressed()
# Reset bins to entire range of values for Histogram
bins = np.arange(
np.nanmin(difference),
np.nanmax(difference),
self.HIST_BIN_WIDTH
)
ax2 = figure.add_subplot(grid_spec[1, 0])
ax2.hist(difference, bins=bins)
ax2.set_xlabel(self.SCALE_BAR_LABEL[self.data_description])
ax2.set_ylabel("Count $(10^5)$")
ax2.ticklabel_format(style='sci', axis='y', scilimits=(4, 4))
ax2.yaxis.get_offset_text().set_visible(False)
if self.data_description == 'Elevation':
ax2.set_title('Relative Elevation Differences')
ax3 = figure.add_subplot(grid_spec[1, 1])
box = ax3.boxplot(
difference,
sym='k+',
whis=self.BOX_PLOT_WHISKERS,
positions=[0.1]
)
ax3.set_xlim([0, .35])
ax3.tick_params(
axis='x', which='both', bottom=False, top=False, labelbottom=False
)
ax3.set_ylabel(self.SCALE_BAR_LABEL[self.data_description])
self.add_box_plot_stats(ax3, box, difference)
if self.data_description == 'Elevation':
ax3.set_title('Relative Elevation Differences')
if self.data_description == 'Elevation':
ax4 = figure.add_subplot(grid_spec[1, 2])
probplot = sm.ProbPlot(difference)
probplot.qqplot(ax=ax4, line='s')
ax4.get_lines()[0].set(markersize=1)
ax4.get_lines()[1].set(color='black', dashes=[4, 1])
ax4.set_title('Normal Q-Q Plot')
plt.savefig(self.output_file)
return figure
```
#### File: raster_compare/plots/plot_layout.py
```python
import matplotlib.pyplot as plt
class PlotLayout(object):
@staticmethod
def two_row():
"""
Two row design with legend axes at the bottom
:return:
"""
return plt.subplots(
nrows=3, figsize=(6, 12),
gridspec_kw={'height_ratios': [1, 1, 0.07], 'hspace': 0.3}
)
@staticmethod
def two_col():
"""
Two column design with legend axes on the right
:return:
"""
return plt.subplots(
ncols=3, figsize=(12, 7),
gridspec_kw={'width_ratios': [1, 1, 0.05], 'wspace': 0.07}
)
```
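A sketch of how these layouts are meant to be consumed; the import path and the random data are assumptions:
```python
import numpy as np
from raster_compare.plots.plot_layout import PlotLayout

fig, (ax_top, ax_bottom, ax_legend) = PlotLayout.two_row()
image = ax_top.imshow(np.random.rand(10, 10))
ax_bottom.imshow(np.random.rand(10, 10))
fig.colorbar(image, cax=ax_legend, orientation='horizontal')
```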
#### File: raster_compare/plots/regression.py
```python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.api as sm
from .plot_base import PlotBase
class Regression(PlotBase):
EXPLANATORY_VARS = [
'lidar_elevation', 'lidar_slope', 'lidar_aspect',
'sfm_elevation', 'sfm_slope', 'sfm_aspect',
]
def __init__(self, data, **kwargs):
super().__init__(data, **kwargs)
self.df = self.load_data_frame()
@property
def df(self):
return self._df
@df.setter
def df(self, value):
self._df = value
def load_data_frame(self):
return pd.DataFrame({
'diff': self.data.band_filtered.filled(np.NaN).ravel(),
'lidar_elevation': self.lidar.band_values().filled(np.NaN).ravel(),
'sfm_elevation': self.sfm.band_values().filled(np.NaN).ravel(),
'lidar_slope': self.lidar.slope.filled(np.NaN).ravel(),
'sfm_slope': self.sfm.slope.filled(np.NaN).ravel(),
'lidar_aspect': self.lidar.aspect.filled(np.NaN).ravel(),
'sfm_aspect': self.sfm.aspect.filled(np.NaN).ravel(),
})
def save_plot(self, **kwargs):
plt.savefig(
self.output_path + '/{0}_scatter_plot.png'.format(kwargs['name'])
)
def plot_lidar_vs_sfm(self):
self.print_status('Lidar vs. SfM elevations')
plt.figure()
plt.scatter(self.sfm.band_filtered, self.lidar.band_filtered, s=0.5)
plt.xlabel('SfM')
plt.ylabel('Lidar')
self.save_plot(name='Lidar_vs_sfm')
def plot_for_query(self, query, name):
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 8))
row = 0
col = 0
for variable in self.EXPLANATORY_VARS:
self.print_status(name + ' difference values vs. ' + variable)
query.plot(
kind='scatter', x=variable, y='diff', s=0.5, ax=axes[row, col]
)
axes[row, col].set_title(variable)
axes[row, col].set_xlabel('')
if col == 0:
axes[row, col].set_ylabel('Difference in Elevation')
else:
axes[row, col].set_ylabel('')
if col == 2:
col = 0
row += 1
else:
col += 1
self.save_plot(name=name + '_elevation_diff')
def plot_difference_vs_source(self):
self.plot_for_query(self.df.query('diff < 0'), 'negative')
self.plot_for_query(self.df.query('diff > 0'), 'positive')
@staticmethod
def aspect_to_category(degree):
if 45 < degree <= 135:
return 'East'
elif 135 < degree <= 225:
return 'South'
elif 225 < degree <= 315:
return 'West'
elif degree is np.NAN:
return np.NaN
else:
return 'North'
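# e.g. 90 -> 'East', 180 -> 'South', 270 -> 'West', 10 (or 350) -> 'North'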
def categorize_aspect(self, column_name, from_column_name):
self.df[column_name] = self.df.apply(
lambda row: self.aspect_to_category(row[from_column_name]), axis=1
)
self.df[column_name].astype('category')
def hexbin_plot(self):
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(12, 8))
row = 0
col = 0
for variable in self.EXPLANATORY_VARS:
self.print_status('Hexbin plot for ' + variable)
self.df.query('-.48 > diff | diff > 0.6').plot.hexbin(
x='diff', y=variable, gridsize=40, ax=axes[row, col]
)
axes[row, col].set_title(variable)
axes[row, col].set_xlabel('')
if col == 0:
axes[row, col].set_ylabel('Difference in Elevation')
else:
axes[row, col].set_ylabel('')
if col == 2:
col = 0
row += 1
else:
col += 1
self.save_plot(name='hexbin_elevation_diff')
def qqplot(self):
fig = plt.figure(figsize=(12,8))
probplot = sm.ProbPlot(self.data.band_filtered.compressed())
ax = fig.gca()
probplot.qqplot(ax=ax, line='s')
ax.get_lines()[0].set(markersize=1)
ax.get_lines()[1].set(color='black', dashes=[4, 1])
ax.set_title('Normal Q-Q Plot')
plt.savefig(self.output_path + '/qq_plot.png')
def plot_all(self):
self.plot_lidar_vs_sfm()
self.plot_difference_vs_source()
self.hexbin_plot()
@staticmethod
def fit_model(y, x, name):
print('\n** OLS for: ' + name + ' **\n')
model = sm.OLS(y, x, missing='drop').fit()
print(model.summary())
def fit_difference_vs_source(self, source, name):
for variable in self.TYPES:
self.fit_model(
self.elevation_differences,
getattr(source, variable).ravel(),
name + ': ' + variable,
)
def fit_lidar_vs_sfm(self):
self.fit_model(
self.lidar.band_filtered.ravel(),
self.sfm.band_filtered.ravel(),
'Lidar vs. SfM',
)
def fit_all(self):
self.fit_lidar_vs_sfm()
self.fit_difference_vs_source(self.lidar, 'Lidar')
self.fit_difference_vs_source(self.sfm, 'SfM')
def run(self):
# self.plot_all()
self.qqplot()
# self.fit_all()
``` |
{
"source": "jomido/tswss",
"score": 3
} |
#### File: jomido/tswss/server.py
```python
import os
import sys
import json
import random
# 3rd party
from twisted.python import log
from autobahn.websocket import (
WebSocketServerFactory,
WebSocketServerProtocol,
listenWS
)
from twisted.internet import reactor
# internal
from client import ClientState
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
#
# THE PROTOCOL (i.e. client) FOR THE WEBSOCKET SERVER
#
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
class MyWebSocketServerProtocol(WebSocketServerProtocol):
def onOpen(self):
"""
Register each client as it comes in (self == client) to the
web socket server.
"""
self.factory.register(self)
def onMessage(self, msg, binary):
"""
All websocket messages should be valid JSON. We pass the message and
the client that sent the message (self) to the web socket server.
"""
if not binary:
try:
msg = json.loads(msg)
except:
msg = {}
print ("Got message {} from {}".format(msg, self.peerstr))
self.factory.receive(msg, self)
def connectionLost(self, reason):
"""
Unregister each client (self) as it disconnects.
"""
WebSocketServerProtocol.connectionLost(self, reason)
self.factory.unregister(self)
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
#
# WEB SOCKET SERVER COMPONENT
#
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
class MyWebSocketServer(WebSocketServerFactory):
protocol = MyWebSocketServerProtocol
def __init__(self, url):
WebSocketServerFactory.__init__(self, url)
self.clients = []
self.client_state = {}
self.tick()
def tick(self):
"""
The server tick time. Housekeeping can be done here.
NOTE: This is initially kicked off by self.__init__
"""
print ("Server ticks.")
reactor.callLater(1, self.tick)
if random.choice([0, 0, 1]):
self.broadcast(json.dumps({"number": random.randint(1, 10)}))
def register(self, client):
"""
Register a client, associating some state.
"""
        if client not in self.clients:
            print ("registered client " + client.peerstr)
self.clients.append(client)
self.client_state[client] = ClientState()
def unregister(self, client):
"""
Remove a client from the registry.
"""
if client in self.clients:
print "unregistered client " + client.peerstr
self.clients.remove(client)
del self.client_state[client]
def send(self, msg, client):
"""
Send a message to a particular client.
:msg should be a dict
:client should be a WebSocketServerProtocol instance
"""
if client in self.clients:
print ("Replying to {} with {}...".format(client.peerstr, msg))
client.sendMessage(json.dumps(msg)) # TODO: superize this so that
# the client's sendMessage method
# handles the json serializing
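        # One possible shape for the TODO above (hypothetical sketch, not the
        # original code; assumes the old autobahn sendMessage(payload, binary)
        # signature used elsewhere in this file):
        #
        #   class JsonWebSocketServerProtocol(MyWebSocketServerProtocol):
        #       def sendMessage(self, payload, binary=False):
        #           if isinstance(payload, dict):
        #               payload = json.dumps(payload)
        #           WebSocketServerProtocol.sendMessage(self, payload, binary)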
def receive(self, msg, client):
"""
Websocket messages from clients come in here, along with the client.
:msg should be a dict
:client should be a WebSocketServerProtocol instance
"""
if client not in self.clients:
return
if not isinstance(msg, dict):
return
if msg.get("msgId", None) == None:
return
if msg.get("commandText", None) == None:
return
self.client_state[client].foo = 1
msg = dict(
msgId=msg.get("msgId"),
lines=["Hello.", "World."]
)
        self.send(msg, client)
def broadcast(self, msg):
"""
Broadcasts a message to all connected websockets.
:msg should be valid json
"""
print ("broadcasting message '{}'...".format(msg))
for c in self.clients:
print ("broadcasting to " + c.peerstr)
c.sendMessage(msg)
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
#
# STATIC SITE COMPONENT
#
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
from twisted.web import resource
class Home(resource.Resource):
isLeaf = False
def getChild(self, name, request):
if name == '':
return self # I assume by returning self I am returning
# self.render_GET
return resource.Resource.getChild(self, name, request)
def render_GET(self, request):
html = "<b>Ruh-roh.</b>"
try:
app_html = os.path.join(self.root_dir, self.app, 'app.html')
with open(app_html, 'r') as f:
html = f.read()
except:
log.err()
pass
return html
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
#
# MAIN SCRIPT
#
# / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
def setup_websocket_component(port=6789):
"""
Setup and start the websocket portion of the server.
"""
log.startLogging(sys.stdout)
address = "ws://localhost:" + str(port)
server = MyWebSocketServer(address)
listenWS(server)
def setup_static_component(port=8000, root=None, app="some-app", css="css",
js="js", images="images", fonts="fonts"):
"""
Setup and start the static portion of the server.
"""
from twisted.web import server, static
if not root:
root = os.getcwd()
site_root = Home()
site_root.root_dir = root
site_root.app = app
# Add static directories
site_root.putChild('css', static.File(os.path.join(root, app, css)))
site_root.putChild('js', static.File(os.path.join(root, app, js)))
site_root.putChild('images', static.File(os.path.join(root, app, images)))
site_root.putChild('fonts', static.File(os.path.join(root, app, fonts)))
site = server.Site(site_root)
reactor.listenTCP(port, site)
def run():
reactor.run() # blocks
if __name__ == '__main__':
# 1
setup_websocket_component(port=6789)
# 2
setup_static_component(
port=8000,
app='test-app'
)
# 3
run()
``` |
{
"source": "Jomigi/Cone_angle_artifact_reduction",
"score": 3
} |
#### File: Cone_angle_artifact_reduction/code/utils.py
```python
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
def plot_loss(epochs, results_folder, loss_list, loss_f, dataset):
x_grid = list(range(epochs))
plt.title(dataset + ' Losses Across Epochs')
plt.ylabel('MSE Loss')
plt.xlabel('Epochs')
plt.xticks(x_grid)
plt.plot(loss_list)
fig_path = results_folder + dataset + 'loss'
plt.savefig(fig_path)
#plt.show()
plt.close('all')
``` |
{
"source": "jomih/escalator",
"score": 3
} |
#### File: jomih/escalator/script_tonalidad.py
```python
import re
"""
Script para determinar las posibles tonalidades a partir de las notas insertadas
Las notas se puede insertar del siguiente modo:
C
Cmaj7
Cmin7
Csus
C#
Por ahora, no se soporta poner "b". Cualquier bemol debe meterse como "#"
"""
def modo_jonico(nota):
candidata = 0
    # Ionian (jonico) scale steps: 1 - 1 - 1/2 - 1 - 1 - 1 - 1/2
cadencia = (2, 2, 1, 2, 2, 2, 1)
tonos = ('maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'semi', 'maj7')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_dorico(nota):
candidata = 0
    # Dorian (dorico) scale steps: 1 - 1/2 - 1 - 1 - 1 - 1/2 - 1
cadencia = (2, 1, 2, 2, 2, 1, 2)
tonos = ('min7', 'semi', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_frigio(nota):
candidata = 0
    # Phrygian (frigio) scale steps: 1/2 - 1 - 1 - 1 - 1/2 - 1 - 1
cadencia = (1, 2, 2, 2, 1, 2, 2)
tonos = ('min7', 'maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_lidio(nota):
candidata = 0
    # Lydian (lidio) scale steps: 1 - 1 - 1 - 1/2 - 1 - 1 - 1/2
cadencia = (2, 2, 2, 1, 2, 2, 1)
tonos = ('maj7', 'dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_mixolidio(nota):
candidata = 0
    # Mixolydian (mixolidio) scale steps: 1 - 1 - 1/2 - 1 - 1 - 1/2 - 1
cadencia = (2, 2, 1, 2, 2, 1, 2)
tonos = ('dom', 'min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_eolico(nota):
candidata = 0
    # Aeolian (eolico) scale steps: 1 - 1/2 - 1 - 1 - 1/2 - 1 - 1
cadencia = (2, 1, 2, 2, 1, 2, 2)
tonos = ('min7', 'dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def modo_locria(nota):
candidata = 0
    # Locrian (locria) scale steps: 1/2 - 1 - 1 - 1/2 - 1 - 1 - 1
cadencia = (1, 2, 2, 1, 2, 2, 2)
tonos = ('dism', 'maj7', 'min7', 'min7', 'maj7', 'dom', 'min7', 'dism')
abanico_notas = ('C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B')
base = abanico_notas.index(nota.replace('maj7','').replace('min7',''))
#print ' posicion para', nota,' en mayor', base
tono = []
index = 0
varTmp1 = abanico_notas[base]
varTmp2 = tonos[index]
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
for value in cadencia:
index = index + 1
#print 'index es', index
#print 'buscara posicion', (base+value) % len(abanico_notas)
varTmp1 = abanico_notas[(base+value) % len(abanico_notas)]
#print ' nota encontrada', varTmp1
varTmp2 = tonos[index]
#print ' tono para nota encontrada', varTmp2
varTmp3 = ''.join([varTmp1, varTmp2])
tono.append(varTmp3)
base += value
#print 'Tonalidad', nota,'mayor:'
#for value in tono:
# print ' ', value
return (tono)
def chequeo_tono(tono, notas_array):
#print 'tono es', tono
for value in notas_array:
candidata = 0
#print 'value vale', value
if (value.find('#') != -1):
for nota in tono:
#print 'nota es', nota
if nota.startswith(value):
candidata = 1
break
else:
for nota in tono:
#print 'nota vale', nota
if (nota.startswith(value) and not (nota.find('#') != -1)):
candidata = 1
#print 'hizo match'
break
if not(candidata):
break
return(candidata)
def main():
notas_input = raw_input("Inserta las notas separadas por espacio: ")
notas_array = notas_input.split(' ')
while ('' in notas_array):
notas_array.remove('')
#index = notas_array.index('')
#notas_array.pop(index)
posibles_tonos = []
for index in range(0,len(notas_array)):
        # Check <NAME> (I)
tono = modo_jonico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Jonico I (maj7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check Dorico Min7 (II)
tono = modo_dorico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Dorico II (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check Frigio Min7 (III)
tono = modo_frigio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Frigio III (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check <NAME> (IV)
tono = modo_lidio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Lidio IV (maj7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check Mixolidio Dom (V)
tono = modo_mixolidio(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Mixolidio V (dom)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check Eolico Min7 (VI)
tono = modo_eolico(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Eolico VI (min7)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
        # Check Locria (VII)
tono = modo_locria(notas_array[index])
candidata = chequeo_tono(tono, notas_array)
if (candidata):
posibles_tonos.append({})
posibles_tonos[-1]['modo'] = 'Locria VII (dism)'
posibles_tonos[-1]['escala'] = []
posibles_tonos[-1]['escala'].append(tono)
if (len(posibles_tonos)):
print '\nPosibles tonalidades:'
for index in range(0,len(posibles_tonos)):
print ' # Tonalidad', posibles_tonos[index]['modo']
print ' Escala', posibles_tonos[index]['escala']
else:
print '\nNo se han encontrado posibles tonos'
#for line in sys.stdin:
# print line
##############
#Main Program
##############
if __name__ == "__main__":
print '\n## Script started\n'
main()
print '\n## Script finished\n'
``` |
{
"source": "jomih/Python_Automation_Training",
"score": 3
} |
#### File: Week1/Exercise8/script_exercise8.py
```python
from ciscoconfparse import CiscoConfParse
##############
##############
#Main Function
##############
##############
def main():
    # Parse the Cisco configuration file with CiscoConfParse
cisco_cfg = CiscoConfParse("./cisco_file.txt")
crypto_sections = cisco_cfg.find_objects(r"^crypto map CRYPTO")
#print 'Len of crypto_sections is', len(crypto_sections)
for item in crypto_sections:
print item.children
print'\n'
for index in range(0,len(crypto_sections)):
print crypto_sections[index].text
for item in crypto_sections[index].all_children:
print item.text
print'\n'
##############
#Main Program
##############
if __name__ == "__main__":
print '\n## Script Automation Course: Exercise 8\n'
main()
print '\n## Script finished\n'
```
#### File: Week3/Exercise1/script_exercise1.py
```python
import snmp_helper
import time
"""
1. Using SNMPv3 create a script that detects router configuration changes.
If the running configuration has changed, then send an email notification to yourself identifying the router that changed and the time that it changed
"""
DEVICE = ('172.16.17.32', '161') #pynet-rtr2 (Cisco 881)
OID = '1.3.6.1.4.1.9.9.43.1.1.1.0'  # ccmHistoryRunningLastChanged
SNMP_USER = ('pysnmp', 'galileo1', 'galileo1') #username, auth_key, encrypt_key
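# ccmHistoryRunningLastChanged (CISCO-CONFIG-MAN-MIB) reports the sysUpTime value,
# in hundredths of a second, at which the running configuration last changed;
# main() below therefore divides the polled value by 100 before printing it.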
def send_mail(recipient, subject, message, sender):
'''
Simple function to help simplify sending SMTP email
Assumes a mailserver is available on localhost
'''
import smtplib
from email.mime.text import MIMEText
message = MIMEText(message)
message['Subject'] = subject
message['From'] = sender
message['To'] = recipient
# Create SMTP connection object to localhost
smtp_conn = smtplib.SMTP('localhost')
# Send the email
smtp_conn.sendmail(sender, recipient, message.as_string())
# Close SMTP connection
smtp_conn.quit()
return True
def main():
previous_changed_seconds = 0
changes = 0
while (1):
snmp_output = snmp_helper.snmp_get_oid_v3(DEVICE, SNMP_USER, oid=OID)
print 'Raw output:', snmp_output
current_changed_seconds = snmp_helper.snmp_extract(snmp_output)
print '\nFormatted output:', current_changed_seconds
print 'changes is', changes
        if int(previous_changed_seconds) != int(current_changed_seconds):
if (changes):
                print '\nConfig changed (#', str(changes), ')', str(int(current_changed_seconds)/100), 'secs ago'
message_sender = '<EMAIL>'
message_subject = 'config changed'
message_recipient = '<EMAIL>'
message_content = 'Config of router has recently changed'
email_result = send_mail(message_recipient, message_subject, message_content, message_sender)
if (email_result):
print 'Email sent'
else:
print 'Error sending email'
changes = changes + 1
previous_changed_seconds = current_changed_seconds
else:
print 'Nothing changed:'
print ' previous_changed_seconds', previous_changed_seconds
print ' current_changed_seconds', current_changed_seconds
print ' changes', changes
time.sleep(310)
##############
#Main Program
##############
if __name__ == "__main__":
print '\n## Script started\n'
main()
print '\n## Script finished\n'
``` |